Merge branch 'master' into export-slabh
author		Tejun Heo <tj@kernel.org>	Mon, 5 Apr 2010 02:37:28 +0000 (11:37 +0900)
committer	Tejun Heo <tj@kernel.org>	Mon, 5 Apr 2010 02:37:28 +0000 (11:37 +0900)
46 files changed:
arch/arm/kernel/kprobes.c
arch/microblaze/kernel/dma.c
arch/microblaze/mm/init.c
arch/sparc/mm/init_64.c
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/setup.c
arch/x86/kernel/smpboot.c
arch/x86/mm/init.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_fops.c
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_state.c
drivers/gpu/drm/radeon/atom.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r300.c
drivers/gpu/drm/radeon/r420.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/rs400.c
drivers/gpu/drm/radeon/rv515.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/ttm/ttm_memory.c
drivers/gpu/drm/ttm/ttm_tt.c
drivers/hid/usbhid/hid-quirks.c
drivers/platform/x86/asus-laptop.c
drivers/serial/sunsu.c
drivers/usb/gadget/r8a66597-udc.c
drivers/video/sunxvr500.c
fs/logfs/dev_bdev.c
fs/logfs/dir.c
fs/logfs/journal.c
fs/logfs/readwrite.c
fs/logfs/segment.c
fs/logfs/super.c
fs/proc/base.c
fs/proc/task_mmu.c
fs/reiserfs/super.c
include/drm/drmP.h
kernel/cgroup_freezer.c
kernel/cred.c
kernel/perf_event.c
kernel/sched.c
kernel/trace/ring_buffer.c

@@@ -22,7 -22,6 +22,7 @@@
  #include <linux/kernel.h>
  #include <linux/kprobes.h>
  #include <linux/module.h>
 +#include <linux/slab.h>
  #include <linux/stop_machine.h>
  #include <linux/stringify.h>
  #include <asm/traps.h>
@@@ -394,6 -393,14 +394,14 @@@ void __kprobes jprobe_return(void
                /*
                 * Setup an empty pt_regs. Fill SP and PC fields as
                 * they're needed by longjmp_break_handler.
+                *
+                * We allocate some slack between the original SP and start of
 +                * our fabricated regs. To be precise, we want the worst case
 +                * covered, which is STMFD with all 16 regs, so we allocate
 +                * 2 * sizeof(struct pt_regs).
 +                *
 +                * This is to prevent any simulated instruction from writing
 +                * over the regs when it accesses the stack.
                 */
                "sub    sp, %0, %1              \n\t"
                "ldr    r0, ="__stringify(JPROBE_MAGIC_ADDR)"\n\t"
                "ldmia  sp, {r0 - pc}           \n\t"
                :
                : "r" (kcb->jprobe_saved_regs.ARM_sp),
-                 "I" (sizeof(struct pt_regs)),
+                 "I" (sizeof(struct pt_regs) * 2),
                  "J" (offsetof(struct pt_regs, ARM_sp)),
                  "J" (offsetof(struct pt_regs, ARM_pc)),
                  "J" (offsetof(struct pt_regs, ARM_cpsr))
@@@ -8,7 -8,6 +8,7 @@@
  
  #include <linux/device.h>
  #include <linux/dma-mapping.h>
 +#include <linux/gfp.h>
  #include <linux/dma-debug.h>
  #include <asm/bug.h>
  #include <asm/cacheflush.h>
@@@ -38,7 -37,7 +38,7 @@@ static inline void __dma_sync_page(unsi
  
  static unsigned long get_dma_direct_offset(struct device *dev)
  {
-       if (dev)
+       if (likely(dev))
                return (unsigned long)dev->archdata.dma_data;
  
        return PCI_DRAM_OFFSET; /* FIXME Not sure if is correct */
@@@ -15,7 -15,6 +15,7 @@@
  #include <linux/initrd.h>
  #include <linux/pagemap.h>
  #include <linux/pfn.h>
 +#include <linux/slab.h>
  #include <linux/swap.h>
  
  #include <asm/page.h>
@@@ -166,7 -165,6 +166,6 @@@ void free_init_pages(char *what, unsign
        for (addr = begin; addr < end; addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
-               memset((void *)addr, 0xcc, PAGE_SIZE);
                free_page(addr);
                totalram_pages++;
        }
@@@ -209,14 -207,6 +208,6 @@@ void __init mem_init(void
  }
  
  #ifndef CONFIG_MMU
- /* Check against bounds of physical memory */
- int ___range_ok(unsigned long addr, unsigned long size)
- {
-       return ((addr < memory_start) ||
-               ((addr + size) > memory_end));
- }
- EXPORT_SYMBOL(___range_ok);
  int page_is_ram(unsigned long pfn)
  {
        return __range_ok(pfn, 0);
diff --combined arch/sparc/mm/init_64.c
@@@ -13,6 -13,7 +13,6 @@@
  #include <linux/bootmem.h>
  #include <linux/mm.h>
  #include <linux/hugetlb.h>
 -#include <linux/slab.h>
  #include <linux/initrd.h>
  #include <linux/swap.h>
  #include <linux/pagemap.h>
@@@ -25,7 -26,6 +25,7 @@@
  #include <linux/percpu.h>
  #include <linux/lmb.h>
  #include <linux/mmzone.h>
 +#include <linux/gfp.h>
  
  #include <asm/head.h>
  #include <asm/system.h>
@@@ -2117,7 -2117,7 +2117,7 @@@ int __meminit vmemmap_populate(struct p
                               "node=%d entry=%lu/%lu\n", start, block, nr,
                               node,
                               addr >> VMEMMAP_CHUNK_SHIFT,
-                              VMEMMAP_SIZE >> VMEMMAP_CHUNK_SHIFT);
+                              VMEMMAP_SIZE);
                }
        }
        return 0;
@@@ -21,7 -21,6 +21,7 @@@
  #include <linux/kdebug.h>
  #include <linux/sched.h>
  #include <linux/uaccess.h>
 +#include <linux/slab.h>
  #include <linux/highmem.h>
  #include <linux/cpu.h>
  #include <linux/bitops.h>
@@@ -29,6 -28,7 +29,7 @@@
  #include <asm/apic.h>
  #include <asm/stacktrace.h>
  #include <asm/nmi.h>
+ #include <asm/compat.h>
  
  static u64 perf_event_mask __read_mostly;
  
@@@ -159,7 -159,7 +160,7 @@@ struct x86_pmu 
                                                 struct perf_event *event);
        struct event_constraint *event_constraints;
  
-       void            (*cpu_prepare)(int cpu);
+       int             (*cpu_prepare)(int cpu);
        void            (*cpu_starting)(int cpu);
        void            (*cpu_dying)(int cpu);
        void            (*cpu_dead)(int cpu);
@@@ -1334,11 -1334,12 +1335,12 @@@ static int __cpuini
  x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
  {
        unsigned int cpu = (long)hcpu;
+       int ret = NOTIFY_OK;
  
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_UP_PREPARE:
                if (x86_pmu.cpu_prepare)
-                       x86_pmu.cpu_prepare(cpu);
+                       ret = x86_pmu.cpu_prepare(cpu);
                break;
  
        case CPU_STARTING:
                        x86_pmu.cpu_dying(cpu);
                break;
  
+       case CPU_UP_CANCELED:
        case CPU_DEAD:
                if (x86_pmu.cpu_dead)
                        x86_pmu.cpu_dead(cpu);
                break;
        }
  
-       return NOTIFY_OK;
+       return ret;
  }
  
  static void __init pmu_check_apic(void)
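To illustrate why cpu_prepare now returns int, here is a hedged sketch (the example_* names are invented, not taken from the patch) of a prepare hook that can veto CPU bring-up when its per-CPU allocation fails; the notifier above simply forwards that verdict instead of always answering NOTIFY_OK.

/* sketch only: needs <linux/percpu.h>, <linux/slab.h>, <linux/notifier.h> */
struct example_cpu_state {
	u64 saved;
};

static DEFINE_PER_CPU(struct example_cpu_state *, example_state);

static int example_cpu_prepare(int cpu)
{
	struct example_cpu_state *st;

	/* Allocate on the node the CPU belongs to, before it comes online. */
	st = kzalloc_node(sizeof(*st), GFP_KERNEL, cpu_to_node(cpu));
	if (!st)
		return NOTIFY_BAD;	/* CPU_UP_PREPARE fails, bring-up is cancelled */

	per_cpu(example_state, cpu) = st;
	return NOTIFY_OK;
}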
@@@ -1629,14 -1631,42 +1632,42 @@@ copy_from_user_nmi(void *to, const voi
        return len;
  }
  
- static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
+ #ifdef CONFIG_COMPAT
+ static inline int
+ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
  {
-       unsigned long bytes;
+       /* 32-bit process in 64-bit kernel. */
+       struct stack_frame_ia32 frame;
+       const void __user *fp;
  
-       bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));
+       if (!test_thread_flag(TIF_IA32))
+               return 0;
+       fp = compat_ptr(regs->bp);
+       while (entry->nr < PERF_MAX_STACK_DEPTH) {
+               unsigned long bytes;
+               frame.next_frame     = 0;
+               frame.return_address = 0;
+               bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
+               if (bytes != sizeof(frame))
+                       break;
+               if (fp < compat_ptr(regs->sp))
+                       break;
  
-       return bytes == sizeof(*frame);
+               callchain_store(entry, frame.return_address);
+               fp = compat_ptr(frame.next_frame);
+       }
+       return 1;
+ }
+ #else
+ static inline int
+ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
+ {
+     return 0;
  }
+ #endif
  
  static void
  perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
        callchain_store(entry, PERF_CONTEXT_USER);
        callchain_store(entry, regs->ip);
  
+       if (perf_callchain_user32(regs, entry))
+               return;
        while (entry->nr < PERF_MAX_STACK_DEPTH) {
+               unsigned long bytes;
                frame.next_frame             = NULL;
                frame.return_address = 0;
  
-               if (!copy_stack_frame(fp, &frame))
+               bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
+               if (bytes != sizeof(frame))
                        break;
  
                if ((unsigned long)fp < regs->sp)
@@@ -1703,7 -1738,6 +1739,6 @@@ struct perf_callchain_entry *perf_callc
        return entry;
  }
  
- #ifdef CONFIG_EVENT_TRACING
  void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
  {
        regs->ip = ip;
        regs->cs = __KERNEL_CS;
        local_save_flags(regs->flags);
  }
- #endif
diff --combined arch/x86/kernel/setup.c
@@@ -55,6 -55,7 +55,6 @@@
  #include <linux/stddef.h>
  #include <linux/unistd.h>
  #include <linux/ptrace.h>
 -#include <linux/slab.h>
  #include <linux/user.h>
  #include <linux/delay.h>
  
@@@ -313,16 -314,17 +313,17 @@@ static void __init reserve_brk(void
  #define MAX_MAP_CHUNK (NR_FIX_BTMAPS << PAGE_SHIFT)
  static void __init relocate_initrd(void)
  {
+       /* Assume only end is not page aligned */
        u64 ramdisk_image = boot_params.hdr.ramdisk_image;
        u64 ramdisk_size  = boot_params.hdr.ramdisk_size;
+       u64 area_size     = PAGE_ALIGN(ramdisk_size);
        u64 end_of_lowmem = max_low_pfn_mapped << PAGE_SHIFT;
        u64 ramdisk_here;
        unsigned long slop, clen, mapaddr;
        char *p, *q;
  
        /* We need to move the initrd down into lowmem */
-       ramdisk_here = find_e820_area(0, end_of_lowmem, ramdisk_size,
+       ramdisk_here = find_e820_area(0, end_of_lowmem, area_size,
                                         PAGE_SIZE);
  
        if (ramdisk_here == -1ULL)
  
        /* Note: this includes all the lowmem currently occupied by
           the initrd, we rely on that fact to keep the data intact. */
-       reserve_early(ramdisk_here, ramdisk_here + ramdisk_size,
+       reserve_early(ramdisk_here, ramdisk_here + area_size,
                         "NEW RAMDISK");
        initrd_start = ramdisk_here + PAGE_OFFSET;
        initrd_end   = initrd_start + ramdisk_size;
  
  static void __init reserve_initrd(void)
  {
+       /* Assume only end is not page aligned */
        u64 ramdisk_image = boot_params.hdr.ramdisk_image;
        u64 ramdisk_size  = boot_params.hdr.ramdisk_size;
-       u64 ramdisk_end   = ramdisk_image + ramdisk_size;
+       u64 ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);
        u64 end_of_lowmem = max_low_pfn_mapped << PAGE_SHIFT;
  
        if (!boot_params.hdr.type_of_loader ||
@@@ -49,7 -49,6 +49,7 @@@
  #include <linux/nmi.h>
  #include <linux/tboot.h>
  #include <linux/stackprotector.h>
 +#include <linux/gfp.h>
  
  #include <asm/acpi.h>
  #include <asm/desc.h>
@@@ -243,8 -242,6 +243,6 @@@ static void __cpuinit smp_callin(void
        end_local_APIC_setup();
        map_cpu_to_logical_apicid();
  
-       notify_cpu_starting(cpuid);
        /*
         * Need to setup vector mappings before we enable interrupts.
         */
         */
        smp_store_cpu_info(cpuid);
  
+       notify_cpu_starting(cpuid);
        /*
         * Allow the master to continue.
         */
diff --combined arch/x86/mm/init.c
@@@ -1,4 -1,3 +1,4 @@@
 +#include <linux/gfp.h>
  #include <linux/initrd.h>
  #include <linux/ioport.h>
  #include <linux/swap.h>
@@@ -332,11 -331,23 +332,23 @@@ int devmem_is_allowed(unsigned long pag
  
  void free_init_pages(char *what, unsigned long begin, unsigned long end)
  {
-       unsigned long addr = begin;
+       unsigned long addr;
+       unsigned long begin_aligned, end_aligned;
  
-       if (addr >= end)
+       /* Make sure boundaries are page aligned */
+       begin_aligned = PAGE_ALIGN(begin);
+       end_aligned   = end & PAGE_MASK;
+       if (WARN_ON(begin_aligned != begin || end_aligned != end)) {
+               begin = begin_aligned;
+               end   = end_aligned;
+       }
+       if (begin >= end)
                return;
  
+       addr = begin;
        /*
         * If debugging page accesses then do not free this memory but
         * mark them not present - any buggy init-section access will
         */
  #ifdef CONFIG_DEBUG_PAGEALLOC
        printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
-               begin, PAGE_ALIGN(end));
+               begin, end);
        set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
  #else
        /*
        for (; addr < end; addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
-               memset((void *)(addr & ~(PAGE_SIZE-1)),
-                       POISON_FREE_INITMEM, PAGE_SIZE);
+               memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
                free_page(addr);
                totalram_pages++;
        }
@@@ -377,6 -387,15 +388,15 @@@ void free_initmem(void
  #ifdef CONFIG_BLK_DEV_INITRD
  void free_initrd_mem(unsigned long start, unsigned long end)
  {
-       free_init_pages("initrd memory", start, end);
+       /*
 +        * end may not be page aligned, and we cannot align it here: the
 +        * decompressor could be confused by an aligned initrd_end.
 +        * The partial end page is already reserved in
 +        *   - i386_start_kernel()
 +        *   - x86_64_start_kernel()
 +        *   - relocate_initrd()
 +        * so it is safe to PAGE_ALIGN() here and free that partial page as well.
+        */
+       free_init_pages("initrd memory", start, PAGE_ALIGN(end));
  }
  #endif
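As a side note, a minimal userspace sketch (PAGE_SIZE and the range values are made up for illustration) of the boundary handling that free_init_pages() now enforces: the start is rounded up and the end rounded down, so only whole pages inside the range are freed, while free_initrd_mem() deliberately rounds end up because the partial end page was reserved earlier.

#include <stdio.h>

#define PAGE_SIZE     4096UL
#define PAGE_MASK     (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long begin = 0x100000;          /* already page aligned */
	unsigned long end   = 0x100000 + 6000;   /* ends mid-page        */

	unsigned long begin_aligned = PAGE_ALIGN(begin); /* 0x100000 */
	unsigned long end_aligned   = end & PAGE_MASK;   /* 0x101000 */

	/* Only the whole pages between the aligned bounds are freed. */
	printf("free %lu page(s)\n",
	       (end_aligned - begin_aligned) / PAGE_SIZE);
	return 0;
}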
@@@ -27,7 -27,6 +27,7 @@@
   * DEALINGS IN THE SOFTWARE.
   */
  #include <linux/kernel.h>
 +#include <linux/slab.h>
  #include <linux/i2c.h>
  #include <linux/i2c-algo-bit.h>
  #include "drmP.h"
@@@ -708,15 -707,6 +708,6 @@@ static struct drm_display_mode *drm_mod
        mode->vsync_end = mode->vsync_start + vsync_pulse_width;
        mode->vtotal = mode->vdisplay + vblank;
  
-       /* perform the basic check for the detailed timing */
-       if (mode->hsync_end > mode->htotal ||
-               mode->vsync_end > mode->vtotal) {
-               drm_mode_destroy(dev, mode);
-               DRM_DEBUG_KMS("Incorrect detailed timing. "
-                               "Sync is beyond the blank.\n");
-               return NULL;
-       }
        /* Some EDIDs have bogus h/vtotal values */
        if (mode->hsync_end > mode->htotal)
                mode->htotal = mode->hsync_end + 1;
@@@ -29,7 -29,6 +29,7 @@@
   */
  #include <linux/kernel.h>
  #include <linux/sysrq.h>
 +#include <linux/slab.h>
  #include <linux/fb.h>
  #include "drmP.h"
  #include "drm_crtc.h"
@@@ -284,6 -283,8 +284,8 @@@ static struct sysrq_key_op sysrq_drm_fb
        .help_msg = "force-fb(V)",
        .action_msg = "Restore framebuffer console",
  };
+ #else
+ static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { };
  #endif
  
  static void drm_fb_helper_on(struct fb_info *info)
@@@ -36,7 -36,6 +36,7 @@@
  
  #include "drmP.h"
  #include <linux/poll.h>
 +#include <linux/slab.h>
  #include <linux/smp_lock.h>
  
  static int drm_open_helper(struct inode *inode, struct file *filp,
@@@ -141,14 -140,16 +141,16 @@@ int drm_open(struct inode *inode, struc
                spin_unlock(&dev->count_lock);
        }
  out:
-       mutex_lock(&dev->struct_mutex);
-       if (minor->type == DRM_MINOR_LEGACY) {
-               BUG_ON((dev->dev_mapping != NULL) &&
-                       (dev->dev_mapping != inode->i_mapping));
-               if (dev->dev_mapping == NULL)
-                       dev->dev_mapping = inode->i_mapping;
+       if (!retcode) {
+               mutex_lock(&dev->struct_mutex);
+               if (minor->type == DRM_MINOR_LEGACY) {
+                       if (dev->dev_mapping == NULL)
+                               dev->dev_mapping = inode->i_mapping;
+                       else if (dev->dev_mapping != inode->i_mapping)
+                               retcode = -ENODEV;
+               }
+               mutex_unlock(&dev->struct_mutex);
        }
-       mutex_unlock(&dev->struct_mutex);
  
        return retcode;
  }
@@@ -34,7 -34,6 +34,7 @@@
  #include "nouveau_dma.h"
  
  #include <linux/log2.h>
 +#include <linux/slab.h>
  
  static void
  nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
@@@ -440,8 -439,7 +440,7 @@@ nouveau_bo_evict_flags(struct ttm_buffe
  
        switch (bo->mem.mem_type) {
        case TTM_PL_VRAM:
-               nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT |
-                                        TTM_PL_FLAG_SYSTEM);
+               nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT);
                break;
        default:
                nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM);
@@@ -24,7 -24,6 +24,7 @@@
   */
  
  #include <linux/swab.h>
 +#include <linux/slab.h>
  #include "drmP.h"
  #include "drm.h"
  #include "drm_sarea.h"
@@@ -36,7 -35,6 +36,6 @@@
  #include "nouveau_drm.h"
  #include "nv50_display.h"
  
- static int nouveau_stub_init(struct drm_device *dev) { return 0; }
  static void nouveau_stub_takedown(struct drm_device *dev) {}
  
  static int nouveau_init_engine_ptrs(struct drm_device *dev)
                engine->timer.init              = nv04_timer_init;
                engine->timer.read              = nv04_timer_read;
                engine->timer.takedown          = nv04_timer_takedown;
-               engine->fb.init                 = nouveau_stub_init;
-               engine->fb.takedown             = nouveau_stub_takedown;
+               engine->fb.init                 = nv50_fb_init;
+               engine->fb.takedown             = nv50_fb_takedown;
                engine->graph.grclass           = nv50_graph_grclass;
                engine->graph.init              = nv50_graph_init;
                engine->graph.takedown          = nv50_graph_takedown;
@@@ -24,7 -24,6 +24,7 @@@
  
  #include <linux/module.h>
  #include <linux/sched.h>
 +#include <linux/slab.h>
  #include <asm/unaligned.h>
  
  #define ATOM_DEBUG
  
  typedef struct {
        struct atom_context *ctx;
        uint32_t *ps, *ws;
        int ps_shift;
        uint16_t start;
+       unsigned last_jump;
+       unsigned long last_jump_jiffies;
+       bool abort;
  } atom_exec_context;
  
  int atom_debug = 0;
- static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
- void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
+ static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
+ int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
  
  static uint32_t atom_arg_mask[8] =
      { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000,
@@@ -605,12 -606,17 +607,17 @@@ static void atom_op_beep(atom_exec_cont
  static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
  {
        int idx = U8((*ptr)++);
+       int r = 0;
        if (idx < ATOM_TABLE_NAMES_CNT)
                SDEBUG("   table: %d (%s)\n", idx, atom_table_names[idx]);
        else
                SDEBUG("   table: %d\n", idx);
        if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
-               atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
+               r = atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
+       if (r) {
+               ctx->abort = true;
+       }
  }
  
  static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
@@@ -674,6 -680,8 +681,8 @@@ static void atom_op_eot(atom_exec_conte
  static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
  {
        int execute = 0, target = U16(*ptr);
+       unsigned long cjiffies;
        (*ptr) += 2;
        switch (arg) {
        case ATOM_COND_ABOVE:
        if (arg != ATOM_COND_ALWAYS)
                SDEBUG("   taken: %s\n", execute ? "yes" : "no");
        SDEBUG("   target: 0x%04X\n", target);
-       if (execute)
+       if (execute) {
+               if (ctx->last_jump == (ctx->start + target)) {
+                       cjiffies = jiffies;
+                       if (time_after(cjiffies, ctx->last_jump_jiffies)) {
+                               cjiffies -= ctx->last_jump_jiffies;
+                               if ((jiffies_to_msecs(cjiffies) > 1000)) {
+                                       DRM_ERROR("atombios stuck in loop for more than 1sec aborting\n");
+                                       ctx->abort = true;
+                               }
+                       } else {
+                               /* jiffies wrapped around; we will just wait a little longer */
+                               ctx->last_jump_jiffies = jiffies;
+                       }
+               } else {
+                       ctx->last_jump = ctx->start + target;
+                       ctx->last_jump_jiffies = jiffies;
+               }
                *ptr = ctx->start + target;
+       }
  }
  
  static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
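The jump-target watchdog added above boils down to a small reusable idiom; here is a hedged sketch of it (loop_guard and its fields are invented names), assuming the usual jiffies helpers:

#include <linux/jiffies.h>
#include <linux/types.h>

struct loop_guard {
	unsigned long target;		/* last jump destination seen */
	unsigned long first_jiffies;	/* when we first jumped there */
};

static bool loop_guard_expired(struct loop_guard *g, unsigned long target)
{
	unsigned long now = jiffies;

	if (g->target != target) {
		/* New target: remember it and restart the clock. */
		g->target = target;
		g->first_jiffies = now;
		return false;
	}
	if (!time_after(now, g->first_jiffies)) {
		/* jiffies wrapped around; just wait a little longer. */
		g->first_jiffies = now;
		return false;
	}
	/* Same target re-taken for more than a second: give up. */
	return jiffies_to_msecs(now - g->first_jiffies) > 1000;
}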
@@@ -1105,7 -1130,7 +1131,7 @@@ static struct 
        atom_op_shr, ATOM_ARG_MC}, {
  atom_op_debug, 0},};
  
- static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
+ static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
  {
        int base = CU16(ctx->cmd_table + 4 + 2 * index);
        int len, ws, ps, ptr;
        atom_exec_context ectx;
  
        if (!base)
-               return;
+               return -EINVAL;
  
        len = CU16(base + ATOM_CT_SIZE_PTR);
        ws = CU8(base + ATOM_CT_WS_PTR);
        ectx.ps_shift = ps / 4;
        ectx.start = base;
        ectx.ps = params;
+       ectx.abort = false;
+       ectx.last_jump = 0;
        if (ws)
                ectx.ws = kzalloc(4 * ws, GFP_KERNEL);
        else
                        SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
                else
                        SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1);
+               if (ectx.abort) {
+                       DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n",
+                               base, len, ws, ps, ptr - 1);
+                       return -EINVAL;
+               }
  
                if (op < ATOM_OP_CNT && op > 0)
                        opcode_table[op].func(&ectx, &ptr,
  
        if (ws)
                kfree(ectx.ws);
+       return 0;
  }
  
- void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
+ int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
  {
+       int r;
        mutex_lock(&ctx->mutex);
        /* reset reg block */
        ctx->reg_block = 0;
        ctx->fb_base = 0;
        /* reset io mode */
        ctx->io_mode = ATOM_IO_MM;
-       atom_execute_table_locked(ctx, index, params);
+       r = atom_execute_table_locked(ctx, index, params);
        mutex_unlock(&ctx->mutex);
+       return r;
  }
  
  static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
@@@ -1249,9 -1285,7 +1286,7 @@@ int atom_asic_init(struct atom_context 
  
        if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
                return 1;
-       atom_execute_table(ctx, ATOM_CMD_INIT, ps);
-       return 0;
+       return atom_execute_table(ctx, ATOM_CMD_INIT, ps);
  }
  
  void atom_destroy(struct atom_context *ctx)
        kfree(ctx);
  }
  
- void atom_parse_data_header(struct atom_context *ctx, int index,
+ bool atom_parse_data_header(struct atom_context *ctx, int index,
                            uint16_t * size, uint8_t * frev, uint8_t * crev,
                            uint16_t * data_start)
  {
        int offset = index * 2 + 4;
        int idx = CU16(ctx->data_table + offset);
+       u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4);
+       if (!mdt[index])
+               return false;
  
        if (size)
                *size = CU16(idx);
        if (crev)
                *crev = CU8(idx + 3);
        *data_start = idx;
-       return;
+       return true;
  }
  
- void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
+ bool atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
                           uint8_t * crev)
  {
        int offset = index * 2 + 4;
        int idx = CU16(ctx->cmd_table + offset);
+       u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4);
+       if (!mct[index])
+               return false;
  
        if (frev)
                *frev = CU8(idx + 2);
        if (crev)
                *crev = CU8(idx + 3);
-       return;
+       return true;
  }
  
  int atom_allocate_fb_scratch(struct atom_context *ctx)
  {
        int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
        uint16_t data_offset;
-       int usage_bytes;
+       int usage_bytes = 0;
        struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;
  
-       atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset);
+       if (atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
+               firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
  
-       firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
+               DRM_DEBUG("atom firmware requested %08x %dkb\n",
+                         firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware,
+                         firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
  
-       DRM_DEBUG("atom firmware requested %08x %dkb\n",
-                 firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware,
-                 firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
-       usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
+               usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
+       }
        if (usage_bytes == 0)
                usage_bytes = 20 * 1024;
        /* allocate some scratch memory */
   */
  #include <linux/firmware.h>
  #include <linux/platform_device.h>
 +#include <linux/slab.h>
  #include "drmP.h"
  #include "radeon.h"
+ #include "radeon_asic.h"
  #include "radeon_drm.h"
  #include "rv770d.h"
  #include "atom.h"
@@@ -437,7 -437,6 +438,6 @@@ static void evergreen_gpu_init(struct r
  
  int evergreen_mc_init(struct radeon_device *rdev)
  {
-       fixed20_12 a;
        u32 tmp;
        int chansize, numchan;
  
                rdev->mc.real_vram_size = rdev->mc.aper_size;
        }
        r600_vram_gtt_location(rdev, &rdev->mc);
-       /* FIXME: we should enforce default clock in case GPU is not in
-        * default setup
-        */
-       a.full = rfixed_const(100);
-       rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
-       rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
+       radeon_update_bandwidth_info(rdev);
        return 0;
  }
  
@@@ -747,6 -742,7 +743,7 @@@ int evergreen_init(struct radeon_devic
  
  void evergreen_fini(struct radeon_device *rdev)
  {
+       radeon_pm_fini(rdev);
        evergreen_suspend(rdev);
  #if 0
        r600_blit_fini(rdev);
   *          Jerome Glisse
   */
  #include <linux/seq_file.h>
 +#include <linux/slab.h>
  #include "drmP.h"
  #include "drm.h"
  #include "radeon_drm.h"
  #include "radeon_reg.h"
  #include "radeon.h"
+ #include "radeon_asic.h"
  #include "r100d.h"
  #include "rs100d.h"
  #include "rv200d.h"
@@@ -236,9 -236,9 +237,9 @@@ int r100_pci_gart_set_page(struct radeo
  
  void r100_pci_gart_fini(struct radeon_device *rdev)
  {
+       radeon_gart_fini(rdev);
        r100_pci_gart_disable(rdev);
        radeon_gart_table_ram_free(rdev);
-       radeon_gart_fini(rdev);
  }
  
  int r100_irq_set(struct radeon_device *rdev)
@@@ -313,10 -313,12 +314,12 @@@ int r100_irq_process(struct radeon_devi
                /* Vertical blank interrupts */
                if (status & RADEON_CRTC_VBLANK_STAT) {
                        drm_handle_vblank(rdev->ddev, 0);
+                       rdev->pm.vblank_sync = true;
                        wake_up(&rdev->irq.vblank_queue);
                }
                if (status & RADEON_CRTC2_VBLANK_STAT) {
                        drm_handle_vblank(rdev->ddev, 1);
+                       rdev->pm.vblank_sync = true;
                        wake_up(&rdev->irq.vblank_queue);
                }
                if (status & RADEON_FP_DETECT_STAT) {
@@@ -742,6 -744,8 +745,8 @@@ int r100_cp_init(struct radeon_device *
        udelay(10);
        rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
        rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR);
+       /* protect against crazy HW on resume */
+       rdev->cp.wptr &= rdev->cp.ptr_mask;
        /* Set cp mode to bus mastering & enable cp*/
        WREG32(RADEON_CP_CSQ_MODE,
               REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
@@@ -1805,6 -1809,7 +1810,7 @@@ void r100_set_common_regs(struct radeon
  {
        struct drm_device *dev = rdev->ddev;
        bool force_dac2 = false;
+       u32 tmp;
  
        /* set these so they don't interfere with anything */
        WREG32(RADEON_OV0_SCALE_CNTL, 0);
                WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
                WREG32(RADEON_DAC_CNTL2, dac2_cntl);
        }
+       /* switch PM block to ACPI mode */
+       tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL);
+       tmp &= ~RADEON_PM_MODE_SEL;
+       WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp);
  }
  
  /*
@@@ -2023,6 -2034,7 +2035,7 @@@ void r100_mc_init(struct radeon_device 
        radeon_vram_location(rdev, &rdev->mc, base);
        if (!(rdev->flags & RADEON_IS_AGP))
                radeon_gtt_location(rdev, &rdev->mc);
+       radeon_update_bandwidth_info(rdev);
  }
  
  
@@@ -2386,6 -2398,8 +2399,8 @@@ void r100_bandwidth_update(struct radeo
        uint32_t pixel_bytes1 = 0;
        uint32_t pixel_bytes2 = 0;
  
+       radeon_update_display_priority(rdev);
        if (rdev->mode_info.crtcs[0]->base.enabled) {
                mode1 = &rdev->mode_info.crtcs[0]->base.mode;
                pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8;
        /*
         * determine is there is enough bw for current mode
         */
-       mclk_ff.full = rfixed_const(rdev->clock.default_mclk);
-       temp_ff.full = rfixed_const(100);
-       mclk_ff.full = rfixed_div(mclk_ff, temp_ff);
-       sclk_ff.full = rfixed_const(rdev->clock.default_sclk);
-       sclk_ff.full = rfixed_div(sclk_ff, temp_ff);
+       sclk_ff = rdev->pm.sclk;
+       mclk_ff = rdev->pm.mclk;
  
        temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
        temp_ff.full = rfixed_const(temp);
@@@ -3441,6 -3452,7 +3453,7 @@@ int r100_suspend(struct radeon_device *
  
  void r100_fini(struct radeon_device *rdev)
  {
+       radeon_pm_fini(rdev);
        r100_cp_fini(rdev);
        r100_wb_fini(rdev);
        r100_ib_fini(rdev);
   *          Jerome Glisse
   */
  #include <linux/seq_file.h>
 +#include <linux/slab.h>
  #include "drmP.h"
  #include "drm.h"
  #include "radeon_reg.h"
  #include "radeon.h"
+ #include "radeon_asic.h"
  #include "radeon_drm.h"
  #include "r100_track.h"
  #include "r300d.h"
@@@ -165,9 -165,9 +166,9 @@@ void rv370_pcie_gart_disable(struct rad
  
  void rv370_pcie_gart_fini(struct radeon_device *rdev)
  {
+       radeon_gart_fini(rdev);
        rv370_pcie_gart_disable(rdev);
        radeon_gart_table_vram_free(rdev);
-       radeon_gart_fini(rdev);
  }
  
  void r300_fence_ring_emit(struct radeon_device *rdev,
@@@ -482,6 -482,7 +483,7 @@@ void r300_mc_init(struct radeon_device 
        radeon_vram_location(rdev, &rdev->mc, base);
        if (!(rdev->flags & RADEON_IS_AGP))
                radeon_gtt_location(rdev, &rdev->mc);
+       radeon_update_bandwidth_info(rdev);
  }
  
  void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
@@@ -1335,6 -1336,7 +1337,7 @@@ int r300_suspend(struct radeon_device *
  
  void r300_fini(struct radeon_device *rdev)
  {
+       radeon_pm_fini(rdev);
        r100_cp_fini(rdev);
        r100_wb_fini(rdev);
        r100_ib_fini(rdev);
   *          Jerome Glisse
   */
  #include <linux/seq_file.h>
 +#include <linux/slab.h>
  #include "drmP.h"
  #include "radeon_reg.h"
  #include "radeon.h"
+ #include "radeon_asic.h"
  #include "atom.h"
  #include "r100d.h"
  #include "r420d.h"
@@@ -267,6 -267,7 +268,7 @@@ int r420_suspend(struct radeon_device *
  
  void r420_fini(struct radeon_device *rdev)
  {
+       radeon_pm_fini(rdev);
        r100_cp_fini(rdev);
        r100_wb_fini(rdev);
        r100_ib_fini(rdev);
   *          Alex Deucher
   *          Jerome Glisse
   */
 +#include <linux/slab.h>
  #include <linux/seq_file.h>
  #include <linux/firmware.h>
  #include <linux/platform_device.h>
  #include "drmP.h"
  #include "radeon_drm.h"
  #include "radeon.h"
+ #include "radeon_asic.h"
  #include "radeon_mode.h"
  #include "r600d.h"
  #include "atom.h"
@@@ -492,9 -492,9 +493,9 @@@ void r600_pcie_gart_disable(struct rade
  
  void r600_pcie_gart_fini(struct radeon_device *rdev)
  {
+       radeon_gart_fini(rdev);
        r600_pcie_gart_disable(rdev);
        radeon_gart_table_vram_free(rdev);
-       radeon_gart_fini(rdev);
  }
  
  void r600_agp_enable(struct radeon_device *rdev)
@@@ -676,7 -676,6 +677,6 @@@ void r600_vram_gtt_location(struct rade
  
  int r600_mc_init(struct radeon_device *rdev)
  {
-       fixed20_12 a;
        u32 tmp;
        int chansize, numchan;
  
                rdev->mc.real_vram_size = rdev->mc.aper_size;
        }
        r600_vram_gtt_location(rdev, &rdev->mc);
-       /* FIXME: we should enforce default clock in case GPU is not in
-        * default setup
-        */
-       a.full = rfixed_const(100);
-       rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
-       rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
        if (rdev->flags & RADEON_IS_IGP)
                rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+       radeon_update_bandwidth_info(rdev);
        return 0;
  }
  
@@@ -1133,6 -1128,7 +1129,7 @@@ void r600_gpu_init(struct radeon_devic
        /* Setup pipes */
        WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
        WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
+       WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
  
        tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
        WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
@@@ -2120,6 -2116,7 +2117,7 @@@ int r600_init(struct radeon_device *rde
  
  void r600_fini(struct radeon_device *rdev)
  {
+       radeon_pm_fini(rdev);
        r600_audio_fini(rdev);
        r600_blit_fini(rdev);
        r600_cp_fini(rdev);
@@@ -2399,19 -2396,19 +2397,19 @@@ static void r600_disable_interrupt_stat
                WREG32(DC_HPD4_INT_CONTROL, tmp);
                if (ASIC_IS_DCE32(rdev)) {
                        tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
-                       WREG32(DC_HPD5_INT_CONTROL, 0);
+                       WREG32(DC_HPD5_INT_CONTROL, tmp);
                        tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
-                       WREG32(DC_HPD6_INT_CONTROL, 0);
+                       WREG32(DC_HPD6_INT_CONTROL, tmp);
                }
        } else {
                WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
                WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
                tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
-               WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, 0);
+               WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
                tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
-               WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, 0);
+               WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
                tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
-               WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, 0);
+               WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
        }
  }
  
@@@ -2766,6 -2763,7 +2764,7 @@@ restart_ih
                        case 0: /* D1 vblank */
                                if (disp_int & LB_D1_VBLANK_INTERRUPT) {
                                        drm_handle_vblank(rdev->ddev, 0);
+                                       rdev->pm.vblank_sync = true;
                                        wake_up(&rdev->irq.vblank_queue);
                                        disp_int &= ~LB_D1_VBLANK_INTERRUPT;
                                        DRM_DEBUG("IH: D1 vblank\n");
                        case 0: /* D2 vblank */
                                if (disp_int & LB_D2_VBLANK_INTERRUPT) {
                                        drm_handle_vblank(rdev->ddev, 1);
+                                       rdev->pm.vblank_sync = true;
                                        wake_up(&rdev->irq.vblank_queue);
                                        disp_int &= ~LB_D2_VBLANK_INTERRUPT;
                                        DRM_DEBUG("IH: D2 vblank\n");
                                break;
                        case 10:
                                if (disp_int_cont2 & DC_HPD5_INTERRUPT) {
-                                       disp_int_cont &= ~DC_HPD5_INTERRUPT;
+                                       disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
                                        queue_hotplug = true;
                                        DRM_DEBUG("IH: HPD5\n");
                                }
                                break;
                        case 12:
                                if (disp_int_cont2 & DC_HPD6_INTERRUPT) {
-                                       disp_int_cont &= ~DC_HPD6_INTERRUPT;
+                                       disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
                                        queue_hotplug = true;
                                        DRM_DEBUG("IH: HPD6\n");
                                }
@@@ -26,7 -26,6 +26,7 @@@
   *          Jerome Glisse
   */
  #include <linux/console.h>
 +#include <linux/slab.h>
  #include <drm/drmP.h>
  #include <drm/drm_crtc_helper.h>
  #include <drm/radeon_drm.h>
@@@ -34,7 -33,6 +34,6 @@@
  #include <linux/vga_switcheroo.h>
  #include "radeon_reg.h"
  #include "radeon.h"
- #include "radeon_asic.h"
  #include "atom.h"
  
  /*
@@@ -243,6 -241,36 +242,36 @@@ bool radeon_card_posted(struct radeon_d
  
  }
  
+ void radeon_update_bandwidth_info(struct radeon_device *rdev)
+ {
+       fixed20_12 a;
+       u32 sclk, mclk;
+       if (rdev->flags & RADEON_IS_IGP) {
+               sclk = radeon_get_engine_clock(rdev);
+               mclk = rdev->clock.default_mclk;
+               a.full = rfixed_const(100);
+               rdev->pm.sclk.full = rfixed_const(sclk);
+               rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
+               rdev->pm.mclk.full = rfixed_const(mclk);
+               rdev->pm.mclk.full = rfixed_div(rdev->pm.mclk, a);
+               a.full = rfixed_const(16);
+               /* core_bandwidth = sclk(Mhz) * 16 */
+               rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a);
+       } else {
+               sclk = radeon_get_engine_clock(rdev);
+               mclk = radeon_get_memory_clock(rdev);
+               a.full = rfixed_const(100);
+               rdev->pm.sclk.full = rfixed_const(sclk);
+               rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
+               rdev->pm.mclk.full = rfixed_const(mclk);
+               rdev->pm.mclk.full = rfixed_div(rdev->pm.mclk, a);
+       }
+ }
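For readers unfamiliar with the fixed20_12 arithmetic above, here is a rough userspace model (the fx_* names are invented; it assumes the 20.12 layout, i.e. value = full / 4096, and clock values in 10 kHz units, as implied by the "core_bandwidth = sclk(Mhz) * 16" comment):

#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t full; } fixed20_12;

static fixed20_12 fx_const(uint32_t v)		/* analogous to rfixed_const() */
{
	return (fixed20_12){ v << 12 };
}

static fixed20_12 fx_div(fixed20_12 a, fixed20_12 b)	/* analogous to rfixed_div() */
{
	return (fixed20_12){ (uint32_t)(((uint64_t)a.full << 12) / b.full) };
}

int main(void)
{
	/* Engine clock reported in 10 kHz units: 40000 => 400 MHz. */
	fixed20_12 sclk = fx_div(fx_const(40000), fx_const(100));

	printf("sclk = %u.%03u MHz\n", sclk.full >> 12,
	       (sclk.full & 0xfff) * 1000 / 4096);
	return 0;
}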
  bool radeon_boot_test_post_card(struct radeon_device *rdev)
  {
        if (radeon_card_posted(rdev))
@@@ -289,181 -317,6 +318,6 @@@ void radeon_dummy_page_fini(struct rade
  }
  
  
- /*
-  * Registers accessors functions.
-  */
- uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg)
- {
-       DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
-       BUG_ON(1);
-       return 0;
- }
- void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
- {
-       DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
-                 reg, v);
-       BUG_ON(1);
- }
- void radeon_register_accessor_init(struct radeon_device *rdev)
- {
-       rdev->mc_rreg = &radeon_invalid_rreg;
-       rdev->mc_wreg = &radeon_invalid_wreg;
-       rdev->pll_rreg = &radeon_invalid_rreg;
-       rdev->pll_wreg = &radeon_invalid_wreg;
-       rdev->pciep_rreg = &radeon_invalid_rreg;
-       rdev->pciep_wreg = &radeon_invalid_wreg;
-       /* Don't change order as we are overridding accessor. */
-       if (rdev->family < CHIP_RV515) {
-               rdev->pcie_reg_mask = 0xff;
-       } else {
-               rdev->pcie_reg_mask = 0x7ff;
-       }
-       /* FIXME: not sure here */
-       if (rdev->family <= CHIP_R580) {
-               rdev->pll_rreg = &r100_pll_rreg;
-               rdev->pll_wreg = &r100_pll_wreg;
-       }
-       if (rdev->family >= CHIP_R420) {
-               rdev->mc_rreg = &r420_mc_rreg;
-               rdev->mc_wreg = &r420_mc_wreg;
-       }
-       if (rdev->family >= CHIP_RV515) {
-               rdev->mc_rreg = &rv515_mc_rreg;
-               rdev->mc_wreg = &rv515_mc_wreg;
-       }
-       if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
-               rdev->mc_rreg = &rs400_mc_rreg;
-               rdev->mc_wreg = &rs400_mc_wreg;
-       }
-       if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
-               rdev->mc_rreg = &rs690_mc_rreg;
-               rdev->mc_wreg = &rs690_mc_wreg;
-       }
-       if (rdev->family == CHIP_RS600) {
-               rdev->mc_rreg = &rs600_mc_rreg;
-               rdev->mc_wreg = &rs600_mc_wreg;
-       }
-       if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_RV740)) {
-               rdev->pciep_rreg = &r600_pciep_rreg;
-               rdev->pciep_wreg = &r600_pciep_wreg;
-       }
- }
- /*
-  * ASIC
-  */
- int radeon_asic_init(struct radeon_device *rdev)
- {
-       radeon_register_accessor_init(rdev);
-       switch (rdev->family) {
-       case CHIP_R100:
-       case CHIP_RV100:
-       case CHIP_RS100:
-       case CHIP_RV200:
-       case CHIP_RS200:
-               rdev->asic = &r100_asic;
-               break;
-       case CHIP_R200:
-       case CHIP_RV250:
-       case CHIP_RS300:
-       case CHIP_RV280:
-               rdev->asic = &r200_asic;
-               break;
-       case CHIP_R300:
-       case CHIP_R350:
-       case CHIP_RV350:
-       case CHIP_RV380:
-               if (rdev->flags & RADEON_IS_PCIE)
-                       rdev->asic = &r300_asic_pcie;
-               else
-                       rdev->asic = &r300_asic;
-               break;
-       case CHIP_R420:
-       case CHIP_R423:
-       case CHIP_RV410:
-               rdev->asic = &r420_asic;
-               break;
-       case CHIP_RS400:
-       case CHIP_RS480:
-               rdev->asic = &rs400_asic;
-               break;
-       case CHIP_RS600:
-               rdev->asic = &rs600_asic;
-               break;
-       case CHIP_RS690:
-       case CHIP_RS740:
-               rdev->asic = &rs690_asic;
-               break;
-       case CHIP_RV515:
-               rdev->asic = &rv515_asic;
-               break;
-       case CHIP_R520:
-       case CHIP_RV530:
-       case CHIP_RV560:
-       case CHIP_RV570:
-       case CHIP_R580:
-               rdev->asic = &r520_asic;
-               break;
-       case CHIP_R600:
-       case CHIP_RV610:
-       case CHIP_RV630:
-       case CHIP_RV620:
-       case CHIP_RV635:
-       case CHIP_RV670:
-       case CHIP_RS780:
-       case CHIP_RS880:
-               rdev->asic = &r600_asic;
-               break;
-       case CHIP_RV770:
-       case CHIP_RV730:
-       case CHIP_RV710:
-       case CHIP_RV740:
-               rdev->asic = &rv770_asic;
-               break;
-       case CHIP_CEDAR:
-       case CHIP_REDWOOD:
-       case CHIP_JUNIPER:
-       case CHIP_CYPRESS:
-       case CHIP_HEMLOCK:
-               rdev->asic = &evergreen_asic;
-               break;
-       default:
-               /* FIXME: not supported yet */
-               return -EINVAL;
-       }
-       if (rdev->flags & RADEON_IS_IGP) {
-               rdev->asic->get_memory_clock = NULL;
-               rdev->asic->set_memory_clock = NULL;
-       }
-       return 0;
- }
- /*
-  * Wrapper around modesetting bits.
-  */
- int radeon_clocks_init(struct radeon_device *rdev)
- {
-       int r;
-       r = radeon_static_clocks_init(rdev->ddev);
-       if (r) {
-               return r;
-       }
-       DRM_INFO("Clocks initialized !\n");
-       return 0;
- }
- void radeon_clocks_fini(struct radeon_device *rdev)
- {
- }
  /* ATOM accessor methods */
  static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
  {
@@@ -568,29 -421,6 +422,6 @@@ static unsigned int radeon_vga_set_deco
                return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
  }
  
- void radeon_agp_disable(struct radeon_device *rdev)
- {
-       rdev->flags &= ~RADEON_IS_AGP;
-       if (rdev->family >= CHIP_R600) {
-               DRM_INFO("Forcing AGP to PCIE mode\n");
-               rdev->flags |= RADEON_IS_PCIE;
-       } else if (rdev->family >= CHIP_RV515 ||
-                       rdev->family == CHIP_RV380 ||
-                       rdev->family == CHIP_RV410 ||
-                       rdev->family == CHIP_R423) {
-               DRM_INFO("Forcing AGP to PCIE mode\n");
-               rdev->flags |= RADEON_IS_PCIE;
-               rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
-               rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
-       } else {
-               DRM_INFO("Forcing AGP to PCI mode\n");
-               rdev->flags |= RADEON_IS_PCI;
-               rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
-               rdev->asic->gart_set_page = &r100_pci_gart_set_page;
-       }
-       rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
- }
  void radeon_check_arguments(struct radeon_device *rdev)
  {
        /* vramlimit must be a power of two */
@@@ -732,6 -562,14 +563,14 @@@ int radeon_device_init(struct radeon_de
                return r;
        radeon_check_arguments(rdev);
  
+       /* all of the newer IGP chips have an internal gart
+        * However some rs4xx report as AGP, so remove that here.
+        */
+       if ((rdev->family >= CHIP_RS400) &&
+           (rdev->flags & RADEON_IS_IGP)) {
+               rdev->flags &= ~RADEON_IS_AGP;
+       }
        if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
                radeon_agp_disable(rdev);
        }
@@@ -30,7 -30,6 +30,7 @@@
   *    Dave Airlie
   */
  #include <linux/list.h>
 +#include <linux/slab.h>
  #include <drm/drmP.h>
  #include "radeon_drm.h"
  #include "radeon.h"
@@@ -186,8 -185,10 +186,10 @@@ int radeon_bo_pin(struct radeon_bo *bo
                return 0;
        }
        radeon_ttm_placement_from_domain(bo, domain);
-       /* force to pin into visible video ram */
-       bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
+       if (domain == RADEON_GEM_DOMAIN_VRAM) {
+               /* force to pin into visible video ram */
+               bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
+       }
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
   *          Jerome Glisse
   */
  #include <linux/seq_file.h>
 +#include <linux/slab.h>
  #include <drm/drmP.h>
  #include "radeon.h"
+ #include "radeon_asic.h"
  #include "rs400d.h"
  
  /* This files gather functions specifics to : rs400,rs480 */
@@@ -203,9 -203,9 +204,9 @@@ void rs400_gart_disable(struct radeon_d
  
  void rs400_gart_fini(struct radeon_device *rdev)
  {
+       radeon_gart_fini(rdev);
        rs400_gart_disable(rdev);
        radeon_gart_table_ram_free(rdev);
-       radeon_gart_fini(rdev);
  }
  
  int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
@@@ -265,6 -265,7 +266,7 @@@ void rs400_mc_init(struct radeon_devic
        base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
        radeon_vram_location(rdev, &rdev->mc, base);
        radeon_gtt_location(rdev, &rdev->mc);
+       radeon_update_bandwidth_info(rdev);
  }
  
  uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
@@@ -389,6 -390,8 +391,8 @@@ static int rs400_startup(struct radeon_
  {
        int r;
  
+       r100_set_common_regs(rdev);
        rs400_mc_program(rdev);
        /* Resume clock */
        r300_clock_startup(rdev);
@@@ -454,6 -457,7 +458,7 @@@ int rs400_suspend(struct radeon_device 
  
  void rs400_fini(struct radeon_device *rdev)
  {
+       radeon_pm_fini(rdev);
        r100_cp_fini(rdev);
        r100_wb_fini(rdev);
        r100_ib_fini(rdev);
   *          Jerome Glisse
   */
  #include <linux/seq_file.h>
 +#include <linux/slab.h>
  #include "drmP.h"
  #include "rv515d.h"
  #include "radeon.h"
+ #include "radeon_asic.h"
  #include "atom.h"
  #include "rv515_reg_safe.h"
  
@@@ -280,19 -280,13 +281,13 @@@ static void rv515_vram_get_type(struct 
  
  void rv515_mc_init(struct radeon_device *rdev)
  {
-       fixed20_12 a;
  
        rv515_vram_get_type(rdev);
        r100_vram_init_sizes(rdev);
        radeon_vram_location(rdev, &rdev->mc, 0);
        if (!(rdev->flags & RADEON_IS_AGP))
                radeon_gtt_location(rdev, &rdev->mc);
-       /* FIXME: we should enforce default clock in case GPU is not in
-        * default setup
-        */
-       a.full = rfixed_const(100);
-       rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
-       rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
+       radeon_update_bandwidth_info(rdev);
  }
  
  uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg)
@@@ -540,6 -534,7 +535,7 @@@ void rv515_set_safe_registers(struct ra
  
  void rv515_fini(struct radeon_device *rdev)
  {
+       radeon_pm_fini(rdev);
        r100_cp_fini(rdev);
        r100_wb_fini(rdev);
        r100_ib_fini(rdev);
@@@ -1021,7 -1016,7 +1017,7 @@@ void rv515_bandwidth_avivo_update(struc
        struct drm_display_mode *mode1 = NULL;
        struct rv515_watermark wm0;
        struct rv515_watermark wm1;
-       u32 tmp;
+       u32 tmp, d1mode_priority_a_cnt, d2mode_priority_a_cnt;
        fixed20_12 priority_mark02, priority_mark12, fill_rate;
        fixed20_12 a, b;
  
                        priority_mark12.full = 0;
                if (wm1.priority_mark_max.full > priority_mark12.full)
                        priority_mark12.full = wm1.priority_mark_max.full;
-               WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
-               WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
-               WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
-               WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
+               d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
+               d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
+               if (rdev->disp_priority == 2) {
+                       d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+                       d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+               }
+               WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
+               WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
+               WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
+               WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
        } else if (mode0) {
                if (rfixed_trunc(wm0.dbpp) > 64)
                        a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair);
                        priority_mark02.full = 0;
                if (wm0.priority_mark_max.full > priority_mark02.full)
                        priority_mark02.full = wm0.priority_mark_max.full;
-               WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
-               WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
+               d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
+               if (rdev->disp_priority == 2)
+                       d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+               WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
+               WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
                WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
                WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
        } else {
                        priority_mark12.full = 0;
                if (wm1.priority_mark_max.full > priority_mark12.full)
                        priority_mark12.full = wm1.priority_mark_max.full;
+               d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
+               if (rdev->disp_priority == 2)
+                       d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
                WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
                WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
-               WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
-               WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
+               WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
+               WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
        }
  }
  
@@@ -1162,6 -1169,8 +1170,8 @@@ void rv515_bandwidth_update(struct rade
        struct drm_display_mode *mode0 = NULL;
        struct drm_display_mode *mode1 = NULL;
  
+       radeon_update_display_priority(rdev);
        if (rdev->mode_info.crtcs[0]->base.enabled)
                mode0 = &rdev->mode_info.crtcs[0]->base.mode;
        if (rdev->mode_info.crtcs[1]->base.enabled)
         * modes if the user specifies HIGH for displaypriority
         * option.
         */
-       if (rdev->disp_priority == 2) {
+       if ((rdev->disp_priority == 2) &&
+           (rdev->family == CHIP_RV515)) {
                tmp = RREG32_MC(MC_MISC_LAT_TIMER);
                tmp &= ~MC_DISP1R_INIT_LAT_MASK;
                tmp &= ~MC_DISP0R_INIT_LAT_MASK;
   */
  #include <linux/firmware.h>
  #include <linux/platform_device.h>
 +#include <linux/slab.h>
  #include "drmP.h"
  #include "radeon.h"
+ #include "radeon_asic.h"
  #include "radeon_drm.h"
  #include "rv770d.h"
  #include "atom.h"
@@@ -126,9 -126,9 +127,9 @@@ void rv770_pcie_gart_disable(struct rad
  
  void rv770_pcie_gart_fini(struct radeon_device *rdev)
  {
+       radeon_gart_fini(rdev);
        rv770_pcie_gart_disable(rdev);
        radeon_gart_table_vram_free(rdev);
-       radeon_gart_fini(rdev);
  }
  
  
@@@ -648,10 -648,13 +649,13 @@@ static void rv770_gpu_init(struct radeo
  
        WREG32(CC_RB_BACKEND_DISABLE,      cc_rb_backend_disable);
        WREG32(CC_GC_SHADER_PIPE_CONFIG,   cc_gc_shader_pipe_config);
+       WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
        WREG32(CC_SYS_RB_BACKEND_DISABLE,  cc_rb_backend_disable);
  
        WREG32(CGTS_SYS_TCC_DISABLE, 0);
        WREG32(CGTS_TCC_DISABLE, 0);
+       WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
+       WREG32(CGTS_USER_TCC_DISABLE, 0);
  
        num_qd_pipes =
                R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
  
  int rv770_mc_init(struct radeon_device *rdev)
  {
-       fixed20_12 a;
        u32 tmp;
        int chansize, numchan;
  
                rdev->mc.real_vram_size = rdev->mc.aper_size;
        }
        r600_vram_gtt_location(rdev, &rdev->mc);
-       /* FIXME: we should enforce default clock in case GPU is not in
-        * default setup
-        */
-       a.full = rfixed_const(100);
-       rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
-       rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
+       radeon_update_bandwidth_info(rdev);
        return 0;
  }
  
@@@ -1014,6 -1012,13 +1013,13 @@@ int rv770_resume(struct radeon_device *
                DRM_ERROR("radeon: failled testing IB (%d).\n", r);
                return r;
        }
+       r = r600_audio_init(rdev);
+       if (r) {
+               dev_err(rdev->dev, "radeon: audio init failed\n");
+               return r;
+       }
        return r;
  
  }
@@@ -1022,6 -1027,7 +1028,7 @@@ int rv770_suspend(struct radeon_device 
  {
        int r;
  
+       r600_audio_fini(rdev);
        /* FIXME: we should wait for ring to be empty */
        r700_cp_stop(rdev);
        rdev->cp.ready = false;
@@@ -1145,11 -1151,19 +1152,19 @@@ int rv770_init(struct radeon_device *rd
                        }
                }
        }
+       r = r600_audio_init(rdev);
+       if (r) {
+               dev_err(rdev->dev, "radeon: audio init failed\n");
+               return r;
+       }
        return 0;
  }
  
  void rv770_fini(struct radeon_device *rdev)
  {
+       radeon_pm_fini(rdev);
        r600_blit_fini(rdev);
        r600_cp_fini(rdev);
        r600_wb_fini(rdev);
@@@ -32,7 -32,6 +32,7 @@@
  #include <linux/wait.h>
  #include <linux/mm.h>
  #include <linux/module.h>
 +#include <linux/slab.h>
  
  #define TTM_MEMORY_ALLOC_RETRIES 4
  
@@@ -261,8 -260,8 +261,8 @@@ static int ttm_mem_init_kernel_zone(str
        zone->used_mem = 0;
        zone->glob = glob;
        glob->zone_kernel = zone;
-       kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type);
-       ret = kobject_add(&zone->kobj, &glob->kobj, zone->name);
+       ret = kobject_init_and_add(
+               &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
        if (unlikely(ret != 0)) {
                kobject_put(&zone->kobj);
                return ret;
@@@ -297,8 -296,8 +297,8 @@@ static int ttm_mem_init_highmem_zone(st
        zone->used_mem = 0;
        zone->glob = glob;
        glob->zone_highmem = zone;
-       kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type);
-       ret = kobject_add(&zone->kobj, &glob->kobj, zone->name);
+       ret = kobject_init_and_add(
+               &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
        if (unlikely(ret != 0)) {
                kobject_put(&zone->kobj);
                return ret;
@@@ -344,8 -343,8 +344,8 @@@ static int ttm_mem_init_dma32_zone(stru
        zone->used_mem = 0;
        zone->glob = glob;
        glob->zone_dma32 = zone;
-       kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type);
-       ret = kobject_add(&zone->kobj, &glob->kobj, zone->name);
+       ret = kobject_init_and_add(
+               &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
        if (unlikely(ret != 0)) {
                kobject_put(&zone->kobj);
                return ret;
@@@ -366,10 -365,8 +366,8 @@@ int ttm_mem_global_init(struct ttm_mem_
        glob->swap_queue = create_singlethread_workqueue("ttm_swap");
        INIT_WORK(&glob->work, ttm_shrink_work);
        init_waitqueue_head(&glob->queue);
-       kobject_init(&glob->kobj, &ttm_mem_glob_kobj_type);
-       ret = kobject_add(&glob->kobj,
-                         ttm_get_kobj(),
-                         "memory_accounting");
+       ret = kobject_init_and_add(
+               &glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting");
        if (unlikely(ret != 0)) {
                kobject_put(&glob->kobj);
                return ret;
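
The zone hunks above and this global hunk all collapse a kobject_init()/kobject_add() pair into kobject_init_and_add(). A minimal sketch of the idiom, with made-up names (my_zone_ktype and my_zone_register are not part of TTM):

#include <linux/kobject.h>

static struct kobj_type my_zone_ktype;  /* real code would supply a release() */

static int my_zone_register(struct kobject *kobj, struct kobject *parent)
{
        int ret;

        /* One call replaces kobject_init() followed by kobject_add(). */
        ret = kobject_init_and_add(kobj, &my_zone_ktype, parent, "my_zone");
        if (ret) {
                /* The kobject is initialised even on failure, so drop the
                 * reference the same way the hunks above do. */
                kobject_put(kobj);
                return ret;
        }
        return 0;
}
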
   * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
   */
  
- #include <linux/vmalloc.h>
  #include <linux/sched.h>
  #include <linux/highmem.h>
  #include <linux/pagemap.h>
  #include <linux/file.h>
  #include <linux/swap.h>
 +#include <linux/slab.h>
  #include "drm_cache.h"
+ #include "drm_mem_util.h"
  #include "ttm/ttm_module.h"
  #include "ttm/ttm_bo_driver.h"
  #include "ttm/ttm_placement.h"
@@@ -44,32 -43,15 +44,15 @@@ static int ttm_tt_swapin(struct ttm_tt 
  
  /**
   * Allocates storage for pointers to the pages that back the ttm.
-  *
-  * Uses kmalloc if possible. Otherwise falls back to vmalloc.
   */
  static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
  {
-       unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
-       ttm->pages = NULL;
-       if (size <= PAGE_SIZE)
-               ttm->pages = kzalloc(size, GFP_KERNEL);
-       if (!ttm->pages) {
-               ttm->pages = vmalloc_user(size);
-               if (ttm->pages)
-                       ttm->page_flags |= TTM_PAGE_FLAG_VMALLOC;
-       }
+       ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages));
  }
  
  static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
  {
-       if (ttm->page_flags & TTM_PAGE_FLAG_VMALLOC) {
-               vfree(ttm->pages);
-               ttm->page_flags &= ~TTM_PAGE_FLAG_VMALLOC;
-       } else {
-               kfree(ttm->pages);
-       }
+       drm_free_large(ttm->pages);
        ttm->pages = NULL;
  }
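
The page-directory hunk above swaps the open-coded kmalloc/vmalloc fallback for the drm_calloc_large()/drm_free_large() helpers. A hedged sketch of a caller (my_dir and its fields are hypothetical, not TTM structures):

#include <linux/mm.h>
#include "drm_mem_util.h"       /* drm_calloc_large(), drm_free_large() */

struct my_dir {
        unsigned long num_pages;
        struct page **pages;
};

static int my_dir_alloc(struct my_dir *d)
{
        /* Zeroed array; kcalloc for small sizes, vmalloc beyond PAGE_SIZE. */
        d->pages = drm_calloc_large(d->num_pages, sizeof(*d->pages));
        return d->pages ? 0 : -ENOMEM;
}

static void my_dir_free(struct my_dir *d)
{
        drm_free_large(d->pages);       /* picks kfree() or vfree() as needed */
        d->pages = NULL;
}
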
  
@@@ -16,7 -16,6 +16,7 @@@
   */
  
  #include <linux/hid.h>
 +#include <linux/slab.h>
  
  #include "../hid-ids.h"
  
@@@ -61,6 -60,7 +61,7 @@@ static const struct hid_blacklist 
        { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET },
+       { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT },
@@@ -49,7 -49,6 +49,7 @@@
  #include <linux/input.h>
  #include <linux/input/sparse-keymap.h>
  #include <linux/rfkill.h>
 +#include <linux/slab.h>
  #include <acpi/acpi_drivers.h>
  #include <acpi/acpi_bus.h>
  
@@@ -140,7 -139,7 +140,7 @@@ MODULE_PARM_DESC(bluetooth_status, "Se
  
  /* Backlight */
  static acpi_handle lcd_switch_handle;
- static const char *lcd_switch_paths[] = {
+ static char *lcd_switch_paths[] = {
    "\\_SB.PCI0.SBRG.EC0._Q10", /* All new models */
    "\\_SB.PCI0.ISA.EC0._Q10",  /* A1x */
    "\\_SB.PCI0.PX40.ECD0._Q10",        /* L3C */
  #define METHOD_SWITCH_DISPLAY "SDSP"
  
  static acpi_handle display_get_handle;
- static const char *display_get_paths[] = {
+ static char *display_get_paths[] = {
    /* A6B, A6K A6R A7D F3JM L4R M6R A3G M6A M6V VX-1 V6J V6V W3Z */
    "\\_SB.PCI0.P0P1.VGA.GETD",
    /* A3E A4K, A4D A4L A6J A7J A8J Z71V M9V S5A M5A z33A W1Jc W2V G1 */
diff --combined drivers/serial/sunsu.c
@@@ -29,7 -29,6 +29,7 @@@
  #include <linux/serial.h>
  #include <linux/sysrq.h>
  #include <linux/console.h>
 +#include <linux/slab.h>
  #ifdef CONFIG_SERIO
  #include <linux/serio.h>
  #endif
@@@ -1454,8 -1453,10 +1454,10 @@@ static int __devinit su_probe(struct of
        if (up->su_type == SU_PORT_KBD || up->su_type == SU_PORT_MS) {
                err = sunsu_kbd_ms_init(up);
                if (err) {
+                       of_iounmap(&op->resource[0],
+                                  up->port.membase, up->reg_size);
                        kfree(up);
-                       goto out_unmap;
+                       return err;
                }
                dev_set_drvdata(&op->dev, up);
  
  #include <linux/module.h>
  #include <linux/interrupt.h>
  #include <linux/delay.h>
- #include <linux/err.h>
  #include <linux/io.h>
  #include <linux/platform_device.h>
  #include <linux/clk.h>
  #include <linux/err.h>
 +#include <linux/slab.h>
  
  #include <linux/usb/ch9.h>
  #include <linux/usb/gadget.h>
@@@ -5,6 -5,7 +5,6 @@@
  
  #include <linux/module.h>
  #include <linux/kernel.h>
 -#include <linux/slab.h>
  #include <linux/fb.h>
  #include <linux/pci.h>
  #include <linux/init.h>
@@@ -241,11 -242,27 +241,27 @@@ static int __devinit e3d_set_fbinfo(str
  static int __devinit e3d_pci_register(struct pci_dev *pdev,
                                      const struct pci_device_id *ent)
  {
+       struct device_node *of_node;
+       const char *device_type;
        struct fb_info *info;
        struct e3d_info *ep;
        unsigned int line_length;
        int err;
  
+       of_node = pci_device_to_OF_node(pdev);
+       if (!of_node) {
+               printk(KERN_ERR "e3d: Cannot find OF node of %s\n",
+                      pci_name(pdev));
+               return -ENODEV;
+       }
+       device_type = of_get_property(of_node, "device_type", NULL);
+       if (!device_type) {
+               printk(KERN_INFO "e3d: Ignoring secondary output device "
+                      "at %s\n", pci_name(pdev));
+               return -ENODEV;
+       }
        err = pci_enable_device(pdev);
        if (err < 0) {
                printk(KERN_ERR "e3d: Cannot enable PCI device %s\n",
        ep->info = info;
        ep->pdev = pdev;
        spin_lock_init(&ep->lock);
-       ep->of_node = pci_device_to_OF_node(pdev);
-       if (!ep->of_node) {
-               printk(KERN_ERR "e3d: Cannot find OF node of %s\n",
-                      pci_name(pdev));
-               err = -ENODEV;
-               goto err_release_fb;
-       }
+       ep->of_node = of_node;
  
        /* Read the PCI base register of the frame buffer, which we
         * need in order to interpret the RAMDAC_VID_*FB* values in
diff --combined fs/logfs/dev_bdev.c
@@@ -9,7 -9,6 +9,7 @@@
  #include <linux/bio.h>
  #include <linux/blkdev.h>
  #include <linux/buffer_head.h>
 +#include <linux/gfp.h>
  
  #define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1))
  
@@@ -81,6 -80,7 +81,7 @@@ static void writeseg_end_io(struct bio 
                        prefetchw(&bvec->bv_page->flags);
  
                end_page_writeback(page);
+               page_cache_release(page);
        } while (bvec >= bio->bi_io_vec);
        bio_put(bio);
        if (atomic_dec_and_test(&super->s_pending_writes))
@@@ -98,8 -98,10 +99,10 @@@ static int __bdev_writeseg(struct super
        unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
        int i;
  
+       if (max_pages > BIO_MAX_PAGES)
+               max_pages = BIO_MAX_PAGES;
        bio = bio_alloc(GFP_NOFS, max_pages);
-       BUG_ON(!bio); /* FIXME: handle this */
+       BUG_ON(!bio);
  
        for (i = 0; i < nr_pages; i++) {
                if (i >= max_pages) {
@@@ -192,8 -194,10 +195,10 @@@ static int do_erase(struct super_block 
        unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
        int i;
  
+       if (max_pages > BIO_MAX_PAGES)
+               max_pages = BIO_MAX_PAGES;
        bio = bio_alloc(GFP_NOFS, max_pages);
-       BUG_ON(!bio); /* FIXME: handle this */
+       BUG_ON(!bio);
  
        for (i = 0; i < nr_pages; i++) {
                if (i >= max_pages) {
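
Both hunks in this file add the same clamp before bio_alloc(). As a rough sketch of the idea (alloc_capped_bio() is an invented name), the queue's per-request limit can exceed what one bio can describe, so it is capped at BIO_MAX_PAGES before allocation:

#include <linux/bio.h>
#include <linux/blkdev.h>

static struct bio *alloc_capped_bio(struct request_queue *q)
{
        unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);

        /* A single bio cannot carry more than BIO_MAX_PAGES vecs. */
        if (max_pages > BIO_MAX_PAGES)
                max_pages = BIO_MAX_PAGES;

        return bio_alloc(GFP_NOFS, max_pages);
}
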
diff --combined fs/logfs/dir.c
@@@ -6,7 -6,7 +6,7 @@@
   * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
   */
  #include "logfs.h"
 -
 +#include <linux/slab.h>
  
  /*
   * Atomic dir operations
@@@ -303,12 -303,12 +303,12 @@@ static int __logfs_readdir(struct file 
                                (filler_t *)logfs_readpage, NULL);
                if (IS_ERR(page))
                        return PTR_ERR(page);
-               dd = kmap_atomic(page, KM_USER0);
+               dd = kmap(page);
                BUG_ON(dd->namelen == 0);
  
                full = filldir(buf, (char *)dd->name, be16_to_cpu(dd->namelen),
                                pos, be64_to_cpu(dd->ino), dd->type);
-               kunmap_atomic(dd, KM_USER0);
+               kunmap(page);
                page_cache_release(page);
                if (full)
                        break;
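
The readdir hunk trades kmap_atomic() for kmap(), presumably because filldir() can fault and sleep while the mapping is held, which an atomic kmap does not allow. A small hypothetical helper showing the sleeping-safe form:

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical: copy a record out of a possibly-highmem page while the
 * surrounding code is allowed to sleep (hence kmap(), not kmap_atomic()).
 */
static void copy_record(struct page *page, size_t offset, void *dst, size_t len)
{
        char *src = kmap(page);         /* may sleep; no atomic-context rules */

        memcpy(dst, src + offset, len);
        kunmap(page);
}
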
diff --combined fs/logfs/journal.c
@@@ -6,7 -6,6 +6,7 @@@
   * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
   */
  #include "logfs.h"
 +#include <linux/slab.h>
  
  static void logfs_calc_free(struct super_block *sb)
  {
@@@ -801,6 -800,7 +801,7 @@@ void do_logfs_journal_wl_pass(struct su
  {
        struct logfs_super *super = logfs_super(sb);
        struct logfs_area *area = super->s_journal_area;
+       struct btree_head32 *head = &super->s_reserved_segments;
        u32 segno, ec;
        int i, err;
  
        /* Drop old segments */
        journal_for_each(i)
                if (super->s_journal_seg[i]) {
+                       btree_remove32(head, super->s_journal_seg[i]);
                        logfs_set_segment_unreserved(sb,
                                        super->s_journal_seg[i],
                                        super->s_journal_ec[i]);
                super->s_journal_seg[i] = segno;
                super->s_journal_ec[i] = ec;
                logfs_set_segment_reserved(sb, segno);
+               err = btree_insert32(head, segno, (void *)1, GFP_KERNEL);
+               BUG_ON(err); /* mempool should prevent this */
+               err = logfs_erase_segment(sb, segno, 1);
+               BUG_ON(err); /* FIXME: remount-ro would be nicer */
        }
        /* Manually move journal_area */
+       freeseg(sb, area->a_segno);
        area->a_segno = super->s_journal_seg[0];
        area->a_is_open = 0;
        area->a_used_bytes = 0;
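
The journal hunk now mirrors segment reservation into the s_reserved_segments btree so the rest of the filesystem skips those segments. A minimal sketch of the pairing, built around the btree_insert32()/btree_remove32() calls seen above (the wrapper names are made up):

#include <linux/btree.h>
#include <linux/gfp.h>
#include <linux/types.h>

static int reserve_journal_segment(struct btree_head32 *head, u32 segno)
{
        /* The stored value only needs to be non-NULL; (void *)1 acts as a flag. */
        return btree_insert32(head, segno, (void *)1, GFP_KERNEL);
}

static void release_journal_segment(struct btree_head32 *head, u32 segno)
{
        btree_remove32(head, segno);
}
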
diff --combined fs/logfs/readwrite.c
@@@ -18,7 -18,6 +18,7 @@@
   */
  #include "logfs.h"
  #include <linux/sched.h>
 +#include <linux/slab.h>
  
  static u64 adjust_bix(u64 bix, level_t level)
  {
@@@ -1595,7 -1594,6 +1595,6 @@@ int logfs_delete(struct inode *inode, p
        return ret;
  }
  
- /* Rewrite cannot mark the inode dirty but has to write it immediatly. */
  int logfs_rewrite_block(struct inode *inode, u64 bix, u64 ofs,
                gc_level_t gc_level, long flags)
  {
                if (level != 0)
                        alloc_indirect_block(inode, page, 0);
                err = logfs_write_buf(inode, page, flags);
+               if (!err && shrink_level(gc_level) == 0) {
+                       /* Rewrite cannot mark the inode dirty but has to
+                        * write it immediately.
+                        * Q: Can't we just create an alias for the inode
+                        * instead?  And if not, why not?
+                        */
+                       if (inode->i_ino == LOGFS_INO_MASTER)
+                               logfs_write_anchor(inode->i_sb);
+                       else {
+                               err = __logfs_write_inode(inode, flags);
+                       }
+               }
        }
        logfs_put_write_page(page);
        return err;
diff --combined fs/logfs/segment.c
@@@ -10,7 -10,6 +10,7 @@@
   * three kinds of objects: inodes, dentries and blocks, both data and indirect.
   */
  #include "logfs.h"
 +#include <linux/slab.h>
  
  static int logfs_mark_segment_bad(struct super_block *sb, u32 segno)
  {
@@@ -94,49 -93,57 +94,57 @@@ void __logfs_buf_write(struct logfs_are
        } while (len);
  }
  
- /*
-  * bdev_writeseg will write full pages.  Memset the tail to prevent data leaks.
-  */
- static void pad_wbuf(struct logfs_area *area, int final)
+ static void pad_partial_page(struct logfs_area *area)
  {
        struct super_block *sb = area->a_sb;
-       struct logfs_super *super = logfs_super(sb);
        struct page *page;
        u64 ofs = dev_ofs(sb, area->a_segno, area->a_used_bytes);
        pgoff_t index = ofs >> PAGE_SHIFT;
        long offset = ofs & (PAGE_SIZE-1);
        u32 len = PAGE_SIZE - offset;
  
-       if (len == PAGE_SIZE) {
-               /* The math in this function can surely use some love */
-               len = 0;
-       }
-       if (len) {
-               BUG_ON(area->a_used_bytes >= super->s_segsize);
-               page = get_mapping_page(area->a_sb, index, 0);
+       if (len % PAGE_SIZE) {
+               page = get_mapping_page(sb, index, 0);
                BUG_ON(!page); /* FIXME: reserve a pool */
                memset(page_address(page) + offset, 0xff, len);
                SetPagePrivate(page);
                page_cache_release(page);
        }
+ }
  
-       if (!final)
-               return;
+ static void pad_full_pages(struct logfs_area *area)
+ {
+       struct super_block *sb = area->a_sb;
+       struct logfs_super *super = logfs_super(sb);
+       u64 ofs = dev_ofs(sb, area->a_segno, area->a_used_bytes);
+       u32 len = super->s_segsize - area->a_used_bytes;
+       pgoff_t index = PAGE_CACHE_ALIGN(ofs) >> PAGE_CACHE_SHIFT;
+       pgoff_t no_indizes = len >> PAGE_CACHE_SHIFT;
+       struct page *page;
  
-       area->a_used_bytes += len;
-       for ( ; area->a_used_bytes < super->s_segsize;
-                       area->a_used_bytes += PAGE_SIZE) {
-               /* Memset another page */
-               index++;
-               page = get_mapping_page(area->a_sb, index, 0);
+       while (no_indizes) {
+               page = get_mapping_page(sb, index, 0);
                BUG_ON(!page); /* FIXME: reserve a pool */
-               memset(page_address(page), 0xff, PAGE_SIZE);
+               SetPageUptodate(page);
+               memset(page_address(page), 0xff, PAGE_CACHE_SIZE);
                SetPagePrivate(page);
                page_cache_release(page);
+               index++;
+               no_indizes--;
        }
  }
  
+ /*
+  * bdev_writeseg will write full pages.  Memset the tail to prevent data leaks.
+  * Also make sure we allocate (and memset) all pages for final writeout.
+  */
+ static void pad_wbuf(struct logfs_area *area, int final)
+ {
+       pad_partial_page(area);
+       if (final)
+               pad_full_pages(area);
+ }
  /*
   * We have to be careful with the alias tree.  Since lookup is done by bix,
   * it needs to be normalized, so 14, 15, 16, etc. all match when dealing with
@@@ -684,7 -691,7 +692,7 @@@ int logfs_segment_delete(struct inode *
        return 0;
  }
  
- static void freeseg(struct super_block *sb, u32 segno)
+ void freeseg(struct super_block *sb, u32 segno)
  {
        struct logfs_super *super = logfs_super(sb);
        struct address_space *mapping = super->s_mapping_inode->i_mapping;
diff --combined fs/logfs/super.c
@@@ -11,7 -11,6 +11,7 @@@
   */
  #include "logfs.h"
  #include <linux/bio.h>
 +#include <linux/slab.h>
  #include <linux/mtd/mtd.h>
  #include <linux/statfs.h>
  #include <linux/buffer_head.h>
@@@ -278,7 -277,7 +278,7 @@@ static int logfs_recover_sb(struct supe
        }
        if (valid0 && valid1 && ds_cmp(ds0, ds1)) {
                printk(KERN_INFO"Superblocks don't match - fixing.\n");
-               return write_one_sb(sb, super->s_devops->find_last_sb);
+               return logfs_write_sb(sb);
        }
        /* If neither is valid now, something's wrong.  Didn't we properly
         * check them before?!? */
@@@ -290,6 -289,10 +290,10 @@@ static int logfs_make_writeable(struct 
  {
        int err;
  
+       err = logfs_open_segfile(sb);
+       if (err)
+               return err;
        /* Repair any broken superblock copies */
        err = logfs_recover_sb(sb);
        if (err)
        if (err)
                return err;
  
-       err = logfs_open_segfile(sb);
-       if (err)
-               return err;
        /* Do one GC pass before any data gets dirtied */
        logfs_gc_pass(sb);
  
@@@ -329,7 -328,7 +329,7 @@@ static int logfs_get_sb_final(struct su
  
        sb->s_root = d_alloc_root(rootdir);
        if (!sb->s_root)
-               goto fail;
+               goto fail2;
  
        super->s_erase_page = alloc_pages(GFP_KERNEL, 0);
        if (!super->s_erase_page)
@@@ -573,8 -572,7 +573,7 @@@ int logfs_get_sb_device(struct file_sys
        return 0;
  
  err1:
-       up_write(&sb->s_umount);
-       deactivate_super(sb);
+       deactivate_locked_super(sb);
        return err;
  err0:
        kfree(super);
diff --combined fs/proc/base.c
@@@ -81,7 -81,6 +81,7 @@@
  #include <linux/elf.h>
  #include <linux/pid_namespace.h>
  #include <linux/fs_struct.h>
 +#include <linux/slab.h>
  #include "internal.h"
  
  /* NOTE:
@@@ -443,12 -442,13 +443,13 @@@ static const struct file_operations pro
  unsigned long badness(struct task_struct *p, unsigned long uptime);
  static int proc_oom_score(struct task_struct *task, char *buffer)
  {
-       unsigned long points;
+       unsigned long points = 0;
        struct timespec uptime;
  
        do_posix_clock_monotonic_gettime(&uptime);
        read_lock(&tasklist_lock);
-       points = badness(task->group_leader, uptime.tv_sec);
+       if (pid_alive(task))
+               points = badness(task, uptime.tv_sec);
        read_unlock(&tasklist_lock);
        return sprintf(buffer, "%lu\n", points);
  }
diff --combined fs/proc/task_mmu.c
@@@ -4,7 -4,6 +4,7 @@@
  #include <linux/seq_file.h>
  #include <linux/highmem.h>
  #include <linux/ptrace.h>
 +#include <linux/slab.h>
  #include <linux/pagemap.h>
  #include <linux/mempolicy.h>
  #include <linux/swap.h>
@@@ -407,6 -406,7 +407,7 @@@ static int show_smap(struct seq_file *m
  
        memset(&mss, 0, sizeof mss);
        mss.vma = vma;
+       /* mmap_sem is held in m_start */
        if (vma->vm_mm && !is_vm_hugetlb_page(vma))
                walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
  
@@@ -553,7 -553,8 +554,8 @@@ const struct file_operations proc_clear
  };
  
  struct pagemapread {
-       u64 __user *out, *end;
+       int pos, len;
+       u64 *buffer;
  };
  
  #define PM_ENTRY_BYTES      sizeof(u64)
  static int add_to_pagemap(unsigned long addr, u64 pfn,
                          struct pagemapread *pm)
  {
-       if (put_user(pfn, pm->out))
-               return -EFAULT;
-       pm->out++;
-       if (pm->out >= pm->end)
+       pm->buffer[pm->pos++] = pfn;
+       if (pm->pos >= pm->len)
                return PM_END_OF_BUFFER;
        return 0;
  }
@@@ -721,21 -720,20 +721,20 @@@ static int pagemap_hugetlb_range(pte_t 
   * determine which areas of memory are actually mapped and llseek to
   * skip over unmapped regions.
   */
+ #define PAGEMAP_WALK_SIZE     (PMD_SIZE)
  static ssize_t pagemap_read(struct file *file, char __user *buf,
                            size_t count, loff_t *ppos)
  {
        struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
-       struct page **pages, *page;
-       unsigned long uaddr, uend;
        struct mm_struct *mm;
        struct pagemapread pm;
-       int pagecount;
        int ret = -ESRCH;
        struct mm_walk pagemap_walk = {};
        unsigned long src;
        unsigned long svpfn;
        unsigned long start_vaddr;
        unsigned long end_vaddr;
+       int copied = 0;
  
        if (!task)
                goto out;
        if (!mm)
                goto out_task;
  
-       uaddr = (unsigned long)buf & PAGE_MASK;
-       uend = (unsigned long)(buf + count);
-       pagecount = (PAGE_ALIGN(uend) - uaddr) / PAGE_SIZE;
-       ret = 0;
-       if (pagecount == 0)
-               goto out_mm;
-       pages = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
+       pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
+       pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
        ret = -ENOMEM;
-       if (!pages)
+       if (!pm.buffer)
                goto out_mm;
  
-       down_read(&current->mm->mmap_sem);
-       ret = get_user_pages(current, current->mm, uaddr, pagecount,
-                            1, 0, pages, NULL);
-       up_read(&current->mm->mmap_sem);
-       if (ret < 0)
-               goto out_free;
-       if (ret != pagecount) {
-               pagecount = ret;
-               ret = -EFAULT;
-               goto out_pages;
-       }
-       pm.out = (u64 __user *)buf;
-       pm.end = (u64 __user *)(buf + count);
        pagemap_walk.pmd_entry = pagemap_pte_range;
        pagemap_walk.pte_hole = pagemap_pte_hole;
        pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
         * user buffer is tracked in "pm", and the walk
         * will stop when we hit the end of the buffer.
         */
-       ret = walk_page_range(start_vaddr, end_vaddr, &pagemap_walk);
-       if (ret == PM_END_OF_BUFFER)
-               ret = 0;
-       /* don't need mmap_sem for these, but this looks cleaner */
-       *ppos += (char __user *)pm.out - buf;
-       if (!ret)
-               ret = (char __user *)pm.out - buf;
- out_pages:
-       for (; pagecount; pagecount--) {
-               page = pages[pagecount-1];
-               if (!PageReserved(page))
-                       SetPageDirty(page);
-               page_cache_release(page);
+       ret = 0;
+       while (count && (start_vaddr < end_vaddr)) {
+               int len;
+               unsigned long end;
+               pm.pos = 0;
+               end = start_vaddr + PAGEMAP_WALK_SIZE;
+               /* overflow ? */
+               if (end < start_vaddr || end > end_vaddr)
+                       end = end_vaddr;
+               down_read(&mm->mmap_sem);
+               ret = walk_page_range(start_vaddr, end, &pagemap_walk);
+               up_read(&mm->mmap_sem);
+               start_vaddr = end;
+               len = min(count, PM_ENTRY_BYTES * pm.pos);
+               if (copy_to_user(buf, pm.buffer, len)) {
+                       ret = -EFAULT;
+                       goto out_free;
+               }
+               copied += len;
+               buf += len;
+               count -= len;
        }
+       *ppos += copied;
+       if (!ret || ret == PM_END_OF_BUFFER)
+               ret = copied;
  out_free:
-       kfree(pages);
+       kfree(pm.buffer);
  out_mm:
        mmput(mm);
  out_task:
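
The pagemap_read() rewrite above replaces pinning the user buffer with a small kernel bounce buffer that is filled one PAGEMAP_WALK_SIZE chunk at a time and copied out between walks. A stripped-down sketch of that shape (fill_chunk() is a stand-in for walk_page_range() filling pm.buffer):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

static ssize_t copy_chunked(char __user *buf, size_t count,
                            size_t (*fill_chunk)(u64 *dst, size_t max_entries))
{
        u64 *kbuf;
        ssize_t copied = 0;

        kbuf = kmalloc(PAGE_SIZE, GFP_TEMPORARY);
        if (!kbuf)
                return -ENOMEM;

        while (count >= sizeof(u64)) {
                size_t max_entries = min_t(size_t, count, PAGE_SIZE) / sizeof(u64);
                size_t len = fill_chunk(kbuf, max_entries) * sizeof(u64);

                if (!len)
                        break;
                /* No locks held here, so the copy may fault and sleep safely. */
                if (copy_to_user(buf, kbuf, len)) {
                        if (!copied)
                                copied = -EFAULT;
                        break;
                }
                buf += len;
                count -= len;
                copied += len;
        }
        kfree(kbuf);
        return copied;
}
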
diff --combined fs/reiserfs/super.c
@@@ -12,7 -12,6 +12,7 @@@
   */
  
  #include <linux/module.h>
 +#include <linux/slab.h>
  #include <linux/vmalloc.h>
  #include <linux/time.h>
  #include <asm/uaccess.h>
@@@ -1619,10 -1618,8 +1619,8 @@@ static int reiserfs_fill_super(struct s
        save_mount_options(s, data);
  
        sbi = kzalloc(sizeof(struct reiserfs_sb_info), GFP_KERNEL);
-       if (!sbi) {
-               errval = -ENOMEM;
-               goto error_alloc;
-       }
+       if (!sbi)
+               return -ENOMEM;
        s->s_fs_info = sbi;
        /* Set default values for options: non-aggressive tails, RO on errors */
        REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
        return (0);
  
  error:
-       reiserfs_write_unlock(s);
- error_alloc:
        if (jinit_done) {       /* kill the commit thread, free journal ram */
                journal_release_error(NULL, s);
        }
  
+       reiserfs_write_unlock(s);
        reiserfs_free_bitmap_cache(s);
        if (SB_BUFFER_WITH_SB(s))
                brelse(SB_BUFFER_WITH_SB(s));
diff --combined include/drm/drmP.h
@@@ -55,7 -55,6 +55,7 @@@
  #include <linux/mm.h>
  #include <linux/cdev.h>
  #include <linux/mutex.h>
 +#include <linux/slab.h>
  #if defined(__alpha__) || defined(__powerpc__)
  #include <asm/pgtable.h>      /* For pte_wrprotect */
  #endif
@@@ -1546,39 -1545,7 +1546,7 @@@ static __inline__ void drm_core_dropmap
  {
  }
  
- static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
- {
-       if (size != 0 && nmemb > ULONG_MAX / size)
-               return NULL;
-       if (size * nmemb <= PAGE_SIZE)
-           return kcalloc(nmemb, size, GFP_KERNEL);
-       return __vmalloc(size * nmemb,
-                        GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
- }
- /* Modeled after cairo's malloc_ab, it's like calloc but without the zeroing. */
- static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size)
- {
-       if (size != 0 && nmemb > ULONG_MAX / size)
-               return NULL;
-       if (size * nmemb <= PAGE_SIZE)
-           return kmalloc(nmemb * size, GFP_KERNEL);
-       return __vmalloc(size * nmemb,
-                        GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
- }
- static __inline void drm_free_large(void *ptr)
- {
-       if (!is_vmalloc_addr(ptr))
-               return kfree(ptr);
-       vfree(ptr);
- }
+ #include "drm_mem_util.h"
  /*@}*/
  
  #endif                                /* __KERNEL__ */
diff --combined kernel/cgroup_freezer.c
@@@ -15,7 -15,6 +15,7 @@@
   */
  
  #include <linux/module.h>
 +#include <linux/slab.h>
  #include <linux/cgroup.h>
  #include <linux/fs.h>
  #include <linux/uaccess.h>
@@@ -48,17 -47,20 +48,20 @@@ static inline struct freezer *task_free
                            struct freezer, css);
  }
  
- int cgroup_frozen(struct task_struct *task)
+ int cgroup_freezing_or_frozen(struct task_struct *task)
  {
        struct freezer *freezer;
        enum freezer_state state;
  
        task_lock(task);
        freezer = task_freezer(task);
-       state = freezer->state;
+       if (!freezer->css.cgroup->parent)
+               state = CGROUP_THAWED; /* root cgroup can't be frozen */
+       else
+               state = freezer->state;
        task_unlock(task);
  
-       return state == CGROUP_FROZEN;
+       return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN);
  }
  
  /*
diff --combined kernel/cred.c
@@@ -10,7 -10,6 +10,7 @@@
   */
  #include <linux/module.h>
  #include <linux/cred.h>
 +#include <linux/slab.h>
  #include <linux/sched.h>
  #include <linux/key.h>
  #include <linux/keyctl.h>
@@@ -365,7 -364,7 +365,7 @@@ struct cred *prepare_usermodehelper_cre
  
        new = kmem_cache_alloc(cred_jar, GFP_ATOMIC);
        if (!new)
-               return NULL;
+               goto free_tgcred;
  
        kdebug("prepare_usermodehelper_creds() alloc %p", new);
  
  
  error:
        put_cred(new);
+ free_tgcred:
+ #ifdef CONFIG_KEYS
+       kfree(tgcred);
+ #endif
        return NULL;
  }
  
diff --combined kernel/perf_event.c
@@@ -15,7 -15,6 +15,7 @@@
  #include <linux/smp.h>
  #include <linux/file.h>
  #include <linux/poll.h>
 +#include <linux/slab.h>
  #include <linux/sysfs.h>
  #include <linux/dcache.h>
  #include <linux/percpu.h>
@@@ -1165,11 -1164,9 +1165,9 @@@ void perf_event_task_sched_out(struct t
        struct perf_event_context *ctx = task->perf_event_ctxp;
        struct perf_event_context *next_ctx;
        struct perf_event_context *parent;
-       struct pt_regs *regs;
        int do_switch = 1;
  
-       regs = task_pt_regs(task);
-       perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0);
+       perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
  
        if (likely(!ctx || !cpuctx->task_ctx))
                return;
@@@ -2787,12 -2784,11 +2785,11 @@@ __weak struct perf_callchain_entry *per
        return NULL;
  }
  
- #ifdef CONFIG_EVENT_TRACING
  __weak
  void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
  {
  }
- #endif
  
  /*
   * Output
@@@ -3379,15 -3375,23 +3376,23 @@@ static void perf_event_task_output(stru
                                     struct perf_task_event *task_event)
  {
        struct perf_output_handle handle;
-       int size;
        struct task_struct *task = task_event->task;
-       int ret;
+       unsigned long flags;
+       int size, ret;
+       /*
+        * If this CPU attempts to acquire an rq lock held by a CPU spinning
+        * in perf_output_lock() from interrupt context, it's game over.
+        */
+       local_irq_save(flags);
  
        size  = task_event->event_id.header.size;
        ret = perf_output_begin(&handle, event, size, 0, 0);
  
-       if (ret)
+       if (ret) {
+               local_irq_restore(flags);
                return;
+       }
  
        task_event->event_id.pid = perf_event_pid(event, task);
        task_event->event_id.ppid = perf_event_pid(event, current);
        perf_output_put(&handle, task_event->event_id);
  
        perf_output_end(&handle);
+       local_irq_restore(flags);
  }
  
  static int perf_event_task_match(struct perf_event *event)
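
The comment added in this hunk states the motivation; as a loose illustration only (output_lock and emit() are stand-ins, not perf internals), the pattern is to keep local interrupts disabled across every exit path of the section that may spin on the shared output lock:

#include <linux/irqflags.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(output_lock);    /* stand-in for perf's output lock */

static void emit(const char *rec, bool begin_failed)
{
        unsigned long flags;

        local_irq_save(flags);  /* an IRQ taken here could otherwise re-enter and deadlock */

        if (begin_failed) {
                local_irq_restore(flags);       /* mirrors the early return above */
                return;
        }

        spin_lock(&output_lock);
        pr_debug("record: %s\n", rec);
        spin_unlock(&output_lock);

        local_irq_restore(flags);
}
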
diff --combined kernel/sched.c
@@@ -71,7 -71,6 +71,7 @@@
  #include <linux/debugfs.h>
  #include <linux/ctype.h>
  #include <linux/ftrace.h>
 +#include <linux/slab.h>
  
  #include <asm/tlb.h>
  #include <asm/irq_regs.h>
@@@ -5388,7 -5387,7 +5388,7 @@@ int set_cpus_allowed_ptr(struct task_st
  
                get_task_struct(mt);
                task_rq_unlock(rq, &flags);
-               wake_up_process(rq->migration_thread);
+               wake_up_process(mt);
                put_task_struct(mt);
                wait_for_completion(&req.done);
                tlb_migrate_finish(p->mm);
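
The one-line sched change wakes the migration thread through the pointer pinned with get_task_struct() before the unlock, instead of re-reading rq->migration_thread after the lock is dropped. A hypothetical helper with the same shape:

#include <linux/sched.h>
#include <linux/spinlock.h>

static void wake_pinned_helper(struct task_struct **slot, spinlock_t *lock)
{
        struct task_struct *t;

        spin_lock(lock);
        t = *slot;
        get_task_struct(t);     /* pin before dropping the lock */
        spin_unlock(lock);

        wake_up_process(t);     /* *slot may have changed; t cannot go away */
        put_task_struct(t);
}
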
@@@ -14,7 -14,6 +14,7 @@@
  #include <linux/module.h>
  #include <linux/percpu.h>
  #include <linux/mutex.h>
 +#include <linux/slab.h>
  #include <linux/init.h>
  #include <linux/hash.h>
  #include <linux/list.h>
@@@ -1210,18 -1209,19 +1210,19 @@@ rb_remove_pages(struct ring_buffer_per_
  
        for (i = 0; i < nr_pages; i++) {
                if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
-                       return;
+                       goto out;
                p = cpu_buffer->pages->next;
                bpage = list_entry(p, struct buffer_page, list);
                list_del_init(&bpage->list);
                free_buffer_page(bpage);
        }
        if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
-               return;
+               goto out;
  
        rb_reset_cpu(cpu_buffer);
        rb_check_pages(cpu_buffer);
  
+ out:
        spin_unlock_irq(&cpu_buffer->reader_lock);
  }
  
@@@ -1238,7 -1238,7 +1239,7 @@@ rb_insert_pages(struct ring_buffer_per_
  
        for (i = 0; i < nr_pages; i++) {
                if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
-                       return;
+                       goto out;
                p = pages->next;
                bpage = list_entry(p, struct buffer_page, list);
                list_del_init(&bpage->list);
        rb_reset_cpu(cpu_buffer);
        rb_check_pages(cpu_buffer);
  
+ out:
        spin_unlock_irq(&cpu_buffer->reader_lock);
  }