no delay (0).
Format: integer
+ + + ++ bootmem_debug [KNL] Enable bootmem allocator debug messages.
+ + + ++
bttv.card= [HW,V4L] bttv (bt848 + bt878 based grabber cards)
bttv.radio= Most important insmod options are available as
kernel args too.
Range: 0 - 8192
Default: 64
------- ------------- disable_8254_timer
------- ------------- enable_8254_timer
------- ------------- [IA32/X86_64] Disable/Enable interrupt 0 timer routing
------- ------------- over the 8254 in addition to over the IO-APIC. The
------- ------------- kernel tries to set a sensible default.
------- -------------
hpet= [X86-32,HPET] option to control HPET usage
Format: { enable (default) | disable | force }
disable: disable HPET and use PIT instead
* [no]ncq: Turn on or off NCQ.
+ + + + ++ + * nohrst, nosrst, norst: suppress hard, soft
+ + + + ++ + and both resets.
+ + + + ++ +
If there are multiple matching configurations changing
the same attribute, the last one is used.
shapers= [NET]
Maximal number of shapers.
+++++ +++++++++++++++ show_msr= [x86] show boot-time MSR settings
+++++ +++++++++++++++ Format: { <integer> }
+++++ +++++++++++++++ Show boot-time (BIOS-initialized) MSR settings.
+++++ +++++++++++++++ The parameter means the number of CPUs to show,
+++++ +++++++++++++++ for example 1 means boot CPU only.
+++++ +++++++++++++++
sim710= [SCSI,HW]
See header of drivers/scsi/sim710.c.
select HAVE_IDE
select HAVE_OPROFILE
select HAVE_IOREMAP_PROT
- select HAVE_GET_USER_PAGES_FAST
select HAVE_KPROBES
select ARCH_WANT_OPTIONAL_GPIOLIB
select HAVE_KRETPROBES
select HAVE_FTRACE
select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
select HAVE_ARCH_KGDB if !X86_VOYAGER
++++++++++++++++++++ select HAVE_ARCH_TRACEHOOK
select HAVE_GENERIC_DMA_COHERENT if X86_32
select HAVE_EFFICIENT_UNALIGNED_ACCESS
config IOMMU_HELPER
def_bool (CALGARY_IOMMU || GART_IOMMU || SWIOTLB || AMD_IOMMU)
+ + + + +++ +
config MAXSMP
bool "Configure Maximum number of SMP Processors and NUMA Nodes"
- - - - --- - depends on X86_64 && SMP
+ + + + +++ + depends on X86_64 && SMP && BROKEN
default n
help
Configure maximum number of CPUS and NUMA Nodes for this architecture.
If unsure, say N.
- - - - --- -if MAXSMP
- config NR_CPUS
- int
- default "4096"
- endif
-
- if !MAXSMP
config NR_CPUS
- - - --- - int
- - - --- - default "4096"
- - - --- -endif
- - - --- -
- - - --- -if !MAXSMP
- - - --- -config NR_CPUS
- - - - --- - int "Maximum number of CPUs (2-4096)"
- - - - --- - range 2 4096
+ + + + +++ + int "Maximum number of CPUs (2-512)" if !MAXSMP
+ + + + +++ + range 2 512
depends on SMP
+ + + + +++ + default "4096" if MAXSMP
default "32" if X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000
default "8"
help
This allows you to specify the maximum number of CPUs which this
- - - - --- - kernel will support. The maximum supported value is 4096 and the
+ + + + +++ + kernel will support. The maximum supported value is 512 and the
minimum value which makes sense is 2.
This is purely to save memory - each supported CPU adds
approximately eight kilobytes to the kernel image.
- - - - --- -endif
config SCHED_SMT
bool "SMT (Hyperthreading) scheduler support"
local memory controller of the CPU and add some more
NUMA awareness to the kernel.
- - - - For i386 this is currently highly experimental and should be only
+ + + + For 32-bit this is currently highly experimental and should be only
used for kernel development. It might also cause boot failures.
- - - - For x86_64 this is recommended on all multiprocessor Opteron systems.
+ + + + For 64-bit this is recommended on all multiprocessor Opteron systems.
If the system is EM64T, you should say N unless your system is
EM64T NUMA.
into virtual nodes when booted with "numa=fake=N", where N is the
number of nodes. This is only useful for debugging.
- - - - --- -if MAXSMP
- - - - --- -
config NODES_SHIFT
- - - - --- - int
- - - - --- - default "9"
- - - - --- -endif
- - - - --- -
- - - - --- -if !MAXSMP
- - - - --- -config NODES_SHIFT
- - - - --- - int "Maximum NUMA Nodes (as a power of 2)"
+ + + + +++ + int "Maximum NUMA Nodes (as a power of 2)" if !MAXSMP
range 1 9 if X86_64
+ + + + +++ + default "9" if MAXSMP
default "6" if X86_64
default "4" if X86_NUMAQ
default "3"
help
Specify the maximum number of NUMA Nodes available on the target
system. Increases memory reserved to accomodate various tables.
- - - - --- -endif
config HAVE_ARCH_BOOTMEM_NODE
def_bool y
config ARCH_FLATMEM_ENABLE
def_bool y
------ -------------- depends on X86_32 && ARCH_SELECT_MEMORY_MODEL && X86_PC && !NUMA
++++++ ++++++++++++++ depends on X86_32 && ARCH_SELECT_MEMORY_MODEL && !NUMA
config ARCH_DISCONTIGMEM_ENABLE
def_bool y
config ARCH_SPARSEMEM_ENABLE
def_bool y
------ -------------- depends on X86_64 || NUMA || (EXPERIMENTAL && X86_PC)
++++++ ++++++++++++++ depends on X86_64 || NUMA || (EXPERIMENTAL && X86_PC) || X86_GENERICARCH
select SPARSEMEM_STATIC if X86_32
select SPARSEMEM_VMEMMAP_ENABLE if X86_64
You can safely say Y even if your machine doesn't have MTRRs, you'll
just add about 9 KB to your kernel.
------- ------------- See <file:Documentation/mtrr.txt> for more information.
+++++++ +++++++++++++ See <file:Documentation/x86/mtrr.txt> for more information.
config MTRR_SANITIZER
------------ -------- bool
++++++++++++ ++++++++ def_bool y
prompt "MTRR cleanup support"
depends on MTRR
help
The largest mtrr entry size for a continous block can be set with
mtrr_chunk_size.
------------ -------- If unsure, say N.
++++++++++++ ++++++++ If unsure, say Y.
config MTRR_SANITIZER_ENABLE_DEFAULT
int "MTRR cleanup enable value (0-1)"
config SECCOMP
def_bool y
prompt "Enable seccomp to safely compute untrusted bytecode"
-- ------------------ depends on PROC_FS
help
This kernel feature is useful for number crunching applications
that may need to compute untrusted bytecode during their
the process as file descriptors supporting the read/write
syscalls, it's possible to isolate those applications in
their own address space using seccomp. Once seccomp is
-- ------------------ enabled via /proc/<pid>/seccomp, it cannot be disabled
++ ++++++++++++++++++ enabled via prctl(PR_SET_SECCOMP), it cannot be disabled
and the task is only allowed to execute a few safe syscalls
defined by each seccomp mode.
strongly in flux, so no good recommendation can be made.
config CRASH_DUMP
- - - - bool "kernel crash dumps (EXPERIMENTAL)"
+ + + + bool "kernel crash dumps"
depends on X86_64 || (X86_32 && HIGHMEM)
help
Generate crash dump after being started by kexec.
Don't change this unless you know what you are doing.
config HOTPLUG_CPU
------ -------------- bool "Support for suspend on SMP and hot-pluggable CPUs (EXPERIMENTAL)"
------ -------------- depends on SMP && HOTPLUG && EXPERIMENTAL && !X86_VOYAGER
++++++ ++++++++++++++ bool "Support for hot-pluggable CPUs"
++++++ ++++++++++++++ depends on SMP && HOTPLUG && !X86_VOYAGER
---help---
------ -------------- Say Y here to experiment with turning CPUs off and on, and to
------ -------------- enable suspend on SMP systems. CPUs can be controlled through
------ -------------- /sys/devices/system/cpu.
------ -------------- Say N if you want to disable CPU hotplug and don't need to
------ -------------- suspend.
++++++ ++++++++++++++ Say Y here to allow turning CPUs off and on. CPUs can be
++++++ ++++++++++++++ controlled through /sys/devices/system/cpu.
++++++ ++++++++++++++ ( Note: power management support will enable this option
++++++ ++++++++++++++ automatically on SMP systems. )
++++++ ++++++++++++++ Say N if you want to disable CPU hotplug.
config COMPAT_VDSO
def_bool y
If unsure, say Y.
+++ +++++++++++++++++config CMDLINE_BOOL
+++ +++++++++++++++++ bool "Built-in kernel command line"
+++ +++++++++++++++++ default n
+++ +++++++++++++++++ help
+++ +++++++++++++++++ Allow for specifying boot arguments to the kernel at
+++ +++++++++++++++++ build time. On some systems (e.g. embedded ones), it is
+++ +++++++++++++++++ necessary or convenient to provide some or all of the
+++ +++++++++++++++++ kernel boot arguments with the kernel itself (that is,
+++ +++++++++++++++++ to not rely on the boot loader to provide them.)
+++ +++++++++++++++++
+++ +++++++++++++++++ To compile command line arguments into the kernel,
+++ +++++++++++++++++ set this option to 'Y', then fill in the
+++ +++++++++++++++++ the boot arguments in CONFIG_CMDLINE.
+++ +++++++++++++++++
+++ +++++++++++++++++ Systems with fully functional boot loaders (i.e. non-embedded)
+++ +++++++++++++++++ should leave this option set to 'N'.
+++ +++++++++++++++++
+++ +++++++++++++++++config CMDLINE
+++ +++++++++++++++++ string "Built-in kernel command string"
+++ +++++++++++++++++ depends on CMDLINE_BOOL
+++ +++++++++++++++++ default ""
+++ +++++++++++++++++ help
+++ +++++++++++++++++ Enter arguments here that should be compiled into the kernel
+++ +++++++++++++++++ image and used at boot time. If the boot loader provides a
+++ +++++++++++++++++ command line at boot time, it is appended to this string to
+++ +++++++++++++++++ form the full kernel command line, when the system boots.
+++ +++++++++++++++++
+++ +++++++++++++++++ However, you can use the CONFIG_CMDLINE_OVERRIDE option to
+++ +++++++++++++++++ change this behavior.
+++ +++++++++++++++++
+++ +++++++++++++++++ In most cases, the command line (whether built-in or provided
+++ +++++++++++++++++ by the boot loader) should specify the device for the root
+++ +++++++++++++++++ file system.
+++ +++++++++++++++++
+++ +++++++++++++++++config CMDLINE_OVERRIDE
+++ +++++++++++++++++ bool "Built-in command line overrides boot loader arguments"
+++ +++++++++++++++++ default n
+++ +++++++++++++++++ depends on CMDLINE_BOOL
+++ +++++++++++++++++ help
+++ +++++++++++++++++ Set this option to 'Y' to have the kernel ignore the boot loader
+++ +++++++++++++++++ command line, and use ONLY the built-in command line.
+++ +++++++++++++++++
+++ +++++++++++++++++ This is used to work around broken boot loaders. This should
+++ +++++++++++++++++ be set to 'N' under normal conditions.
+++ +++++++++++++++++
endmenu
config ARCH_ENABLE_MEMORY_HOTPLUG
config SYSVIPC_COMPAT
def_bool y
-- ------------------ depends on X86_64 && COMPAT && SYSVIPC
++ ++++++++++++++++++ depends on COMPAT && SYSVIPC
endmenu
*/
#undef CONFIG_PARAVIRT
#ifdef CONFIG_X86_32
--------------------#define _ASM_DESC_H_ 1
++++++++++++++++++++#define ASM_X86__DESC_H 1
#endif
#ifdef CONFIG_X86_64
#include <linux/linkage.h>
#include <linux/screen_info.h>
#include <linux/elf.h>
-- ------------------#include <asm/io.h>
++ ++++++++++++++++++#include <linux/io.h>
#include <asm/page.h>
#include <asm/boot.h>
#include <asm/bootparam.h>
y--;
}
} else {
-- ------------------ vidmem [(x + cols * y) * 2] = c;
++ ++++++++++++++++++ vidmem[(x + cols * y) * 2] = c;
if (++x >= cols) {
x = 0;
if (++y >= lines) {
int i;
char *ss = s;
-- ------------------ for (i = 0; i < n; i++) ss[i] = c;
++ ++++++++++++++++++ for (i = 0; i < n; i++)
++ ++++++++++++++++++ ss[i] = c;
return s;
}
const char *s = src;
char *d = dest;
-- ------------------ for (i = 0; i < n; i++) d[i] = s[i];
++ ++++++++++++++++++ for (i = 0; i < n; i++)
++ ++++++++++++++++++ d[i] = s[i];
return dest;
}
extern char __vsyscall_0;
const unsigned char *const *find_nop_table(void)
{
- - -- -- --- - - return boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
- - -- -- --- - - boot_cpu_data.x86 < 6 ? k8_nops : p6_nops;
+ + ++ ++ +++ + + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+ + ++ ++ +++ + + boot_cpu_has(X86_FEATURE_NOPL))
+ + ++ ++ +++ + + return p6_nops;
+ + ++ ++ +++ + + else
+ + ++ ++ +++ + + return k8_nops;
}
#else /* CONFIG_X86_64 */
- - -- -- --- - -static const struct nop {
- - -- -- --- - - int cpuid;
- - -- -- --- - - const unsigned char *const *noptable;
- - -- -- --- - -} noptypes[] = {
- - -- -- --- - - { X86_FEATURE_K8, k8_nops },
- - -- -- --- - - { X86_FEATURE_K7, k7_nops },
- - -- -- --- - - { X86_FEATURE_P4, p6_nops },
- - -- -- --- - - { X86_FEATURE_P3, p6_nops },
- - -- -- --- - - { -1, NULL }
- - -- -- --- - -};
- - -- -- --- - -
const unsigned char *const *find_nop_table(void)
{
- - -- -- --- - - const unsigned char *const *noptable = intel_nops;
- - -- -- --- - - int i;
- - -- -- --- - -
- - -- -- --- - - for (i = 0; noptypes[i].cpuid >= 0; i++) {
- - -- -- --- - - if (boot_cpu_has(noptypes[i].cpuid)) {
- - -- -- --- - - noptable = noptypes[i].noptable;
- - -- -- --- - - break;
- - -- -- --- - - }
- - -- -- --- - - }
- - -- -- --- - - return noptable;
+ + ++ ++ +++ + + if (boot_cpu_has(X86_FEATURE_K8))
+ + ++ ++ +++ + + return k8_nops;
+ + ++ ++ +++ + + else if (boot_cpu_has(X86_FEATURE_K7))
+ + ++ ++ +++ + + return k7_nops;
+ + ++ ++ +++ + + else if (boot_cpu_has(X86_FEATURE_NOPL))
+ + ++ ++ +++ + + return p6_nops;
+ + ++ ++ +++ + + else
+ + ++ ++ +++ + + return intel_nops;
}
#endif /* CONFIG_X86_64 */
continue;
if (*ptr > text_end)
continue;
- ------------------- text_poke(*ptr, ((unsigned char []){0xf0}), 1); /* add lock prefix */
+ +++++++++++++++++++ /* turn DS segment override prefix into lock prefix */
+ +++++++++++++++++++ text_poke(*ptr, ((unsigned char []){0xf0}), 1);
};
}
static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
u8 **ptr;
- ------------------- char insn[1];
if (noreplace_smp)
return;
- ------------------- add_nops(insn, 1);
for (ptr = start; ptr < end; ptr++) {
if (*ptr < text)
continue;
if (*ptr > text_end)
continue;
- ------------------- text_poke(*ptr, insn, 1);
+ +++++++++++++++++++ /* turn lock prefix into DS segment override prefix */
+ +++++++++++++++++++ text_poke(*ptr, ((unsigned char []){0x3E}), 1);
};
}
#include <linux/suspend.h>
#include <linux/kthread.h>
#include <linux/jiffies.h>
-- ------------------#include <linux/smp_lock.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/desc.h>
#include <asm/i8253.h>
+++++++ ++++++++++ +#include <asm/olpc.h>
#include <asm/paravirt.h>
#include <asm/reboot.h>
dmi_check_system(apm_dmi_table);
------- ---------- - if (apm_info.bios.version == 0 || paravirt_enabled()) {
+++++++ ++++++++++ + if (apm_info.bios.version == 0 || paravirt_enabled() || machine_is_olpc()) {
printk(KERN_INFO "apm: BIOS not found.\n");
return -ENODEV;
}
mtrr_type type;
};
--------------------struct var_mtrr_range_state __initdata range_state[RANGE_NUM];
++++++++++++++++++++static struct var_mtrr_range_state __initdata range_state[RANGE_NUM];
static int __initdata debug_print;
static int __init
/* take out UC ranges */
for (i = 0; i < num_var_ranges; i++) {
type = range_state[i].type;
------------ -------- if (type != MTRR_TYPE_UNCACHABLE)
++++++++++++ ++++++++ if (type != MTRR_TYPE_UNCACHABLE &&
++++++++++++ ++++++++ type != MTRR_TYPE_WRPROT)
continue;
size = range_state[i].size_pfn;
if (!size)
enable_mtrr_cleanup = 1;
return 0;
}
------------------ -early_param("enble_mtrr_cleanup", enable_mtrr_cleanup_setup);
++++++++++++++++++ +early_param("enable_mtrr_cleanup", enable_mtrr_cleanup_setup);
+++++++++++ ++++++ +
++++++++++++ ++++++++static int __init mtrr_cleanup_debug_setup(char *str)
++++++++++++ ++++++++{
++++++++++++ ++++++++ debug_print = 1;
++++++++++++ ++++++++ return 0;
++++++++++++ ++++++++}
++++++++++++ ++++++++early_param("mtrr_cleanup_debug", mtrr_cleanup_debug_setup);
+ +
struct var_mtrr_state {
unsigned long range_startk;
unsigned long range_sizek;
}
}
++++++++++++ ++++++++static unsigned long to_size_factor(unsigned long sizek, char *factorp)
++++++++++++ ++++++++{
++++++++++++ ++++++++ char factor;
++++++++++++ ++++++++ unsigned long base = sizek;
++++++++++++ ++++++++
++++++++++++ ++++++++ if (base & ((1<<10) - 1)) {
++++++++++++ ++++++++ /* not MB alignment */
++++++++++++ ++++++++ factor = 'K';
++++++++++++ ++++++++ } else if (base & ((1<<20) - 1)){
++++++++++++ ++++++++ factor = 'M';
++++++++++++ ++++++++ base >>= 10;
++++++++++++ ++++++++ } else {
++++++++++++ ++++++++ factor = 'G';
++++++++++++ ++++++++ base >>= 20;
++++++++++++ ++++++++ }
++++++++++++ ++++++++
++++++++++++ ++++++++ *factorp = factor;
++++++++++++ ++++++++
++++++++++++ ++++++++ return base;
++++++++++++ ++++++++}
++++++++++++ ++++++++
static unsigned int __init
range_to_mtrr(unsigned int reg, unsigned long range_startk,
unsigned long range_sizek, unsigned char type)
align = max_align;
sizek = 1 << align;
------------ -------- if (debug_print)
++++++++++++ ++++++++ if (debug_print) {
++++++++++++ ++++++++ char start_factor = 'K', size_factor = 'K';
++++++++++++ ++++++++ unsigned long start_base, size_base;
++++++++++++ ++++++++
++++++++++++ ++++++++ start_base = to_size_factor(range_startk, &start_factor),
++++++++++++ ++++++++ size_base = to_size_factor(sizek, &size_factor),
++++++++++++ ++++++++
printk(KERN_DEBUG "Setting variable MTRR %d, "
------------ -------- "base: %ldMB, range: %ldMB, type %s\n",
------------ -------- reg, range_startk >> 10, sizek >> 10,
++++++++++++ ++++++++ "base: %ld%cB, range: %ld%cB, type %s\n",
++++++++++++ ++++++++ reg, start_base, start_factor,
++++++++++++ ++++++++ size_base, size_factor,
(type == MTRR_TYPE_UNCACHABLE)?"UC":
((type == MTRR_TYPE_WRBACK)?"WB":"Other")
);
++++++++++++ ++++++++ }
save_var_mtrr(reg++, range_startk, sizek, type);
range_startk += sizek;
range_sizek -= sizek;
/* try to append some small hole */
range0_basek = state->range_startk;
range0_sizek = ALIGN(state->range_sizek, chunk_sizek);
++++++++++++ ++++++++
++++++++++++ ++++++++ /* no increase */
if (range0_sizek == state->range_sizek) {
if (debug_print)
printk(KERN_DEBUG "rangeX: %016lx - %016lx\n",
return 0;
}
------------ -------- range0_sizek -= chunk_sizek;
------------ -------- if (range0_sizek && sizek) {
------------ -------- while (range0_basek + range0_sizek > (basek + sizek)) {
------------ -------- range0_sizek -= chunk_sizek;
------------ -------- if (!range0_sizek)
------------ -------- break;
------------ -------- }
++++++++++++ ++++++++ /* only cut back, when it is not the last */
++++++++++++ ++++++++ if (sizek) {
++++++++++++ ++++++++ while (range0_basek + range0_sizek > (basek + sizek)) {
++++++++++++ ++++++++ if (range0_sizek >= chunk_sizek)
++++++++++++ ++++++++ range0_sizek -= chunk_sizek;
++++++++++++ ++++++++ else
++++++++++++ ++++++++ range0_sizek = 0;
++++++++++++ ++++++++
++++++++++++ ++++++++ if (!range0_sizek)
++++++++++++ ++++++++ break;
++++++++++++ ++++++++ }
++++++++++++ ++++++++ }
++++++++++++ ++++++++
++++++++++++ ++++++++second_try:
++++++++++++ ++++++++ range_basek = range0_basek + range0_sizek;
++++++++++++ ++++++++
++++++++++++ ++++++++ /* one hole in the middle */
++++++++++++ ++++++++ if (range_basek > basek && range_basek <= (basek + sizek))
++++++++++++ ++++++++ second_sizek = range_basek - basek;
++++++++++++ ++++++++
++++++++++++ ++++++++ if (range0_sizek > state->range_sizek) {
++++++++++++ ++++++++
++++++++++++ ++++++++ /* one hole in middle or at end */
++++++++++++ ++++++++ hole_sizek = range0_sizek - state->range_sizek - second_sizek;
++++++++++++ ++++++++
++++++++++++ ++++++++ /* hole size should be less than half of range0 size */
++++++++++++ ++++++++ if (hole_sizek >= (range0_sizek >> 1) &&
++++++++++++ ++++++++ range0_sizek >= chunk_sizek) {
++++++++++++ ++++++++ range0_sizek -= chunk_sizek;
++++++++++++ ++++++++ second_sizek = 0;
++++++++++++ ++++++++ hole_sizek = 0;
++++++++++++ ++++++++
++++++++++++ ++++++++ goto second_try;
++++++++++++ ++++++++ }
}
if (range0_sizek) {
(range0_basek + range0_sizek)<<10);
state->reg = range_to_mtrr(state->reg, range0_basek,
range0_sizek, MTRR_TYPE_WRBACK);
- --- - -- - ----
- --- - -- - ---- }
- --- - -- - ----
- --- - -- - ---- range_basek = range0_basek + range0_sizek;
- --- - -- - ---- range_sizek = chunk_sizek;
------------ --------
- --- - -- - ---- if (range_basek + range_sizek > basek &&
- --- - -- - ---- range_basek + range_sizek <= (basek + sizek)) {
- --- - -- - ---- /* one hole */
- --- - -- - ---- second_basek = basek;
- --- - -- - ---- second_sizek = range_basek + range_sizek - basek;
}
--- - - -- - range_basek = range0_basek + range0_sizek;
--- - - -- - range_sizek = chunk_sizek;
--- - - -- -
--- - - -- - if (range_basek + range_sizek > basek &&
--- - - -- - range_basek + range_sizek <= (basek + sizek)) {
--- - - -- - /* one hole */
--- - - -- - second_basek = basek;
--- - - -- - second_sizek = range_basek + range_sizek - basek;
--- - - -- - }
--- - - -- -
------------ -------- /* if last piece, only could one hole near end */
------------ -------- if ((second_basek || !basek) &&
------------ -------- range_sizek - (state->range_sizek - range0_sizek) - second_sizek <
------------ -------- (chunk_sizek >> 1)) {
------------ -------- /*
------------ -------- * one hole in middle (second_sizek is 0) or at end
------------ -------- * (second_sizek is 0 )
------------ -------- */
------------ -------- hole_sizek = range_sizek - (state->range_sizek - range0_sizek)
------------ -------- - second_sizek;
------------ -------- hole_basek = range_basek + range_sizek - hole_sizek
------------ -------- - second_sizek;
------------ -------- } else {
------------ -------- /* fallback for big hole, or several holes */
++++++++++++ ++++++++ if (range0_sizek < state->range_sizek) {
++++++++++++ ++++++++ /* need to handle left over */
range_sizek = state->range_sizek - range0_sizek;
------------ -------- second_basek = 0;
------------ -------- second_sizek = 0;
++++++++++++ ++++++++
++++++++++++ ++++++++ if (debug_print)
++++++++++++ ++++++++ printk(KERN_DEBUG "range: %016lx - %016lx\n",
++++++++++++ ++++++++ range_basek<<10,
++++++++++++ ++++++++ (range_basek + range_sizek)<<10);
++++++++++++ ++++++++ state->reg = range_to_mtrr(state->reg, range_basek,
++++++++++++ ++++++++ range_sizek, MTRR_TYPE_WRBACK);
}
------------ -------- if (debug_print)
------------ -------- printk(KERN_DEBUG "range: %016lx - %016lx\n", range_basek<<10,
------------ -------- (range_basek + range_sizek)<<10);
------------ -------- state->reg = range_to_mtrr(state->reg, range_basek, range_sizek,
------------ -------- MTRR_TYPE_WRBACK);
if (hole_sizek) {
++++++++++++ ++++++++ hole_basek = range_basek - hole_sizek - second_sizek;
if (debug_print)
printk(KERN_DEBUG "hole: %016lx - %016lx\n",
------------ -------- hole_basek<<10, (hole_basek + hole_sizek)<<10);
------------ -------- state->reg = range_to_mtrr(state->reg, hole_basek, hole_sizek,
------------ -------- MTRR_TYPE_UNCACHABLE);
------------ --------
++++++++++++ ++++++++ hole_basek<<10,
++++++++++++ ++++++++ (hole_basek + hole_sizek)<<10);
++++++++++++ ++++++++ state->reg = range_to_mtrr(state->reg, hole_basek,
++++++++++++ ++++++++ hole_sizek, MTRR_TYPE_UNCACHABLE);
}
return second_sizek;
};
/*
------------ -------- * gran_size: 1M, 2M, ..., 2G
------------ -------- * chunk size: gran_size, ..., 4G
------------ -------- * so we need (2+13)*6
++++++++++++ ++++++++ * gran_size: 64K, 128K, 256K, 512K, 1M, 2M, ..., 2G
++++++++++++ ++++++++ * chunk size: gran_size, ..., 2G
++++++++++++ ++++++++ * so we need (1+16)*8
*/
------------ --------#define NUM_RESULT 90
++++++++++++ ++++++++#define NUM_RESULT 136
#define PSHIFT (PAGE_SHIFT - 10)
static struct mtrr_cleanup_result __initdata result[NUM_RESULT];
static int __init mtrr_cleanup(unsigned address_bits)
{
unsigned long extra_remove_base, extra_remove_size;
------------ -------- unsigned long i, base, size, def, dummy;
++++++++++++ ++++++++ unsigned long base, size, def, dummy;
mtrr_type type;
int nr_range, nr_range_new;
u64 chunk_size, gran_size;
unsigned long range_sums, range_sums_new;
int index_good;
int num_reg_good;
++++++++++++ ++++++++ int i;
/* extra one for all 0 */
int num[MTRR_NUM_TYPES + 1];
continue;
if (!size)
type = MTRR_NUM_TYPES;
++++++++++++ ++++++++ if (type == MTRR_TYPE_WRPROT)
++++++++++++ ++++++++ type = MTRR_TYPE_UNCACHABLE;
num[type]++;
}
num_var_ranges - num[MTRR_NUM_TYPES])
return 0;
++++++++++++ ++++++++ /* print original var MTRRs at first, for debugging: */
++++++++++++ ++++++++ printk(KERN_DEBUG "original variable MTRRs\n");
++++++++++++ ++++++++ for (i = 0; i < num_var_ranges; i++) {
++++++++++++ ++++++++ char start_factor = 'K', size_factor = 'K';
++++++++++++ ++++++++ unsigned long start_base, size_base;
++++++++++++ ++++++++
++++++++++++ ++++++++ size_base = range_state[i].size_pfn << (PAGE_SHIFT - 10);
++++++++++++ ++++++++ if (!size_base)
++++++++++++ ++++++++ continue;
++++++++++++ ++++++++
++++++++++++ ++++++++ size_base = to_size_factor(size_base, &size_factor),
++++++++++++ ++++++++ start_base = range_state[i].base_pfn << (PAGE_SHIFT - 10);
++++++++++++ ++++++++ start_base = to_size_factor(start_base, &start_factor),
++++++++++++ ++++++++ type = range_state[i].type;
++++++++++++ ++++++++
++++++++++++ ++++++++ printk(KERN_DEBUG "reg %d, base: %ld%cB, range: %ld%cB, type %s\n",
++++++++++++ ++++++++ i, start_base, start_factor,
++++++++++++ ++++++++ size_base, size_factor,
++++++++++++ ++++++++ (type == MTRR_TYPE_UNCACHABLE) ? "UC" :
++++++++++++ ++++++++ ((type == MTRR_TYPE_WRPROT) ? "WP" :
++++++++++++ ++++++++ ((type == MTRR_TYPE_WRBACK) ? "WB" : "Other"))
++++++++++++ ++++++++ );
++++++++++++ ++++++++ }
++++++++++++ ++++++++
memset(range, 0, sizeof(range));
extra_remove_size = 0;
-- ------------------ if (mtrr_tom2) {
-- ------------------ extra_remove_base = 1 << (32 - PAGE_SHIFT);
++ ++++++++++++++++++ extra_remove_base = 1 << (32 - PAGE_SHIFT);
++ ++++++++++++++++++ if (mtrr_tom2)
extra_remove_size =
(mtrr_tom2 >> PAGE_SHIFT) - extra_remove_base;
-- ------------------ }
nr_range = x86_get_mtrr_mem_range(range, 0, extra_remove_base,
extra_remove_size);
++++++++++++ ++++++++ /*
++++++++++++ ++++++++ * [0, 1M) should always be coverred by var mtrr with WB
++++++++++++ ++++++++ * and fixed mtrrs should take effective before var mtrr for it
++++++++++++ ++++++++ */
++++++++++++ ++++++++ nr_range = add_range_with_merge(range, nr_range, 0,
++++++++++++ ++++++++ (1ULL<<(20 - PAGE_SHIFT)) - 1);
++++++++++++ ++++++++ /* sort the ranges */
++++++++++++ ++++++++ sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL);
++++++++++++ ++++++++
range_sums = sum_ranges(range, nr_range);
printk(KERN_INFO "total RAM coverred: %ldM\n",
range_sums >> (20 - PAGE_SHIFT));
if (mtrr_chunk_size && mtrr_gran_size) {
int num_reg;
++++++++++++ ++++++++ char gran_factor, chunk_factor, lose_factor;
++++++++++++ ++++++++ unsigned long gran_base, chunk_base, lose_base;
------------ -------- debug_print = 1;
++++++++++++ ++++++++ debug_print++;
/* convert ranges to var ranges state */
num_reg = x86_setup_var_mtrrs(range, nr_range, mtrr_chunk_size,
mtrr_gran_size);
result[i].lose_cover_sizek =
(range_sums - range_sums_new) << PSHIFT;
------------ -------- printk(KERN_INFO "%sgran_size: %ldM \tchunk_size: %ldM \t",
------------ -------- result[i].bad?"*BAD*":" ", result[i].gran_sizek >> 10,
------------ -------- result[i].chunk_sizek >> 10);
------------ -------- printk(KERN_CONT "num_reg: %d \tlose cover RAM: %s%ldM \n",
++++++++++++ ++++++++ gran_base = to_size_factor(result[i].gran_sizek, &gran_factor),
++++++++++++ ++++++++ chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor),
++++++++++++ ++++++++ lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor),
++++++++++++ ++++++++ printk(KERN_INFO "%sgran_size: %ld%c \tchunk_size: %ld%c \t",
++++++++++++ ++++++++ result[i].bad?"*BAD*":" ",
++++++++++++ ++++++++ gran_base, gran_factor, chunk_base, chunk_factor);
++++++++++++ ++++++++ printk(KERN_CONT "num_reg: %d \tlose cover RAM: %s%ld%c\n",
result[i].num_reg, result[i].bad?"-":"",
------------ -------- result[i].lose_cover_sizek >> 10);
++++++++++++ ++++++++ lose_base, lose_factor);
if (!result[i].bad) {
set_var_mtrr_all(address_bits);
return 1;
}
printk(KERN_INFO "invalid mtrr_gran_size or mtrr_chunk_size, "
"will find optimal one\n");
------------ -------- debug_print = 0;
++++++++++++ ++++++++ debug_print--;
memset(result, 0, sizeof(result[0]));
}
i = 0;
memset(min_loss_pfn, 0xff, sizeof(min_loss_pfn));
memset(result, 0, sizeof(result));
------------ -------- for (gran_size = (1ULL<<20); gran_size < (1ULL<<32); gran_size <<= 1) {
------------ -------- for (chunk_size = gran_size; chunk_size < (1ULL<<33);
++++++++++++ ++++++++ for (gran_size = (1ULL<<16); gran_size < (1ULL<<32); gran_size <<= 1) {
++++++++++++ ++++++++ char gran_factor;
++++++++++++ ++++++++ unsigned long gran_base;
++++++++++++ ++++++++
++++++++++++ ++++++++ if (debug_print)
++++++++++++ ++++++++ gran_base = to_size_factor(gran_size >> 10, &gran_factor);
++++++++++++ ++++++++
++++++++++++ ++++++++ for (chunk_size = gran_size; chunk_size < (1ULL<<32);
chunk_size <<= 1) {
int num_reg;
------------ -------- if (debug_print)
------------ -------- printk(KERN_INFO
------------ -------- "\ngran_size: %lldM chunk_size_size: %lldM\n",
------------ -------- gran_size >> 20, chunk_size >> 20);
++++++++++++ ++++++++ if (debug_print) {
++++++++++++ ++++++++ char chunk_factor;
++++++++++++ ++++++++ unsigned long chunk_base;
++++++++++++ ++++++++
++++++++++++ ++++++++ chunk_base = to_size_factor(chunk_size>>10, &chunk_factor),
++++++++++++ ++++++++ printk(KERN_INFO "\n");
++++++++++++ ++++++++ printk(KERN_INFO "gran_size: %ld%c chunk_size: %ld%c \n",
++++++++++++ ++++++++ gran_base, gran_factor, chunk_base, chunk_factor);
++++++++++++ ++++++++ }
if (i >= NUM_RESULT)
continue;
/* print out all */
for (i = 0; i < NUM_RESULT; i++) {
------------ -------- printk(KERN_INFO "%sgran_size: %ldM \tchunk_size: %ldM \t",
------------ -------- result[i].bad?"*BAD* ":" ", result[i].gran_sizek >> 10,
------------ -------- result[i].chunk_sizek >> 10);
------------ -------- printk(KERN_CONT "num_reg: %d \tlose RAM: %s%ldM\n",
------------ -------- result[i].num_reg, result[i].bad?"-":"",
------------ -------- result[i].lose_cover_sizek >> 10);
++++++++++++ ++++++++ char gran_factor, chunk_factor, lose_factor;
++++++++++++ ++++++++ unsigned long gran_base, chunk_base, lose_base;
++++++++++++ ++++++++
++++++++++++ ++++++++ gran_base = to_size_factor(result[i].gran_sizek, &gran_factor),
++++++++++++ ++++++++ chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor),
++++++++++++ ++++++++ lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor),
++++++++++++ ++++++++ printk(KERN_INFO "%sgran_size: %ld%c \tchunk_size: %ld%c \t",
++++++++++++ ++++++++ result[i].bad?"*BAD*":" ",
++++++++++++ ++++++++ gran_base, gran_factor, chunk_base, chunk_factor);
++++++++++++ ++++++++ printk(KERN_CONT "num_reg: %d \tlose cover RAM: %s%ld%c\n",
++++++++++++ ++++++++ result[i].num_reg, result[i].bad?"-":"",
++++++++++++ ++++++++ lose_base, lose_factor);
}
/* try to find the optimal index */
nr_mtrr_spare_reg = num_var_ranges - 1;
num_reg_good = -1;
for (i = num_var_ranges - nr_mtrr_spare_reg; i > 0; i--) {
------------ -------- if (!min_loss_pfn[i]) {
++++++++++++ ++++++++ if (!min_loss_pfn[i])
num_reg_good = i;
------------ -------- break;
------------ -------- }
}
index_good = -1;
}
if (index_good != -1) {
++++++++++++ ++++++++ char gran_factor, chunk_factor, lose_factor;
++++++++++++ ++++++++ unsigned long gran_base, chunk_base, lose_base;
++++++++++++ ++++++++
printk(KERN_INFO "Found optimal setting for mtrr clean up\n");
i = index_good;
------------ -------- printk(KERN_INFO "gran_size: %ldM \tchunk_size: %ldM \t",
------------ -------- result[i].gran_sizek >> 10,
------------ -------- result[i].chunk_sizek >> 10);
------------ -------- printk(KERN_CONT "num_reg: %d \tlose RAM: %ldM\n",
------------ -------- result[i].num_reg,
------------ -------- result[i].lose_cover_sizek >> 10);
++++++++++++ ++++++++ gran_base = to_size_factor(result[i].gran_sizek, &gran_factor),
++++++++++++ ++++++++ chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor),
++++++++++++ ++++++++ lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor),
++++++++++++ ++++++++ printk(KERN_INFO "gran_size: %ld%c \tchunk_size: %ld%c \t",
++++++++++++ ++++++++ gran_base, gran_factor, chunk_base, chunk_factor);
++++++++++++ ++++++++ printk(KERN_CONT "num_reg: %d \tlose RAM: %ld%c\n",
++++++++++++ ++++++++ result[i].num_reg, lose_base, lose_factor);
/* convert ranges to var ranges state */
chunk_size = result[i].chunk_sizek;
chunk_size <<= 10;
gran_size = result[i].gran_sizek;
gran_size <<= 10;
------------ -------- debug_print = 1;
++++++++++++ ++++++++ debug_print++;
x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size);
++++++++++++ ++++++++ debug_print--;
set_var_mtrr_all(address_bits);
return 1;
}
/* kvm/qemu doesn't have mtrr set right, don't trim them all */
if (!highest_pfn) {
- - - - -- - if (!kvm_para_available()) {
- - - - -- - printk(KERN_WARNING
+ + + + ++ + WARN(!kvm_para_available(), KERN_WARNING
"WARNING: strange, CPU MTRRs all blank?\n");
- - - - -- - WARN_ON(1);
- - - - -- - }
return 0;
}
#include <linux/errno.h>
#include <linux/crash_dump.h>
-- ------------------
-- ------------------#include <asm/uaccess.h>
-- ------------------#include <asm/io.h>
++ ++++++++++++++++++#include <linux/uaccess.h>
++ ++++++++++++++++++#include <linux/io.h>
/**
* copy_oldmem_page - copy one page from "oldmem"
* in the current kernel. We stitch up a pte, similar to kmap_atomic.
*/
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
-- ------------------ size_t csize, unsigned long offset, int userbuf)
++ ++++++++++++++++++ size_t csize, unsigned long offset, int userbuf)
{
void *vaddr;
return 0;
vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
++++ ++++++++++++++++ if (!vaddr)
++++ ++++++++++++++++ return -ENOMEM;
if (userbuf) {
---- ---------------- if (copy_to_user(buf, (vaddr + offset), csize)) {
++++ ++++++++++++++++ if (copy_to_user(buf, vaddr + offset, csize)) {
iounmap(vaddr);
return -EFAULT;
}
} else
---- ---------------- memcpy(buf, (vaddr + offset), csize);
++++ ++++++++++++++++ memcpy(buf, vaddr + offset, csize);
iounmap(vaddr);
return csize;
static unsigned long next_bit; /* protected by iommu_bitmap_lock */
static int need_flush; /* global flush state. set for each gart wrap */
--------- -----------static unsigned long alloc_iommu(struct device *dev, int size)
+++++++++ +++++++++++static unsigned long alloc_iommu(struct device *dev, int size,
+++++++++ +++++++++++ unsigned long align_mask)
{
unsigned long offset, flags;
unsigned long boundary_size;
base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
PAGE_SIZE) >> PAGE_SHIFT;
--------- ----------- boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+++++++++ +++++++++++ boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
PAGE_SIZE) >> PAGE_SHIFT;
spin_lock_irqsave(&iommu_bitmap_lock, flags);
offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
--------- ----------- size, base_index, boundary_size, 0);
+++++++++ +++++++++++ size, base_index, boundary_size, align_mask);
if (offset == -1) {
need_flush = 1;
offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
--------- ----------- size, base_index, boundary_size, 0);
+++++++++ +++++++++++ size, base_index, boundary_size,
+++++++++ +++++++++++ align_mask);
}
if (offset != -1) {
next_bit = offset+size;
* Caller needs to check if the iommu is needed and flush.
*/
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
--------- ----------- size_t size, int dir)
+++++++++ +++++++++++ size_t size, int dir, unsigned long align_mask)
{
unsigned long npages = iommu_num_pages(phys_mem, size);
--------- ----------- unsigned long iommu_page = alloc_iommu(dev, npages);
+++++++++ +++++++++++ unsigned long iommu_page = alloc_iommu(dev, npages, align_mask);
int i;
if (iommu_page == -1) {
static dma_addr_t
gart_map_simple(struct device *dev, phys_addr_t paddr, size_t size, int dir)
{
--------- ----------- dma_addr_t map = dma_map_area(dev, paddr, size, dir);
+++++++++ +++++++++++ dma_addr_t map;
+++++++++ +++++++++++ unsigned long align_mask;
+++++++++ +++++++++++
+++++++++ +++++++++++ align_mask = (1UL << get_order(size)) - 1;
+++++++++ +++++++++++ map = dma_map_area(dev, paddr, size, dir, align_mask);
flush_gart();
if (!need_iommu(dev, paddr, size))
return paddr;
--------- ----------- bus = gart_map_simple(dev, paddr, size, dir);
+++++++++ +++++++++++ bus = dma_map_area(dev, paddr, size, dir, 0);
+++++++++ +++++++++++ flush_gart();
return bus;
}
unsigned long addr = sg_phys(s);
if (nonforced_iommu(dev, addr, s->length)) {
--------- ----------- addr = dma_map_area(dev, addr, s->length, dir);
+++++++++ +++++++++++ addr = dma_map_area(dev, addr, s->length, dir, 0);
if (addr == bad_dma_address) {
if (i > 0)
gart_unmap_sg(dev, sg, i, dir);
int nelems, struct scatterlist *sout,
unsigned long pages)
{
--------- ----------- unsigned long iommu_start = alloc_iommu(dev, pages);
+++++++++ +++++++++++ unsigned long iommu_start = alloc_iommu(dev, pages, 0);
unsigned long iommu_page = iommu_start;
struct scatterlist *s;
int i;
struct pci_dev *dev;
void *gatt;
int i, error;
------------------- - unsigned long start_pfn, end_pfn;
printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
aper_size = aper_base = info->aper_size = 0;
printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
aper_base, aper_size>>10);
------------------- - /* need to map that range */
------------------- - end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);
------------------- - if (end_pfn > max_low_pfn_mapped) {
------------------- - start_pfn = (aper_base>>PAGE_SHIFT);
------------------- - init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
------------------- - }
return 0;
nommu:
{
struct agp_kern_info info;
unsigned long iommu_start;
------------------- - unsigned long aper_size;
+++++++++++++++++++ + unsigned long aper_base, aper_size;
+++++++++++++++++++ + unsigned long start_pfn, end_pfn;
unsigned long scratch;
long i;
return;
}
+++++++++++++++++++ + /* need to map that range */
+++++++++++++++++++ + aper_size = info.aper_size << 20;
+++++++++++++++++++ + aper_base = info.aper_base;
+++++++++++++++++++ + end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);
+++++++++++++++++++ + if (end_pfn > max_low_pfn_mapped) {
+++++++++++++++++++ + start_pfn = (aper_base>>PAGE_SHIFT);
+++++++++++++++++++ + init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
+++++++++++++++++++ + }
+++++++++++++++++++ +
printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
------------------- - aper_size = info.aper_size * 1024 * 1024;
iommu_size = check_iommu_size(info.aper_base, aper_size);
iommu_pages = iommu_size >> PAGE_SHIFT;
static void poll_idle(void)
{
local_irq_enable();
---------- ---------- cpu_relax();
++++++++++ ++++++++++ while (!need_resched())
++++++++++ ++++++++++ cpu_relax();
}
/*
return 1;
}
+++++++ +++ ++++ + +static cpumask_t c1e_mask = CPU_MASK_NONE;
+++++++ +++ ++++ + +static int c1e_detected;
+++++++ +++ ++++ + +
+++++++ +++ ++++ + +void c1e_remove_cpu(int cpu)
+++++++ +++ ++++ + +{
+++++++ +++ ++++ + + cpu_clear(cpu, c1e_mask);
+++++++ +++ ++++ + +}
+++++++ +++ ++++ + +
/*
* C1E aware idle routine. We check for C1E active in the interrupt
* pending message MSR. If we detect C1E, then we handle it the same
*/
static void c1e_idle(void)
{
------- --- ---- - - static cpumask_t c1e_mask = CPU_MASK_NONE;
------- --- ---- - - static int c1e_detected;
------- --- ---- - -
if (need_resched())
return;
rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
if (lo & K8_INTP_C1E_ACTIVE_MASK) {
c1e_detected = 1;
------- --- ---- - - mark_tsc_unstable("TSC halt in C1E");
------- --- ---- - - printk(KERN_INFO "System has C1E enabled\n");
+++++++ +++ ++++ + + if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
+++++++ +++ ++++ + + mark_tsc_unstable("TSC halt in AMD C1E");
+++++++ +++ ++++ + + printk(KERN_INFO "System has AMD C1E enabled\n");
+++++++ +++ ++++ + + set_cpu_cap(&boot_cpu_data, X86_FEATURE_AMDC1E);
}
}
#include <linux/tick.h>
#include <linux/percpu.h>
#include <linux/prctl.h>
+++++ +++++++++++++++#include <linux/dmi.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/cpu.h>
#include <asm/kdebug.h>
+++++++ +++ ++++++ +#include <asm/idle.h>
++++++++++++++++++++#include <asm/syscalls.h>
++++++++++++++++++++#include <asm/smp.h>
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
cpu_clear(cpu, cpu_callin_map);
numa_remove_cpu(cpu);
+++++++ +++ ++++ + + c1e_remove_cpu(cpu);
}
/* We don't actually take CPU down, just spin without interrupts. */
{
/* This must be done before dead CPU ack */
cpu_exit_clear();
- - - - wbinvd();
mb();
/* Ack it */
__get_cpu_var(cpu_state) = CPU_DEAD;
* With physical CPU hotplug, we should halt the cpu
*/
local_irq_disable();
- - - - while (1)
- - - - halt();
+ + + + /* mask all interrupts, flush any and all caches, and halt */
+ + + + wbinvd_halt();
}
#else
static inline void play_dead(void)
unsigned long d0, d1, d2, d3, d6, d7;
unsigned long sp;
unsigned short ss, gs;
+++++ +++++++++++++++ const char *board;
if (user_mode_vm(regs)) {
sp = regs->sp;
}
printk("\n");
----- --------------- printk("Pid: %d, comm: %s %s (%s %.*s)\n",
+++++ +++++++++++++++
+++++ +++++++++++++++ board = dmi_get_system_info(DMI_PRODUCT_NAME);
+++++ +++++++++++++++ if (!board)
+++++ +++++++++++++++ board = "";
+++++ +++++++++++++++ printk("Pid: %d, comm: %s %s (%s %.*s) %s\n",
task_pid_nr(current), current->comm,
print_tainted(), init_utsname()->release,
(int)strcspn(init_utsname()->version, " "),
----- --------------- init_utsname()->version);
+++++ +++++++++++++++ init_utsname()->version, board);
printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
(u16)regs->cs, regs->ip, regs->flags,
tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
put_cpu();
}
++++++++++++++++++++#ifdef CONFIG_X86_DS
++++++++++++++++++++ /* Free any DS contexts that have not been properly released. */
++++++++++++++++++++ if (unlikely(current->thread.ds_ctx)) {
++++++++++++++++++++ /* we clear debugctl to make sure DS is not used. */
++++++++++++++++++++ update_debugctlmsr(0);
++++++++++++++++++++ ds_free(current->thread.ds_ctx);
++++++++++++++++++++ }
++++++++++++++++++++#endif /* CONFIG_X86_DS */
}
void flush_thread(void)
return 0;
}
++++++++++++++++++++#ifdef CONFIG_X86_DS
++++++++++++++++++++static int update_debugctl(struct thread_struct *prev,
++++++++++++++++++++ struct thread_struct *next, unsigned long debugctl)
++++++++++++++++++++{
++++++++++++++++++++ unsigned long ds_prev = 0;
++++++++++++++++++++ unsigned long ds_next = 0;
++++++++++++++++++++
++++++++++++++++++++ if (prev->ds_ctx)
++++++++++++++++++++ ds_prev = (unsigned long)prev->ds_ctx->ds;
++++++++++++++++++++ if (next->ds_ctx)
++++++++++++++++++++ ds_next = (unsigned long)next->ds_ctx->ds;
++++++++++++++++++++
++++++++++++++++++++ if (ds_next != ds_prev) {
++++++++++++++++++++ /* we clear debugctl to make sure DS
++++++++++++++++++++ * is not in use when we change it */
++++++++++++++++++++ debugctl = 0;
++++++++++++++++++++ update_debugctlmsr(0);
++++++++++++++++++++ wrmsr(MSR_IA32_DS_AREA, ds_next, 0);
++++++++++++++++++++ }
++++++++++++++++++++ return debugctl;
++++++++++++++++++++}
++++++++++++++++++++#else
++++++++++++++++++++static int update_debugctl(struct thread_struct *prev,
++++++++++++++++++++ struct thread_struct *next, unsigned long debugctl)
++++++++++++++++++++{
++++++++++++++++++++ return debugctl;
++++++++++++++++++++}
++++++++++++++++++++#endif /* CONFIG_X86_DS */
++++++++++++++++++++
static noinline void
__switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
struct tss_struct *tss)
prev = &prev_p->thread;
next = &next_p->thread;
-------------------- debugctl = prev->debugctlmsr;
-------------------- if (next->ds_area_msr != prev->ds_area_msr) {
-------------------- /* we clear debugctl to make sure DS
-------------------- * is not in use when we change it */
-------------------- debugctl = 0;
-------------------- update_debugctlmsr(0);
-------------------- wrmsr(MSR_IA32_DS_AREA, next->ds_area_msr, 0);
-------------------- }
++++++++++++++++++++ debugctl = update_debugctl(prev, next, prev->debugctlmsr);
if (next->debugctlmsr != debugctl)
update_debugctlmsr(next->debugctlmsr);
hard_enable_TSC();
}
--------------------#ifdef X86_BTS
++++++++++++++++++++#ifdef CONFIG_X86_PTRACE_BTS
if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);
if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);
--------------------#endif
++++++++++++++++++++#endif /* CONFIG_X86_PTRACE_BTS */
if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
#include <linux/kdebug.h>
#include <linux/tick.h>
#include <linux/prctl.h>
++ ++++++++++++++++++#include <linux/uaccess.h>
++ ++++++++++++++++++#include <linux/io.h>
-- ------------------#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
-- ------------------#include <asm/io.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/idle.h>
++++++++++++++++++++#include <asm/syscalls.h>
asmlinkage extern void ret_from_fork(void);
#ifdef CONFIG_HOTPLUG_CPU
DECLARE_PER_CPU(int, cpu_state);
-- ------------------#include <asm/nmi.h>
++ ++++++++++++++++++#include <linux/nmi.h>
/* We halt the CPU with physical CPU hotplug */
static inline void play_dead(void)
{
idle_task_exit();
- - - - wbinvd();
+++++++ +++ ++++ + + c1e_remove_cpu(raw_smp_processor_id());
+++++++ +++ ++++ + +
mb();
/* Ack it */
__get_cpu_var(cpu_state) = CPU_DEAD;
local_irq_disable();
- - - - while (1)
- - - - halt();
+ + + + /* mask all interrupts, flush any and all caches, and halt */
+ + + + wbinvd_halt();
}
#else
static inline void play_dead(void)
}
/* Prints also some state that isn't saved in the pt_regs */
-- ------------------void __show_regs(struct pt_regs * regs)
++ ++++++++++++++++++void __show_regs(struct pt_regs *regs)
{
unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
unsigned long d0, d1, d2, d3, d6, d7;
printk("\n");
print_modules();
-- ------------------ printk("Pid: %d, comm: %.20s %s %s %.*s\n",
++ ++++++++++++++++++ printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s\n",
current->pid, current->comm, print_tainted(),
init_utsname()->release,
(int)strcspn(init_utsname()->version, " "),
init_utsname()->version);
-- ------------------ printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
++ ++++++++++++++++++ printk(KERN_INFO "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
printk_address(regs->ip, 1);
-- ------------------ printk("RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->sp,
-- ------------------ regs->flags);
-- ------------------ printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
++ ++++++++++++++++++ printk(KERN_INFO "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
++ ++++++++++++++++++ regs->sp, regs->flags);
++ ++++++++++++++++++ printk(KERN_INFO "RAX: %016lx RBX: %016lx RCX: %016lx\n",
regs->ax, regs->bx, regs->cx);
-- ------------------ printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
++ ++++++++++++++++++ printk(KERN_INFO "RDX: %016lx RSI: %016lx RDI: %016lx\n",
regs->dx, regs->si, regs->di);
-- ------------------ printk("RBP: %016lx R08: %016lx R09: %016lx\n",
++ ++++++++++++++++++ printk(KERN_INFO "RBP: %016lx R08: %016lx R09: %016lx\n",
regs->bp, regs->r8, regs->r9);
-- ------------------ printk("R10: %016lx R11: %016lx R12: %016lx\n",
-- ------------------ regs->r10, regs->r11, regs->r12);
-- ------------------ printk("R13: %016lx R14: %016lx R15: %016lx\n",
-- ------------------ regs->r13, regs->r14, regs->r15);
-- ------------------
-- ------------------ asm("movl %%ds,%0" : "=r" (ds));
-- ------------------ asm("movl %%cs,%0" : "=r" (cs));
-- ------------------ asm("movl %%es,%0" : "=r" (es));
++ ++++++++++++++++++ printk(KERN_INFO "R10: %016lx R11: %016lx R12: %016lx\n",
++ ++++++++++++++++++ regs->r10, regs->r11, regs->r12);
++ ++++++++++++++++++ printk(KERN_INFO "R13: %016lx R14: %016lx R15: %016lx\n",
++ ++++++++++++++++++ regs->r13, regs->r14, regs->r15);
++ ++++++++++++++++++
++ ++++++++++++++++++ asm("movl %%ds,%0" : "=r" (ds));
++ ++++++++++++++++++ asm("movl %%cs,%0" : "=r" (cs));
++ ++++++++++++++++++ asm("movl %%es,%0" : "=r" (es));
asm("movl %%fs,%0" : "=r" (fsindex));
asm("movl %%gs,%0" : "=r" (gsindex));
rdmsrl(MSR_FS_BASE, fs);
-- ------------------ rdmsrl(MSR_GS_BASE, gs);
-- ------------------ rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
++ ++++++++++++++++++ rdmsrl(MSR_GS_BASE, gs);
++ ++++++++++++++++++ rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
cr0 = read_cr0();
cr2 = read_cr2();
cr3 = read_cr3();
cr4 = read_cr4();
-- ------------------ printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
-- ------------------ fs,fsindex,gs,gsindex,shadowgs);
-- ------------------ printk("CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0);
-- ------------------ printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4);
++ ++++++++++++++++++ printk(KERN_INFO "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
++ ++++++++++++++++++ fs, fsindex, gs, gsindex, shadowgs);
++ ++++++++++++++++++ printk(KERN_INFO "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
++ ++++++++++++++++++ es, cr0);
++ ++++++++++++++++++ printk(KERN_INFO "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
++ ++++++++++++++++++ cr4);
get_debugreg(d0, 0);
get_debugreg(d1, 1);
get_debugreg(d2, 2);
-- ------------------ printk("DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
++ ++++++++++++++++++ printk(KERN_INFO "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
get_debugreg(d3, 3);
get_debugreg(d6, 6);
get_debugreg(d7, 7);
-- ------------------ printk("DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
++ ++++++++++++++++++ printk(KERN_INFO "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
}
void show_regs(struct pt_regs *regs)
{
-- ------------------ printk("CPU %d:", smp_processor_id());
++ ++++++++++++++++++ printk(KERN_INFO "CPU %d:", smp_processor_id());
__show_regs(regs);
show_trace(NULL, regs, (void *)(regs + 1), regs->bp);
}
t->io_bitmap_max = 0;
put_cpu();
}
++++++++++++++++++++#ifdef CONFIG_X86_DS
++++++++++++++++++++ /* Free any DS contexts that have not been properly released. */
++++++++++++++++++++ if (unlikely(t->ds_ctx)) {
++++++++++++++++++++ /* we clear debugctl to make sure DS is not used. */
++++++++++++++++++++ update_debugctlmsr(0);
++++++++++++++++++++ ds_free(t->ds_ctx);
++++++++++++++++++++ }
++++++++++++++++++++#endif /* CONFIG_X86_DS */
}
void flush_thread(void)
int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
unsigned long unused,
-- ------------------ struct task_struct * p, struct pt_regs * regs)
++ ++++++++++++++++++ struct task_struct *p, struct pt_regs *regs)
{
int err;
-- ------------------ struct pt_regs * childregs;
++ ++++++++++++++++++ struct pt_regs *childregs;
struct task_struct *me = current;
childregs = ((struct pt_regs *)
if (test_thread_flag(TIF_IA32))
err = do_set_thread_area(p, -1,
(struct user_desc __user *)childregs->si, 0);
-- ------------------ else
-- ------------------#endif
-- ------------------ err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
-- ------------------ if (err)
++ ++++++++++++++++++ else
++ ++++++++++++++++++#endif
++ ++++++++++++++++++ err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
++ ++++++++++++++++++ if (err)
goto out;
}
err = 0;
next = &next_p->thread;
debugctl = prev->debugctlmsr;
-------------------- if (next->ds_area_msr != prev->ds_area_msr) {
-------------------- /* we clear debugctl to make sure DS
-------------------- * is not in use when we change it */
-------------------- debugctl = 0;
-------------------- update_debugctlmsr(0);
-------------------- wrmsrl(MSR_IA32_DS_AREA, next->ds_area_msr);
++++++++++++++++++++
++++++++++++++++++++#ifdef CONFIG_X86_DS
++++++++++++++++++++ {
++++++++++++++++++++ unsigned long ds_prev = 0, ds_next = 0;
++++++++++++++++++++
++++++++++++++++++++ if (prev->ds_ctx)
++++++++++++++++++++ ds_prev = (unsigned long)prev->ds_ctx->ds;
++++++++++++++++++++ if (next->ds_ctx)
++++++++++++++++++++ ds_next = (unsigned long)next->ds_ctx->ds;
++++++++++++++++++++
++++++++++++++++++++ if (ds_next != ds_prev) {
++++++++++++++++++++ /*
++++++++++++++++++++ * We clear debugctl to make sure DS
++++++++++++++++++++ * is not in use when we change it:
++++++++++++++++++++ */
++++++++++++++++++++ debugctl = 0;
++++++++++++++++++++ update_debugctlmsr(0);
++++++++++++++++++++ wrmsrl(MSR_IA32_DS_AREA, ds_next);
++++++++++++++++++++ }
}
++++++++++++++++++++#endif /* CONFIG_X86_DS */
if (next->debugctlmsr != debugctl)
update_debugctlmsr(next->debugctlmsr);
memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
}
--------------------#ifdef X86_BTS
++++++++++++++++++++#ifdef CONFIG_X86_PTRACE_BTS
if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);
if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);
--------------------#endif
++++++++++++++++++++#endif /* CONFIG_X86_PTRACE_BTS */
}
/*
unsigned fsindex, gsindex;
/* we're going to use this soon, after a few expensive things */
-- ------------------ if (next_p->fpu_counter>5)
++ ++++++++++++++++++ if (next_p->fpu_counter > 5)
prefetch(next->xstate);
/*
*/
load_sp0(tss, next);
-- ------------------ /*
++ ++++++++++++++++++ /*
* Switch DS and ES.
* This won't pick up thread selector changes, but I guess that is ok.
*/
savesegment(es, prev->es);
if (unlikely(next->es | prev->es))
-- ------------------ loadsegment(es, next->es);
++ ++++++++++++++++++ loadsegment(es, next->es);
savesegment(ds, prev->ds);
if (unlikely(next->ds | prev->ds))
*/
arch_leave_lazy_cpu_mode();
-- ------------------ /*
++ ++++++++++++++++++ /*
* Switch FS and GS.
*
* Segment register != 0 always requires a reload. Also
*/
if (unlikely(fsindex | next->fsindex | prev->fs)) {
loadsegment(fs, next->fsindex);
-- ------------------ /*
++ ++++++++++++++++++ /*
* Check if the user used a selector != 0; if yes
* clear 64bit base, since overloaded base is always
* mapped to the Null selector
*/
if (fsindex)
-- ------------------ prev->fs = 0;
++ ++++++++++++++++++ prev->fs = 0;
}
/* when next process has a 64bit base use it */
if (next->fs)
if (unlikely(gsindex | next->gsindex | prev->gs)) {
load_gs_index(next->gsindex);
if (gsindex)
-- ------------------ prev->gs = 0;
++ ++++++++++++++++++ prev->gs = 0;
}
if (next->gs)
wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
/* Must be after DS reload */
unlazy_fpu(prev_p);
-- ------------------ /*
++ ++++++++++++++++++ /*
* Switch the PDA and FPU contexts.
*/
prev->usersp = read_pda(oldrsp);
write_pda(oldrsp, next->usersp);
-- ------------------ write_pda(pcurrent, next_p);
++ ++++++++++++++++++ write_pda(pcurrent, next_p);
write_pda(kernelstack,
(unsigned long)task_stack_page(next_p) +
char __user * __user *envp, struct pt_regs *regs)
{
long error;
-- ------------------ char * filename;
++ ++++++++++++++++++ char *filename;
filename = getname(name);
error = PTR_ERR(filename);
unsigned long get_wchan(struct task_struct *p)
{
unsigned long stack;
-- ------------------ u64 fp,ip;
++ ++++++++++++++++++ u64 fp, ip;
int count = 0;
-- ------------------ if (!p || p == current || p->state==TASK_RUNNING)
-- ------------------ return 0;
++ ++++++++++++++++++ if (!p || p == current || p->state == TASK_RUNNING)
++ ++++++++++++++++++ return 0;
stack = (unsigned long)task_stack_page(p);
if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE)
return 0;
fp = *(u64 *)(p->thread.sp);
-- ------------------ do {
++ ++++++++++++++++++ do {
if (fp < (unsigned long)stack ||
fp > (unsigned long)stack+THREAD_SIZE)
-- ------------------ return 0;
++ ++++++++++++++++++ return 0;
ip = *(u64 *)(fp+8);
if (!in_sched_functions(ip))
return ip;
-- ------------------ fp = *(u64 *)fp;
-- ------------------ } while (count++ < 16);
++ ++++++++++++++++++ fp = *(u64 *)fp;
++ ++++++++++++++++++ } while (count++ < 16);
return 0;
}
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
-- ------------------{
-- ------------------ int ret = 0;
++ ++++++++++++++++++{
++ ++++++++++++++++++ int ret = 0;
int doit = task == current;
int cpu;
-- ------------------ switch (code) {
++ ++++++++++++++++++ switch (code) {
case ARCH_SET_GS:
if (addr >= TASK_SIZE_OF(task))
-- ------------------ return -EPERM;
++ ++++++++++++++++++ return -EPERM;
cpu = get_cpu();
-- ------------------ /* handle small bases via the GDT because that's faster to
++ ++++++++++++++++++ /* handle small bases via the GDT because that's faster to
switch. */
-- ------------------ if (addr <= 0xffffffff) {
-- ------------------ set_32bit_tls(task, GS_TLS, addr);
-- ------------------ if (doit) {
++ ++++++++++++++++++ if (addr <= 0xffffffff) {
++ ++++++++++++++++++ set_32bit_tls(task, GS_TLS, addr);
++ ++++++++++++++++++ if (doit) {
load_TLS(&task->thread, cpu);
-- ------------------ load_gs_index(GS_TLS_SEL);
++ ++++++++++++++++++ load_gs_index(GS_TLS_SEL);
}
-- ------------------ task->thread.gsindex = GS_TLS_SEL;
++ ++++++++++++++++++ task->thread.gsindex = GS_TLS_SEL;
task->thread.gs = 0;
-- ------------------ } else {
++ ++++++++++++++++++ } else {
task->thread.gsindex = 0;
task->thread.gs = addr;
if (doit) {
load_gs_index(0);
ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
-- ------------------ }
++ ++++++++++++++++++ }
}
put_cpu();
break;
rdmsrl(MSR_KERNEL_GS_BASE, base);
else
base = task->thread.gs;
-- ------------------ }
-- ------------------ else
++ ++++++++++++++++++ } else
base = task->thread.gs;
ret = put_user(base, (unsigned long __user *)addr);
break;
#define RAMDISK_LOAD_FLAG 0x4000
static char __initdata command_line[COMMAND_LINE_SIZE];
+++ +++++++++++++++++#ifdef CONFIG_CMDLINE_BOOL
+++ +++++++++++++++++static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
+++ +++++++++++++++++#endif
#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
* @size: Size of the crashkernel memory to reserve.
* Returns the base address on success, and -1ULL on failure.
*/
- - - - unsigned long long find_and_reserve_crashkernel(unsigned long long size)
+ + + + unsigned long long __init find_and_reserve_crashkernel(unsigned long long size)
{
const unsigned long long alignment = 16<<20; /* 16M */
unsigned long long start = 0LL;
early_cpu_init();
early_ioremap_init();
- - - - - -#if defined(CONFIG_VMI) && defined(CONFIG_X86_32)
- - - - - - /*
- - - - - - * Must be before kernel pagetables are setup
- - - - - - * or fixmap area is touched.
- - - - - - */
- - - - - - vmi_init();
- - - - - -#endif
- - - - - -
ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
screen_info = boot_params.screen_info;
edid_info = boot_params.edid_info;
bss_resource.start = virt_to_phys(&__bss_start);
bss_resource.end = virt_to_phys(&__bss_stop)-1;
+++ +++++++++++++++++#ifdef CONFIG_CMDLINE_BOOL
+++ +++++++++++++++++#ifdef CONFIG_CMDLINE_OVERRIDE
+++ +++++++++++++++++ strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
+++ +++++++++++++++++#else
+++ +++++++++++++++++ if (builtin_cmdline[0]) {
+++ +++++++++++++++++ /* append boot loader cmdline to builtin */
+++ +++++++++++++++++ strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
+++ +++++++++++++++++ strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
+++ +++++++++++++++++ strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
+++ +++++++++++++++++ }
+++ +++++++++++++++++#endif
+++ +++++++++++++++++#endif
+++ +++++++++++++++++
strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
*cmdline_p = command_line;
parse_early_param();
+++ ++ +++ +++ + +#ifdef CONFIG_X86_64
+++ ++ +++ +++ + + check_efer();
+++ ++ +++ +++ + +#endif
+++ ++ +++ +++ + +
+ + + + ++ +#if defined(CONFIG_VMI) && defined(CONFIG_X86_32)
+ + + + ++ + /*
+ + + + ++ + * Must be before kernel pagetables are setup
+ + + + ++ + * or fixmap area is touched.
+ + + + ++ + */
+ + + + ++ + vmi_init();
+ + + + ++ +#endif
+ + + + ++ +
/* after early param, so could get panic from serial */
reserve_early_setup_data();
#else
num_physpages = max_pfn;
--- -- --- --- - - check_efer();
/* How many end-of-memory variables you have, grandma! */
/* need this before calling reserve_initrd */
initmem_init(0, max_pfn);
- #ifdef CONFIG_X86_64
- dma32_reserve_bootmem();
- #endif
-
#ifdef CONFIG_ACPI_SLEEP
/*
* Reserve low memory region for sleep support.
#endif
reserve_crashkernel();
+ #ifdef CONFIG_X86_64
+ /*
+ * dma32_reserve_bootmem() allocates bootmem which may conflict
+ * with the crashkernel command line, so do that after
+ * reserve_crashkernel()
+ */
+ dma32_reserve_bootmem();
+ #endif
+
reserve_ibft_region();
#ifdef CONFIG_KVM_CLOCK
kvmclock_init();
#endif
- #if defined(CONFIG_VMI) && defined(CONFIG_X86_32)
- /*
- * Must be after max_low_pfn is determined, and before kernel
- * pagetables are setup.
- */
- vmi_init();
- #endif
-
paravirt_pagetable_setup_start(swapper_pg_dir);
paging_init();
paravirt_pagetable_setup_done(swapper_pg_dir);
init_apic_mappings();
ioapic_init_mappings();
- #if defined(CONFIG_SMP) && defined(CONFIG_X86_PC) && defined(CONFIG_X86_32)
- if (def_to_bigsmp)
- printk(KERN_WARNING "More than 8 CPUs detected and "
- "CONFIG_X86_PC cannot handle it.\nUse "
- "CONFIG_X86_GENERICARCH or CONFIG_X86_BIGSMP.\n");
- #endif
kvm_guest_init();
e820_reserve_resources();
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
++++++++++++++++++++#include <linux/tracehook.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/personality.h>
#include <linux/compiler.h>
++ ++++++++++++++++++#include <linux/uaccess.h>
++ ++++++++++++++++++
#include <asm/processor.h>
#include <asm/ucontext.h>
-- ------------------#include <asm/uaccess.h>
#include <asm/i387.h>
#include <asm/proto.h>
#include <asm/ia32_unistd.h>
#include <asm/mce.h>
++++++++++++++++++++#include <asm/syscall.h>
++++++++++++++++++++#include <asm/syscalls.h>
#include "sigframe.h"
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
# define FIX_EFLAGS __FIX_EFLAGS
#endif
-- ------------------int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
-- ------------------ sigset_t *set, struct pt_regs * regs);
-- ------------------int ia32_setup_frame(int sig, struct k_sigaction *ka,
-- ------------------ sigset_t *set, struct pt_regs * regs);
-- ------------------
asmlinkage long
sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
struct pt_regs *regs)
clts();
task_thread_info(current)->status |= TS_USEDFPU;
}
- - - - return restore_fpu_checking((__force struct i387_fxsave_struct *)buf);
+ + + + err = restore_fpu_checking((__force struct i387_fxsave_struct *)buf);
+ + + + if (unlikely(err)) {
+ + + + /*
+ + + + * Encountered an error while doing the restore from the
+ + + + * user buffer, clear the fpu state.
+ + + + */
+ + + + clear_fpu(tsk);
+ + + + clear_used_math();
+ + + + }
+ + + + return err;
}
/*
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
-- ------------------#define COPY(x) err |= __get_user(regs->x, &sc->x)
++ ++++++++++++++++++#define COPY(x) (err |= __get_user(regs->x, &sc->x))
COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
COPY(dx); COPY(cx); COPY(ip);
}
{
-- ------------------ struct _fpstate __user * buf;
++ ++++++++++++++++++ struct _fpstate __user *buf;
err |= __get_user(buf, &sc->fpstate);
if (buf) {
current->blocked = set;
recalc_sigpending();
spin_unlock_irq(¤t->sighand->siglock);
-- ------------------
++ ++++++++++++++++++
if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
goto badframe;
return ax;
badframe:
-- ------------------ signal_fault(regs,frame,"sigreturn");
++ ++++++++++++++++++ signal_fault(regs, frame, "sigreturn");
return 0;
-- ------------------}
++ ++++++++++++++++++}
/*
* Set up a signal frame.
*/
static inline int
-- ------------------setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, unsigned long mask, struct task_struct *me)
++ ++++++++++++++++++setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
++ ++++++++++++++++++ unsigned long mask, struct task_struct *me)
{
int err = 0;
}
static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
-- ------------------ sigset_t *set, struct pt_regs * regs)
++ ++++++++++++++++++ sigset_t *set, struct pt_regs *regs)
{
struct rt_sigframe __user *frame;
-- ------------------ struct _fpstate __user *fp = NULL;
++ ++++++++++++++++++ struct _fpstate __user *fp = NULL;
int err = 0;
struct task_struct *me = current;
if (used_math()) {
-- ------------------ fp = get_stack(ka, regs, sizeof(struct _fpstate));
++ ++++++++++++++++++ fp = get_stack(ka, regs, sizeof(struct _fpstate));
frame = (void __user *)round_down(
(unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8;
if (!access_ok(VERIFY_WRITE, fp, sizeof(struct _fpstate)))
goto give_sigsegv;
-- ------------------ if (save_i387(fp) < 0)
-- ------------------ err |= -1;
++ ++++++++++++++++++ if (save_i387(fp) < 0)
++ ++++++++++++++++++ err |= -1;
} else
frame = get_stack(ka, regs, sizeof(struct rt_sigframe)) - 8;
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
goto give_sigsegv;
-- ------------------ if (ka->sa.sa_flags & SA_SIGINFO) {
++ ++++++++++++++++++ if (ka->sa.sa_flags & SA_SIGINFO) {
err |= copy_siginfo_to_user(&frame->info, info);
if (err)
goto give_sigsegv;
}
-- ------------------
++ ++++++++++++++++++
/* Create the ucontext. */
err |= __put_user(0, &frame->uc.uc_flags);
err |= __put_user(0, &frame->uc.uc_link);
err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0], me);
err |= __put_user(fp, &frame->uc.uc_mcontext.fpstate);
-- ------------------ if (sizeof(*set) == 16) {
++ ++++++++++++++++++ if (sizeof(*set) == 16) {
__put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]);
-- ------------------ __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]);
++ ++++++++++++++++++ __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]);
} else
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
err |= __put_user(ka->sa.sa_restorer, &frame->pretcode);
} else {
/* could use a vstub here */
-- ------------------ goto give_sigsegv;
++ ++++++++++++++++++ goto give_sigsegv;
}
if (err)
/* Set up registers for signal handler */
regs->di = sig;
-- ------------------ /* In case the signal handler was declared without prototypes */
++ ++++++++++++++++++ /* In case the signal handler was declared without prototypes */
regs->ax = 0;
/* This also works for non SA_SIGINFO handlers because they expect the
return -EFAULT;
}
--------------------/*
-------------------- * Return -1L or the syscall number that @regs is executing.
-------------------- */
--------------------static long current_syscall(struct pt_regs *regs)
--------------------{
-------------------- /*
-------------------- * We always sign-extend a -1 value being set here,
-------------------- * so this is always either -1L or a syscall number.
-------------------- */
-------------------- return regs->orig_ax;
--------------------}
--------------------
--------------------/*
-------------------- * Return a value that is -EFOO if the system call in @regs->orig_ax
-------------------- * returned an error. This only works for @regs from @current.
-------------------- */
--------------------static long current_syscall_ret(struct pt_regs *regs)
--------------------{
--------------------#ifdef CONFIG_IA32_EMULATION
-------------------- if (test_thread_flag(TIF_IA32))
-------------------- /*
-------------------- * Sign-extend the value so (int)-EFOO becomes (long)-EFOO
-------------------- * and will match correctly in comparisons.
-------------------- */
-------------------- return (int) regs->ax;
--------------------#endif
-------------------- return regs->ax;
--------------------}
--------------------
/*
* OK, we're invoking a handler
-- ------------------ */
++ ++++++++++++++++++ */
static int
handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
int ret;
/* Are we from a system call? */
-------------------- if (current_syscall(regs) >= 0) {
++++++++++++++++++++ if (syscall_get_nr(current, regs) >= 0) {
/* If so, check system call restarting.. */
-------------------- switch (current_syscall_ret(regs)) {
++++++++++++++++++++ switch (syscall_get_error(current, regs)) {
case -ERESTART_RESTARTBLOCK:
case -ERESTARTNOHAND:
regs->ax = -EINTR;
ret = ia32_setup_rt_frame(sig, ka, info, oldset, regs);
else
ret = ia32_setup_frame(sig, ka, oldset, regs);
-- ------------------ } else
++ ++++++++++++++++++ } else
#endif
ret = setup_rt_frame(sig, ka, info, oldset, regs);
* handler too.
*/
regs->flags &= ~X86_EFLAGS_TF;
-------------------- if (test_thread_flag(TIF_SINGLESTEP))
-------------------- ptrace_notify(SIGTRAP);
spin_lock_irq(¤t->sighand->siglock);
-- ------------------ sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask);
++ ++++++++++++++++++ sigorsets(¤t->blocked, ¤t->blocked, &ka->sa.sa_mask);
if (!(ka->sa.sa_flags & SA_NODEFER))
-- ------------------ sigaddset(¤t->blocked,sig);
++ ++++++++++++++++++ sigaddset(¤t->blocked, sig);
recalc_sigpending();
spin_unlock_irq(¤t->sighand->siglock);
++++++++++++++++++++
++++++++++++++++++++ tracehook_signal_handler(sig, info, ka, regs,
++++++++++++++++++++ test_thread_flag(TIF_SINGLESTEP));
}
return ret;
}
/* Did we come from a system call? */
-------------------- if (current_syscall(regs) >= 0) {
++++++++++++++++++++ if (syscall_get_nr(current, regs) >= 0) {
/* Restart the system call - no handlers present */
-------------------- switch (current_syscall_ret(regs)) {
++++++++++++++++++++ switch (syscall_get_error(current, regs)) {
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR:
/* deal with pending signal delivery */
if (thread_info_flags & _TIF_SIGPENDING)
do_signal(regs);
++++++++++++++++++++
++++++++++++++++++++ if (thread_info_flags & _TIF_NOTIFY_RESUME) {
++++++++++++++++++++ clear_thread_flag(TIF_NOTIFY_RESUME);
++++++++++++++++++++ tracehook_notify_resume(regs);
++++++++++++++++++++ }
}
void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
-- ------------------{
-- ------------------ struct task_struct *me = current;
++ ++++++++++++++++++{
++ ++++++++++++++++++ struct task_struct *me = current;
if (show_unhandled_signals && printk_ratelimit()) {
printk("%s[%d] bad frame in %s frame:%p ip:%lx sp:%lx orax:%lx",
-- ------------------ me->comm,me->pid,where,frame,regs->ip,regs->sp,regs->orig_ax);
++ ++++++++++++++++++ me->comm, me->pid, where, frame, regs->ip,
++ ++++++++++++++++++ regs->sp, regs->orig_ax);
print_vma_addr(" in ", regs->ip);
printk("\n");
}
-- ------------------ force_sig(SIGSEGV, me);
-- ------------------}
++ ++++++++++++++++++ force_sig(SIGSEGV, me);
++ ++++++++++++++++++}
#define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x))
#define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p))
#else
--------------------struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
++++++++++++++++++++static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
#define get_idle_for_cpu(x) (idle_thread_array[(x)])
#define set_idle_for_cpu(x, p) (idle_thread_array[(x)] = (p))
#endif
static cpumask_t cpu_sibling_setup_map;
/* Set if we find a B stepping CPU */
--------------------int __cpuinitdata smp_b_stepping;
++++++++++++++++++++static int __cpuinitdata smp_b_stepping;
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_32)
* for which cpus receive the IPI. Holding this
* lock helps us to not include this cpu in a currently in progress
* smp_call_function().
+ *
+ * We need to hold vector_lock so there the set of online cpus
+ * does not change while we are assigning vectors to cpus. Holding
+ * this lock ensures we don't half assign or remove an irq from a cpu.
*/
ipi_call_lock_irq();
- #ifdef CONFIG_X86_IO_APIC
- setup_vector_irq(smp_processor_id());
- #endif
+ lock_vector_lock();
+ __setup_vector_irq(smp_processor_id());
cpu_set(smp_processor_id(), cpu_online_map);
+ unlock_vector_lock();
ipi_call_unlock_irq();
per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
}
#ifdef CONFIG_X86_64
+ + + +
+ + + + /* __ref because it's safe to call free_bootmem when after_bootmem == 0. */
+ + + + static void __ref free_bootmem_pda(struct x8664_pda *oldpda)
+ + + + {
+ + + + if (!after_bootmem)
+ + + + free_bootmem((unsigned long)oldpda, sizeof(*oldpda));
+ + + + }
+ + + +
/*
* Allocate node local memory for the AP pda.
*
if (oldpda) {
memcpy(newpda, oldpda, size);
- - - - if (!after_bootmem)
- - - - free_bootmem((unsigned long)oldpda, size);
+ + + + free_bootmem_pda(oldpda);
}
newpda->in_bootmem = 0;
flush_tlb_all();
low_mappings = 1;
- - - #ifdef CONFIG_X86_PC
- - - if (def_to_bigsmp && apicid > 8) {
- - - printk(KERN_WARNING
- - - "More than 8 CPUs detected - skipping them.\n"
- - - "Use CONFIG_X86_GENERICARCH and CONFIG_X86_BIGSMP.\n");
- - - err = -1;
- - - } else
- - - err = do_boot_cpu(apicid, cpu);
- - - #else
err = do_boot_cpu(apicid, cpu);
- - - #endif
zap_low_mappings();
low_mappings = 0;
static int __init smp_sanity_check(unsigned max_cpus)
{
preempt_disable();
+ + + +
+ + + + #if defined(CONFIG_X86_PC) && defined(CONFIG_X86_32)
+ + + + if (def_to_bigsmp && nr_cpu_ids > 8) {
+ + + + unsigned int cpu;
+ + + + unsigned nr;
+ + + +
+ + + + printk(KERN_WARNING
+ + + + "More than 8 CPUs detected - skipping them.\n"
+ + + + "Use CONFIG_X86_GENERICARCH and CONFIG_X86_BIGSMP.\n");
+ + + +
+ + + + nr = 0;
+ + + + for_each_present_cpu(cpu) {
+ + + + if (nr >= 8)
+ + + + cpu_clear(cpu, cpu_present_map);
+ + + + nr++;
+ + + + }
+ + + +
+ + + + nr = 0;
+ + + + for_each_possible_cpu(cpu) {
+ + + + if (nr >= 8)
+ + + + cpu_clear(cpu, cpu_possible_map);
+ + + + nr++;
+ + + + }
+ + + +
+ + + + nr_cpu_ids = 8;
+ + + + }
+ + + + #endif
+ + + +
if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
printk(KERN_WARNING "weird, boot CPU (#%d) not listed"
"by the BIOS.\n", hard_smp_processor_id());
printk(KERN_INFO "CPU%d: ", 0);
print_cpu_info(&cpu_data(0));
setup_boot_clock();
+ + + + ++ +
+ + + + ++ + if (is_uv_system())
+ + + + ++ + uv_system_init();
out:
preempt_enable();
}
if (!num_processors)
num_processors = 1;
-- ------------------#ifdef CONFIG_HOTPLUG_CPU
if (additional_cpus == -1) {
if (disabled_cpus > 0)
additional_cpus = disabled_cpus;
else
additional_cpus = 0;
}
-- ------------------#else
-- ------------------ additional_cpus = 0;
-- ------------------#endif
++ ++++++++++++++++++
possible = num_processors + additional_cpus;
if (possible > NR_CPUS)
possible = NR_CPUS;
remove_siblinginfo(cpu);
/* It's now safe to remove this processor from the online map */
+ lock_vector_lock();
remove_cpu_from_maps(cpu);
+ unlock_vector_lock();
fixup_irqs(cpu_online_map);
return 0;
}
BUG();
}
#endif
- - - -
- - - - /*
- - - - * If the BIOS enumerates physical processors before logical,
- - - - * maxcpus=N at enumeration-time can be used to disable HT.
- - - - */
- - - - static int __init parse_maxcpus(char *arg)
- - - - {
- - - - extern unsigned int maxcpus;
- - - -
- - - - if (arg)
- - - - maxcpus = simple_strtoul(arg, NULL, 0);
- - - - return 0;
- - - - }
- - - - early_param("maxcpus", parse_maxcpus);
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>
++ ++++++++++++++++++#include <linux/uaccess.h>
-- ------------------#include <asm/uaccess.h>
#include <asm/ia32.h>
++++++++++++++++++++#include <asm/syscalls.h>
-- ------------------asmlinkage long sys_mmap(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags,
-- ------------------ unsigned long fd, unsigned long off)
++ ++++++++++++++++++asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
++ ++++++++++++++++++ unsigned long prot, unsigned long flags,
++ ++++++++++++++++++ unsigned long fd, unsigned long off)
{
long error;
-- ------------------ struct file * file;
++ ++++++++++++++++++ struct file *file;
error = -EINVAL;
if (off & ~PAGE_MASK)
unmapped base down for this case. This can give
conflicts with the heap, but we assume that glibc
malloc knows how to fall back to mmap. Give it 1GB
-- ------------------ of playground for now. -AK */
-- ------------------ *begin = 0x40000000;
-- ------------------ *end = 0x80000000;
++ ++++++++++++++++++ of playground for now. -AK */
++ ++++++++++++++++++ *begin = 0x40000000;
++ ++++++++++++++++++ *end = 0x80000000;
if (current->flags & PF_RANDOMIZE) {
new_begin = randomize_range(*begin, *begin + 0x02000000, 0);
if (new_begin)
}
} else {
*begin = TASK_UNMAPPED_BASE;
-- ------------------ *end = TASK_SIZE;
++ ++++++++++++++++++ *end = TASK_SIZE;
}
-- ------------------}
++ ++++++++++++++++++}
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
struct vm_area_struct *vma;
unsigned long start_addr;
unsigned long begin, end;
-- ------------------
++ ++++++++++++++++++
if (flags & MAP_FIXED)
return addr;
-- ------------------ find_start_end(flags, &begin, &end);
++ ++++++++++++++++++ find_start_end(flags, &begin, &end);
if (len > end)
return -ENOMEM;
}
if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
&& len <= mm->cached_hole_size) {
-- ------------------ mm->cached_hole_size = 0;
++ ++++++++++++++++++ mm->cached_hole_size = 0;
mm->free_area_cache = begin;
}
addr = mm->free_area_cache;
-- ------------------ if (addr < begin)
-- ------------------ addr = begin;
++ ++++++++++++++++++ if (addr < begin)
++ ++++++++++++++++++ addr = begin;
start_addr = addr;
full_search:
return addr;
}
if (addr + mm->cached_hole_size < vma->vm_start)
-- ------------------ mm->cached_hole_size = vma->vm_start - addr;
++ ++++++++++++++++++ mm->cached_hole_size = vma->vm_start - addr;
addr = vma->vm_end;
}
vma = find_vma(mm, addr-len);
if (!vma || addr <= vma->vm_start)
/* remember the address as a hint for next time */
-- ------------------ return (mm->free_area_cache = addr-len);
++ ++++++++++++++++++ return mm->free_area_cache = addr-len;
}
if (mm->mmap_base < len)
vma = find_vma(mm, addr);
if (!vma || addr+len <= vma->vm_start)
/* remember the address as a hint for next time */
-- ------------------ return (mm->free_area_cache = addr);
++ ++++++++++++++++++ return mm->free_area_cache = addr;
/* remember the largest hole we saw so far */
if (addr + mm->cached_hole_size < vma->vm_start)
}
-- ------------------asmlinkage long sys_uname(struct new_utsname __user * name)
++ ++++++++++++++++++asmlinkage long sys_uname(struct new_utsname __user *name)
{
int err;
down_read(&uts_sem);
-- ------------------ err = copy_to_user(name, utsname(), sizeof (*name));
++ ++++++++++++++++++ err = copy_to_user(name, utsname(), sizeof(*name));
up_read(&uts_sem);
-- ------------------ if (personality(current->personality) == PER_LINUX32)
-- ------------------ err |= copy_to_user(&name->machine, "i686", 5);
++ ++++++++++++++++++ if (personality(current->personality) == PER_LINUX32)
++ ++++++++++++++++++ err |= copy_to_user(&name->machine, "i686", 5);
return err ? -EFAULT : 0;
}
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/mm.h>
++ ++++++++++++++++++#include <linux/smp.h>
++ ++++++++++++++++++#include <linux/io.h>
#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#include <asm/unwind.h>
#include <asm/desc.h>
#include <asm/i387.h>
-- ------------------#include <asm/nmi.h>
-- ------------------#include <asm/smp.h>
-- ------------------#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
#include <asm/pda.h>
void printk_address(unsigned long address, int reliable)
{
-- ------------------ printk(" [<%016lx>] %s%pS\n", address, reliable ? "": "? ", (void *) address);
++ ++++++++++++++++++ printk(" [<%016lx>] %s%pS\n",
++ ++++++++++++++++++ address, reliable ? "" : "? ", (void *) address);
}
static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
[STACKFAULT_STACK - 1] = "#SS",
[MCE_STACK - 1] = "#MC",
#if DEBUG_STKSZ > EXCEPTION_STKSZ
-- ------------------ [N_EXCEPTION_STACKS ... N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]"
++ ++++++++++++++++++ [N_EXCEPTION_STACKS ...
++ ++++++++++++++++++ N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]"
#endif
};
unsigned k;
}
/*
-- ------------------ * x86-64 can have up to three kernel stacks:
++ ++++++++++++++++++ * x86-64 can have up to three kernel stacks:
* process stack
* interrupt stack
* severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
const struct stacktrace_ops *ops, void *data)
{
const unsigned cpu = get_cpu();
-- ------------------ unsigned long *irqstack_end = (unsigned long*)cpu_pda(cpu)->irqstackptr;
++ ++++++++++++++++++ unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
unsigned used = 0;
struct thread_info *tinfo;
if (!bp) {
if (task == current) {
/* Grab bp right from our regs */
-- ------------------ asm("movq %%rbp, %0" : "=r" (bp) :);
++ ++++++++++++++++++ asm("movq %%rbp, %0" : "=r" (bp) : );
} else {
/* bp is the last reg pushed by switch_to */
bp = *(unsigned long *) task->thread.sp;
show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
unsigned long *stack, unsigned long bp, char *log_lvl)
{
----- --------------- printk("\nCall Trace:\n");
+++++ +++++++++++++++ printk("Call Trace:\n");
dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl);
----- --------------- printk("\n");
}
void show_trace(struct task_struct *task, struct pt_regs *regs,
unsigned long *stack;
int i;
const int cpu = smp_processor_id();
-- ------------------ unsigned long *irqstack_end = (unsigned long *) (cpu_pda(cpu)->irqstackptr);
-- ------------------ unsigned long *irqstack = (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);
++ ++++++++++++++++++ unsigned long *irqstack_end =
++ ++++++++++++++++++ (unsigned long *) (cpu_pda(cpu)->irqstackptr);
++ ++++++++++++++++++ unsigned long *irqstack =
++ ++++++++++++++++++ (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);
-- ------------------ // debugging aid: "show_stack(NULL, NULL);" prints the
-- ------------------ // back trace for this cpu.
++ ++++++++++++++++++ /*
++ ++++++++++++++++++ * debugging aid: "show_stack(NULL, NULL);" prints the
++ ++++++++++++++++++ * back trace for this cpu.
++ ++++++++++++++++++ */
if (sp == NULL) {
if (task)
printk(" %016lx", *stack++);
touch_nmi_watchdog();
}
+++++ +++++++++++++++ printk("\n");
show_trace_log_lvl(task, regs, sp, bp, log_lvl);
}
#ifdef CONFIG_FRAME_POINTER
if (!bp)
-- ------------------ asm("movq %%rbp, %0" : "=r" (bp):);
++ ++++++++++++++++++ asm("movq %%rbp, %0" : "=r" (bp) : );
#endif
printk("Pid: %d, comm: %.20s %s %s %.*s\n",
init_utsname()->version);
show_trace(NULL, NULL, &stack, bp);
}
-- ------------------
EXPORT_SYMBOL(dump_stack);
void show_registers(struct pt_regs *regs)
printk("Stack: ");
show_stack_log_lvl(NULL, regs, (unsigned long *)sp,
regs->bp, "");
----- --------------- printk("\n");
printk(KERN_EMERG "Code: ");
raw_local_irq_save(flags);
cpu = smp_processor_id();
if (!__raw_spin_trylock(&die_lock)) {
-- ------------------ if (cpu == die_owner)
++ ++++++++++++++++++ if (cpu == die_owner)
/* nested oops. should stop eventually */;
else
__raw_spin_lock(&die_lock);
}
#define DO_ERROR(trapnr, signr, str, name) \
-- ------------------asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
++ ++++++++++++++++++asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
{ \
if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
== NOTIFY_STOP) \
}
#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
-- ------------------asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
++ ++++++++++++++++++asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
{ \
siginfo_t info; \
info.si_signo = signr; \
preempt_conditional_cli(regs);
}
-- ------------------asmlinkage void do_double_fault(struct pt_regs * regs, long error_code)
++ ++++++++++++++++++asmlinkage void do_double_fault(struct pt_regs *regs, long error_code)
{
static const char str[] = "double fault";
struct task_struct *tsk = current;
}
static notrace __kprobes void
-- ------------------unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
++ ++++++++++++++++++unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
-- ------------------ if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
++ ++++++++++++++++++ if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) ==
++ ++++++++++++++++++ NOTIFY_STOP)
return;
printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
reason);
else if (user_mode(eregs))
regs = task_pt_regs(current);
/* Exception from kernel and interrupts are enabled. Move to
-- ------------------ kernel process stack. */
++ ++++++++++++++++++ kernel process stack. */
else if (eregs->flags & X86_EFLAGS_IF)
regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
if (eregs != regs)
}
/* runs on IST stack. */
-- ------------------asmlinkage void __kprobes do_debug(struct pt_regs * regs,
++ ++++++++++++++++++asmlinkage void __kprobes do_debug(struct pt_regs *regs,
unsigned long error_code)
{
struct task_struct *tsk = current;
asmlinkage void bad_intr(void)
{
-- ------------------ printk("bad interrupt");
++ ++++++++++++++++++ printk("bad interrupt");
}
asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
conditional_sti(regs);
if (!user_mode(regs) &&
-- ------------------ kernel_math_error(regs, "kernel simd math error", 19))
++ ++++++++++++++++++ kernel_math_error(regs, "kernel simd math error", 19))
return;
/*
force_sig_info(SIGFPE, &info, task);
}
-- ------------------asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs)
++ ++++++++++++++++++asmlinkage void do_spurious_interrupt_bug(struct pt_regs *regs)
{
}
}
clts(); /* Allow maths ops (or we recurse) */
- - - - restore_fpu_checking(&me->thread.xstate->fxsave);
+ + + + /*
+ + + + * Paranoid restore. send a SIGSEGV if we fail to restore the state.
+ + + + */
+ + + + if (unlikely(restore_fpu_checking(&me->thread.xstate->fxsave))) {
+ + + + stts();
+ + + + force_sig(SIGSEGV, me);
+ + + + return;
+ + + + }
task_thread_info(me)->status |= TS_USEDFPU;
me->fpu_counter++;
}
set_intr_gate(0, ÷_error);
set_intr_gate_ist(1, &debug, DEBUG_STACK);
set_intr_gate_ist(2, &nmi, NMI_STACK);
-- ------------------ set_system_gate_ist(3, &int3, DEBUG_STACK); /* int3 can be called from all */
-- ------------------ set_system_gate(4, &overflow); /* int4 can be called from all */
++ ++++++++++++++++++ /* int3 can be called from all */
++ ++++++++++++++++++ set_system_gate_ist(3, &int3, DEBUG_STACK);
++ ++++++++++++++++++ /* int4 can be called from all */
++ ++++++++++++++++++ set_system_gate(4, &overflow);
set_intr_gate(5, &bounds);
set_intr_gate(6, &invalid_op);
set_intr_gate(7, &device_not_available);
/*
* Read TSC and the reference counters. Take care of SMI disturbance
*/
- - ----- ----- -- --static u64 tsc_read_refs(u64 *pm, u64 *hpet)
- - - - static u64 __init tsc_read_refs(u64 *pm, u64 *hpet)
++++++++++++++++++ ++static u64 tsc_read_refs(u64 *p, int hpet)
{
u64 t1, t2;
int i;
for (i = 0; i < MAX_RETRIES; i++) {
t1 = get_cycles();
if (hpet)
------------------ -- *hpet = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
++++++++++++++++++ ++ *p = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
else
------------------ -- *pm = acpi_pm_read_early();
++++++++++++++++++ ++ *p = acpi_pm_read_early();
t2 = get_cycles();
if ((t2 - t1) < SMI_TRESHOLD)
return t2;
return ULLONG_MAX;
}
- - - -- --- -/**
- - - -- --- - * native_calibrate_tsc - calibrate the tsc on boot
++++++++++++++++++ ++/*
++++++++++++++++++ ++ * Calculate the TSC frequency from HPET reference
+ + +++ + +++ + + */
- - - -- --- -unsigned long native_calibrate_tsc(void)
++++++++++++++++++ ++static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
+ + +++ + +++ + + {
- - - -- --- - unsigned long flags;
- - - -- --- - u64 tsc1, tsc2, tr1, tr2, delta, pm1, pm2, hpet1, hpet2;
- - - -- --- - int hpet = is_hpet_enabled();
- - - -- --- - unsigned int tsc_khz_val = 0;
++++++++++++++++++ ++ u64 tmp;
+ + +++ + +++ + +
- - - -- --- - local_irq_save(flags);
++++++++++++++++++ ++ if (hpet2 < hpet1)
++++++++++++++++++ ++ hpet2 += 0x100000000ULL;
++++++++++++++++++ ++ hpet2 -= hpet1;
++++++++++++++++++ ++ tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
++++++++++++++++++ ++ do_div(tmp, 1000000);
++++++++++++++++++ ++ do_div(deltatsc, tmp);
++++++++++++++++++ ++
++++++++++++++++++ ++ return (unsigned long) deltatsc;
++++++++++++++++++ ++}
++++++++++++++++++ ++
++++++++++++++++++ ++/*
++++++++++++++++++ ++ * Calculate the TSC frequency from PMTimer reference
++++++++++++++++++ ++ */
++++++++++++++++++ ++static unsigned long calc_pmtimer_ref(u64 deltatsc, u64 pm1, u64 pm2)
++++++++++++++++++ ++{
++++++++++++++++++ ++ u64 tmp;
+ + +++ + ++++ + +
- - - - --- - tsc1 = tsc_read_refs(&pm1, hpet ? &hpet1 : NULL);
++++++++++++++++++ ++ if (!pm1 && !pm2)
++++++++++++++++++ ++ return ULONG_MAX;
++++++++++++++++++ ++
++++++++++++++++++ ++ if (pm2 < pm1)
++++++++++++++++++ ++ pm2 += (u64)ACPI_PM_OVRRUN;
++++++++++++++++++ ++ pm2 -= pm1;
++++++++++++++++++ ++ tmp = pm2 * 1000000000LL;
++++++++++++++++++ ++ do_div(tmp, PMTMR_TICKS_PER_SEC);
++++++++++++++++++ ++ do_div(deltatsc, tmp);
++++++++++++++++++ ++
++++++++++++++++++ ++ return (unsigned long) deltatsc;
++++++++++++++++++ ++}
++++++++++++++++++ ++
++++++++++++++++++ ++#define CAL_MS 10
++++++++++++++++++ ++#define CAL_LATCH (CLOCK_TICK_RATE / (1000 / CAL_MS))
++++++++++++++++++ ++#define CAL_PIT_LOOPS 1000
++++++++++++++++++ ++
++++++++++++++++++ ++#define CAL2_MS 50
++++++++++++++++++ ++#define CAL2_LATCH (CLOCK_TICK_RATE / (1000 / CAL2_MS))
++++++++++++++++++ ++#define CAL2_PIT_LOOPS 5000
++++++++++++++++++ ++
++++++++++++++++++ ++
+ + + ++ +++ +/*
+ + + ++ +++ + * Try to calibrate the TSC against the Programmable
+ + + ++ +++ + * Interrupt Timer and return the frequency of the TSC
+ + + ++ +++ + * in kHz.
+ + + ++ +++ + *
+ + + ++ +++ + * Return ULONG_MAX on failure to calibrate.
+ + + ++ +++ + */
- - --- - --- - - static unsigned long pit_calibrate_tsc(void)
++++++++++++++++++ ++static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin)
+ + + ++ +++ +{
+ + + ++ +++ + u64 tsc, t1, t2, delta;
+ + + ++ +++ + unsigned long tscmin, tscmax;
+ + + ++ +++ + int pitcnt;
+
+ + + ++ +++ + /* Set the Gate high, disable speaker */
+ outb((inb(0x61) & ~0x02) | 0x01, 0x61);
+
+ + + ++ +++ + /*
+ + + ++ +++ + * Setup CTC channel 2* for mode 0, (interrupt on terminal
+ + + ++ +++ + * count mode), binary count. Set the latch register to 50ms
+ + + ++ +++ + * (LSB then MSB) to begin countdown.
+ + + ++ +++ + */
+ outb(0xb0, 0x43);
---------- ------- -- outb((CLOCK_TICK_RATE / (1000 / 50)) & 0xff, 0x42);
---------- ------- -- outb((CLOCK_TICK_RATE / (1000 / 50)) >> 8, 0x42);
- - - - --- - tr1 = get_cycles();
- - - - --- - while ((inb(0x61) & 0x20) == 0);
- - - - --- - tr2 = get_cycles();
++++++++++++++++++ ++ outb(latch & 0xff, 0x42);
++++++++++++++++++ ++ outb(latch >> 8, 0x42);
+
- - - - --- - tsc2 = tsc_read_refs(&pm2, hpet ? &hpet2 : NULL);
+ + + ++ +++ + tsc = t1 = t2 = get_cycles();
+ + + ++ +++ +
+ + + ++ +++ + pitcnt = 0;
+ + + ++ +++ + tscmax = 0;
+ + + ++ +++ + tscmin = ULONG_MAX;
+ + + ++ +++ + while ((inb(0x61) & 0x20) == 0) {
+ + + ++ +++ + t2 = get_cycles();
+ + + ++ +++ + delta = t2 - tsc;
+ + + ++ +++ + tsc = t2;
+ + + ++ +++ + if ((unsigned long) delta < tscmin)
+ + + ++ +++ + tscmin = (unsigned int) delta;
+ + + ++ +++ + if ((unsigned long) delta > tscmax)
+ + + ++ +++ + tscmax = (unsigned int) delta;
+ + + ++ +++ + pitcnt++;
+ + + ++ +++ + }
+ + + ++ +++ +
+ + + ++ +++ + /*
+ + + ++ +++ + * Sanity checks:
+ + + ++ +++ + *
- - --- - --- - - * If we were not able to read the PIT more than 5000
++++++++++++++++++ ++ * If we were not able to read the PIT more than loopmin
+ + + ++ +++ + * times, then we have been hit by a massive SMI
+ + + ++ +++ + *
+ + + ++ +++ + * If the maximum is 10 times larger than the minimum,
+ + + ++ +++ + * then we got hit by an SMI as well.
+ + + ++ +++ + */
- - --- - --- - - if (pitcnt < 5000 || tscmax > 10 * tscmin)
++++++++++++++++++ ++ if (pitcnt < loopmin || tscmax > 10 * tscmin)
+ + + ++ +++ + return ULONG_MAX;
+ + + ++ +++ +
+ + + ++ +++ + /* Calculate the PIT value */
+ + + ++ +++ + delta = t2 - t1;
- - --- - --- - - do_div(delta, 50);
++++++++++++++++++ ++ do_div(delta, ms);
+ + + ++ +++ + return delta;
+ + + ++ +++ +}
+
++++++++++++++++++ ++/*
++++++++++++++++++ ++ * This reads the current MSB of the PIT counter, and
++++++++++++++++++ ++ * checks if we are running on sufficiently fast and
++++++++++++++++++ ++ * non-virtualized hardware.
++++++++++++++++++ ++ *
++++++++++++++++++ ++ * Our expectations are:
++++++++++++++++++ ++ *
++++++++++++++++++ ++ * - the PIT is running at roughly 1.19MHz
++++++++++++++++++ ++ *
++++++++++++++++++ ++ * - each IO is going to take about 1us on real hardware,
++++++++++++++++++ ++ * but we allow it to be much faster (by a factor of 10) or
++++++++++++++++++ ++ * _slightly_ slower (ie we allow up to a 2us read+counter
++++++++++++++++++ ++ * update - anything else implies a unacceptably slow CPU
++++++++++++++++++ ++ * or PIT for the fast calibration to work.
++++++++++++++++++ ++ *
++++++++++++++++++ ++ * - with 256 PIT ticks to read the value, we have 214us to
++++++++++++++++++ ++ * see the same MSB (and overhead like doing a single TSC
++++++++++++++++++ ++ * read per MSB value etc).
++++++++++++++++++ ++ *
++++++++++++++++++ ++ * - We're doing 2 reads per loop (LSB, MSB), and we expect
++++++++++++++++++ ++ * them each to take about a microsecond on real hardware.
++++++++++++++++++ ++ * So we expect a count value of around 100. But we'll be
++++++++++++++++++ ++ * generous, and accept anything over 50.
++++++++++++++++++ ++ *
++++++++++++++++++ ++ * - if the PIT is stuck, and we see *many* more reads, we
++++++++++++++++++ ++ * return early (and the next caller of pit_expect_msb()
++++++++++++++++++ ++ * then consider it a failure when they don't see the
++++++++++++++++++ ++ * next expected value).
++++++++++++++++++ ++ *
++++++++++++++++++ ++ * These expectations mean that we know that we have seen the
++++++++++++++++++ ++ * transition from one expected value to another with a fairly
++++++++++++++++++ ++ * high accuracy, and we didn't miss any events. We can thus
++++++++++++++++++ ++ * use the TSC value at the transitions to calculate a pretty
++++++++++++++++++ ++ * good value for the TSC frequencty.
++++++++++++++++++ ++ */
++++++++++++++++++ ++static inline int pit_expect_msb(unsigned char val)
++++++++++++++++++ ++{
++++++++++++++++++ ++ int count = 0;
++++++++++ +++++++ ++
- tsc1 = tsc_read_refs(&pm1, hpet ? &hpet1 : NULL);
++++++++++++++++++ ++ for (count = 0; count < 50000; count++) {
++++++++++++++++++ ++ /* Ignore LSB */
++++++++++++++++++ ++ inb(0x42);
++++++++++++++++++ ++ if (inb(0x42) != val)
++++++++++++++++++ ++ break;
++++++++++++++++++ ++ }
++++++++++++++++++ ++ return count > 50;
++++++++++++++++++ ++}
++++++++++++++++++ ++
++++++++++++++++++ ++/*
++++++++++++++++++ ++ * How many MSB values do we want to see? We aim for a
++++++++++++++++++ ++ * 15ms calibration, which assuming a 2us counter read
++++++++++++++++++ ++ * error should give us roughly 150 ppm precision for
++++++++++++++++++ ++ * the calibration.
++++++++++++++++++ ++ */
++++++++++++++++++ ++#define QUICK_PIT_MS 15
++++++++++++++++++ ++#define QUICK_PIT_ITERATIONS (QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)
++++++++++ +++++++ ++
++++++++++++++++++ ++static unsigned long quick_pit_calibrate(void)
++++++++++++++++++ ++{
++++++++++++++++++ ++ /* Set the Gate high, disable speaker */
++++++++++ +++++++ ++ outb((inb(0x61) & ~0x02) | 0x01, 0x61);
++++++++++ +++++++ ++
++++++++++++++++++ ++ /*
++++++++++++++++++ ++ * Counter 2, mode 0 (one-shot), binary count
++++++++++++++++++ ++ *
++++++++++++++++++ ++ * NOTE! Mode 2 decrements by two (and then the
++++++++++++++++++ ++ * output is flipped each time, giving the same
++++++++++++++++++ ++ * final output frequency as a decrement-by-one),
++++++++++++++++++ ++ * so mode 0 is much better when looking at the
++++++++++++++++++ ++ * individual counts.
++++++++++++++++++ ++ */
++++++++++ +++++++ ++ outb(0xb0, 0x43);
- outb((CLOCK_TICK_RATE / (1000 / 50)) & 0xff, 0x42);
- outb((CLOCK_TICK_RATE / (1000 / 50)) >> 8, 0x42);
- tr1 = get_cycles();
- while ((inb(0x61) & 0x20) == 0);
- tr2 = get_cycles();
++++++++++ +++++++ ++
- tsc2 = tsc_read_refs(&pm2, hpet ? &hpet2 : NULL);
++++++++++++++++++ ++ /* Start at 0xffff */
++++++++++++++++++ ++ outb(0xff, 0x42);
++++++++++++++++++ ++ outb(0xff, 0x42);
++++++++++++++++++ ++
++++++++++++++++++ ++ if (pit_expect_msb(0xff)) {
++++++++++++++++++ ++ int i;
++++++++++++++++++ ++ u64 t1, t2, delta;
++++++++++++++++++ ++ unsigned char expect = 0xfe;
++++++++++++++++++ ++
++++++++++++++++++ ++ t1 = get_cycles();
++++++++++++++++++ ++ for (i = 0; i < QUICK_PIT_ITERATIONS; i++, expect--) {
++++++++++++++++++ ++ if (!pit_expect_msb(expect))
++++++++++++++++++ ++ goto failed;
++++++++++++++++++ ++ }
++++++++++++++++++ ++ t2 = get_cycles();
++++++++++++++++++ ++
++++++++++++++++++ ++ /*
++++++++++++++++++ ++ * Make sure we can rely on the second TSC timestamp:
++++++++++++++++++ ++ */
++++++++++++++++++ ++ if (!pit_expect_msb(expect))
++++++++++++++++++ ++ goto failed;
++++++++++++++++++ ++
++++++++++++++++++ ++ /*
++++++++++++++++++ ++ * Ok, if we get here, then we've seen the
++++++++++++++++++ ++ * MSB of the PIT decrement QUICK_PIT_ITERATIONS
++++++++++++++++++ ++ * times, and each MSB had many hits, so we never
++++++++++++++++++ ++ * had any sudden jumps.
++++++++++++++++++ ++ *
++++++++++++++++++ ++ * As a result, we can depend on there not being
++++++++++++++++++ ++ * any odd delays anywhere, and the TSC reads are
++++++++++++++++++ ++ * reliable.
++++++++++++++++++ ++ *
++++++++++++++++++ ++ * kHz = ticks / time-in-seconds / 1000;
++++++++++++++++++ ++ * kHz = (t2 - t1) / (QPI * 256 / PIT_TICK_RATE) / 1000
++++++++++++++++++ ++ * kHz = ((t2 - t1) * PIT_TICK_RATE) / (QPI * 256 * 1000)
++++++++++++++++++ ++ */
++++++++++++++++++ ++ delta = (t2 - t1)*PIT_TICK_RATE;
++++++++++++++++++ ++ do_div(delta, QUICK_PIT_ITERATIONS*256*1000);
++++++++++++++++++ ++ printk("Fast TSC calibration using PIT\n");
++++++++++++++++++ ++ return delta;
++++++++++++++++++ ++ }
++++++++++++++++++ ++failed:
++++++++++++++++++ ++ return 0;
++++++++++++++++++ ++}
+ + + + +++ +
+ + + ++ +++ +/**
+ + + ++ +++ + * native_calibrate_tsc - calibrate the tsc on boot
+ + + ++ +++ + */
+ + + ++ +++ +unsigned long native_calibrate_tsc(void)
+ + + ++ +++ +{
- - --- - --- - - u64 tsc1, tsc2, delta, pm1, pm2, hpet1, hpet2;
++++++++++++++++++ ++ u64 tsc1, tsc2, delta, ref1, ref2;
+ + + ++ +++ + unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
- - --- - --- - - unsigned long flags;
- - --- - --- - - int hpet = is_hpet_enabled(), i;
++++++++++++++++++ ++ unsigned long flags, latch, ms, fast_calibrate;
++++++++++++++++++ ++ int hpet = is_hpet_enabled(), i, loopmin;
++++++++++++++++++ ++
++++++++++++++++++ ++ local_irq_save(flags);
++++++++++++++++++ ++ fast_calibrate = quick_pit_calibrate();
+ + +++ + +++ + + local_irq_restore(flags);
++++++++++++++++++ ++ if (fast_calibrate)
++++++++++++++++++ ++ return fast_calibrate;
/*
- - - -- --- - * Preset the result with the raw and inaccurate PIT
- - - -- --- - * calibration value
+ + + ++ +++ + * Run 5 calibration loops to get the lowest frequency value
+ + + ++ +++ + * (the best estimate). We use two different calibration modes
+ + + ++ +++ + * here:
+ + + ++ +++ + *
+ + + ++ +++ + * 1) PIT loop. We set the PIT Channel 2 to oneshot mode and
+ + + ++ +++ + * load a timeout of 50ms. We read the time right after we
+ + + ++ +++ + * started the timer and wait until the PIT count down reaches
+ + + ++ +++ + * zero. In each wait loop iteration we read the TSC and check
+ + + ++ +++ + * the delta to the previous read. We keep track of the min
+ + + ++ +++ + * and max values of that delta. The delta is mostly defined
+ + + ++ +++ + * by the IO time of the PIT access, so we can detect when a
+ + + ++ +++ + * SMI/SMM disturbance happened between the two reads. If the
+ + + ++ +++ + * maximum time is significantly larger than the minimum time,
+ + + ++ +++ + * then we discard the result and have another try.
+ + + ++ +++ + *
+ + + ++ +++ + * 2) Reference counter. If available we use the HPET or the
+ + + ++ +++ + * PMTIMER as a reference to check the sanity of that value.
+ + + ++ +++ + * We use separate TSC readouts and check inside of the
+ + + ++ +++ + * reference read for a SMI/SMM disturbance. We discard
+ + + ++ +++ + * disturbed values here as well. We do that around the PIT
+ + + ++ +++ + * calibration delay loop as we have to wait for a certain
+ + + ++ +++ + * amount of time anyway.
*/
- - --- - --- - - for (i = 0; i < 5; i++) {
- - - -- --- - delta = (tr2 - tr1);
- - - -- --- - do_div(delta, 50);
- - - -- --- - tsc_khz_val = delta;
- - - -- --- -
- - - -- --- - /* hpet or pmtimer available ? */
- - - -- --- - if (!hpet && !pm1 && !pm2) {
- - - -- --- - printk(KERN_INFO "TSC calibrated against PIT\n");
- - - -- --- - goto out;
++++++++++++++++++ ++
++++++++++++++++++ ++ /* Preset PIT loop values */
++++++++++++++++++ ++ latch = CAL_LATCH;
++++++++++++++++++ ++ ms = CAL_MS;
++++++++++++++++++ ++ loopmin = CAL_PIT_LOOPS;
++++++++++++++++++ ++
++++++++++++++++++ ++ for (i = 0; i < 3; i++) {
+ + + ++ +++ + unsigned long tsc_pit_khz;
+ + + ++ +++ +
+ + + ++ +++ + /*
+ + + ++ +++ + * Read the start value and the reference count of
+ + + ++ +++ + * hpet/pmtimer when available. Then do the PIT
+ + + ++ +++ + * calibration, which will take at least 50ms, and
+ + + ++ +++ + * read the end value.
+ + + ++ +++ + */
+ + + ++ +++ + local_irq_save(flags);
- - --- - --- - - tsc1 = tsc_read_refs(&pm1, hpet ? &hpet1 : NULL);
- - --- - --- - - tsc_pit_khz = pit_calibrate_tsc();
- - --- - --- - - tsc2 = tsc_read_refs(&pm2, hpet ? &hpet2 : NULL);
++++++++++++++++++ ++ tsc1 = tsc_read_refs(&ref1, hpet);
++++++++++++++++++ ++ tsc_pit_khz = pit_calibrate_tsc(latch, ms, loopmin);
++++++++++++++++++ ++ tsc2 = tsc_read_refs(&ref2, hpet);
+ + + ++ +++ + local_irq_restore(flags);
+ + + ++ +++ +
+ + + ++ +++ + /* Pick the lowest PIT TSC calibration so far */
+ + + ++ +++ + tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);
+ + + ++ +++ +
+ + + ++ +++ + /* hpet or pmtimer available ? */
- - --- - --- - - if (!hpet && !pm1 && !pm2)
++++++++++++++++++ ++ if (!hpet && !ref1 && !ref2)
+ + + ++ +++ + continue;
+ + + ++ +++ +
+ + + ++ +++ + /* Check, whether the sampling was disturbed by an SMI */
+ + + ++ +++ + if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX)
+ + + ++ +++ + continue;
+ + + ++ +++ +
+ + + ++ +++ + tsc2 = (tsc2 - tsc1) * 1000000LL;
++++++++++++++++++ ++ if (hpet)
++++++++++++++++++ ++ tsc2 = calc_hpet_ref(tsc2, ref1, ref2);
++++++++++++++++++ ++ else
++++++++++++++++++ ++ tsc2 = calc_pmtimer_ref(tsc2, ref1, ref2);
+ + + ++ +++ +
- - --- - --- - - if (hpet) {
- - --- - --- - - if (hpet2 < hpet1)
- - --- - --- - - hpet2 += 0x100000000ULL;
- - --- - --- - - hpet2 -= hpet1;
- - --- - --- - - tsc1 = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
- - --- - --- - - do_div(tsc1, 1000000);
- - --- - --- - - } else {
- - --- - --- - - if (pm2 < pm1)
- - --- - --- - - pm2 += (u64)ACPI_PM_OVRRUN;
- - --- - --- - - pm2 -= pm1;
- - --- - --- - - tsc1 = pm2 * 1000000000LL;
- - --- - --- - - do_div(tsc1, PMTMR_TICKS_PER_SEC);
++++++++++++++++++ ++ tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2);
++++++++++++++++++ ++
++++++++++++++++++ ++ /* Check the reference deviation */
++++++++++++++++++ ++ delta = ((u64) tsc_pit_min) * 100;
++++++++++++++++++ ++ do_div(delta, tsc_ref_min);
++++++++++++++++++ ++
++++++++++++++++++ ++ /*
++++++++++++++++++ ++ * If both calibration results are inside a 10% window
++++++++++++++++++ ++ * then we can be sure that the calibration
++++++++++++++++++ ++ * succeeded. We break out of the loop right away. We
++++++++++++++++++ ++ * use the reference value, as it is more precise.
++++++++++++++++++ ++ */
++++++++++++++++++ ++ if (delta >= 90 && delta <= 110) {
++++++++++++++++++ ++ printk(KERN_INFO
++++++++++++++++++ ++ "TSC: PIT calibration matches %s. %d loops\n",
++++++++++++++++++ ++ hpet ? "HPET" : "PMTIMER", i + 1);
++++++++++++++++++ ++ return tsc_ref_min;
+ + + ++ +++ + }
+ + + ++ +++ +
- - --- - --- - - do_div(tsc2, tsc1);
- - --- - --- - - tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2);
++++++++++++++++++ ++ /*
++++++++++++++++++ ++ * Check whether PIT failed more than once. This
++++++++++++++++++ ++ * happens in virtualized environments. We need to
++++++++++++++++++ ++ * give the virtual PC a slightly longer timeframe for
++++++++++++++++++ ++ * the HPET/PMTIMER to make the result precise.
++++++++++++++++++ ++ */
++++++++++++++++++ ++ if (i == 1 && tsc_pit_min == ULONG_MAX) {
++++++++++++++++++ ++ latch = CAL2_LATCH;
++++++++++++++++++ ++ ms = CAL2_MS;
++++++++++++++++++ ++ loopmin = CAL2_PIT_LOOPS;
++++++++++++++++++ ++ }
}
- - - -- --- - /* Check, whether the sampling was disturbed by an SMI */
- - - -- --- - if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX) {
- - - -- --- - printk(KERN_WARNING "TSC calibration disturbed by SMI, "
- - - -- --- - "using PIT calibration result\n");
- - - -- --- - goto out;
+ + + ++ +++ + /*
+ + + ++ +++ + * Now check the results.
+ + + ++ +++ + */
+ + + ++ +++ + if (tsc_pit_min == ULONG_MAX) {
+ + + ++ +++ + /* PIT gave no useful value */
- printk(KERN_WARNING "TSC: PIT calibration failed due to "
- "SMI disturbance.\n");
+ + + ++ +++ + + printk(KERN_WARNING "TSC: Unable to calibrate against PIT\n");
+ + + ++ +++ +
+ + + ++ +++ + /* We don't have an alternative source, disable TSC */
- - --- - --- - - if (!hpet && !pm1 && !pm2) {
++++++++++++++++++ ++ if (!hpet && !ref1 && !ref2) {
+ + + ++ +++ + printk("TSC: No reference (HPET/PMTIMER) available\n");
+ + + ++ +++ + return 0;
+ + + ++ +++ + }
+ + + ++ +++ +
+ + + ++ +++ + /* The alternative source failed as well, disable TSC */
+ + + ++ +++ + if (tsc_ref_min == ULONG_MAX) {
+ + + ++ +++ + printk(KERN_WARNING "TSC: HPET/PMTIMER calibration "
- - --- - --- - - "failed due to SMI disturbance.\n");
++++++++++++++++++ ++ "failed.\n");
+ + + ++ +++ + return 0;
+ + + ++ +++ + }
+ + + ++ +++ +
+ + + ++ +++ + /* Use the alternative source */
+ + + ++ +++ + printk(KERN_INFO "TSC: using %s reference calibration\n",
+ + + ++ +++ + hpet ? "HPET" : "PMTIMER");
+ + + ++ +++ +
+ + + ++ +++ + return tsc_ref_min;
}
- - - -- --- - tsc2 = (tsc2 - tsc1) * 1000000LL;
- - - -- --- -
- - - -- --- - if (hpet) {
- - - -- --- - printk(KERN_INFO "TSC calibrated against HPET\n");
- - - -- --- - if (hpet2 < hpet1)
- - - -- --- - hpet2 += 0x100000000ULL;
- - - -- --- - hpet2 -= hpet1;
- - - -- --- - tsc1 = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
- - - -- --- - do_div(tsc1, 1000000);
- - - -- --- - } else {
- - - -- --- - printk(KERN_INFO "TSC calibrated against PM_TIMER\n");
- - - -- --- - if (pm2 < pm1)
- - - -- --- - pm2 += (u64)ACPI_PM_OVRRUN;
- - - -- --- - pm2 -= pm1;
- - - -- --- - tsc1 = pm2 * 1000000000LL;
- - - -- --- - do_div(tsc1, PMTMR_TICKS_PER_SEC);
+ + + ++ +++ + /* We don't have an alternative source, use the PIT calibration value */
- - --- - --- - - if (!hpet && !pm1 && !pm2) {
++++++++++++++++++ ++ if (!hpet && !ref1 && !ref2) {
+ + + ++ +++ + printk(KERN_INFO "TSC: Using PIT calibration value\n");
+ + + ++ +++ + return tsc_pit_min;
}
- - - -- --- - do_div(tsc2, tsc1);
- - - -- --- - tsc_khz_val = tsc2;
+ + + ++ +++ + /* The alternative source failed, use the PIT calibration value */
+ + + ++ +++ + if (tsc_ref_min == ULONG_MAX) {
- - --- - --- - - printk(KERN_WARNING "TSC: HPET/PMTIMER calibration failed due "
- - --- - --- - - "to SMI disturbance. Using PIT calibration\n");
++++++++++++++++++ ++ printk(KERN_WARNING "TSC: HPET/PMTIMER calibration failed. "
++++++++++++++++++ ++ "Using PIT calibration\n");
+ + + ++ +++ + return tsc_pit_min;
+ + + ++ +++ + }
- - --- - --- - - /* Check the reference deviation */
- - --- - --- - - delta = ((u64) tsc_pit_min) * 100;
- - --- - --- - - do_div(delta, tsc_ref_min);
- - --- - --- - -
- - --- - --- - - /*
- - --- - --- - - * If both calibration results are inside a 5% window, the we
- - --- - --- - - * use the lower frequency of those as it is probably the
- - --- - --- - - * closest estimate.
- - --- - --- - - */
- - --- - --- - - if (delta >= 95 && delta <= 105) {
- - --- - --- - - printk(KERN_INFO "TSC: PIT calibration confirmed by %s.\n",
- - --- - --- - - hpet ? "HPET" : "PMTIMER");
- - --- - --- - - printk(KERN_INFO "TSC: using %s calibration value\n",
- - --- - --- - - tsc_pit_min <= tsc_ref_min ? "PIT" :
- - --- - --- - - hpet ? "HPET" : "PMTIMER");
- - --- - --- - - return tsc_pit_min <= tsc_ref_min ? tsc_pit_min : tsc_ref_min;
- - --- - --- - - }
- - --- - --- - -
- - --- - --- - - printk(KERN_WARNING "TSC: PIT calibration deviates from %s: %lu %lu.\n",
- - --- - --- - - hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);
- - --- - --- - -
- - - -- --- -out:
- - - -- --- - return tsc_khz_val;
+ + + ++ +++ + /*
+ + + ++ +++ + * The calibration values differ too much. In doubt, we use
+ + + ++ +++ + * the PIT value as we know that there are PMTIMERs around
- - --- - --- - - * running at double speed.
++++++++++++++++++ ++ * running at double speed. At least we let the user know:
+ + + ++ +++ + */
++++++++++++++++++ ++ printk(KERN_WARNING "TSC: PIT calibration deviates from %s: %lu %lu.\n",
++++++++++++++++++ ++ hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);
+ + + ++ +++ + printk(KERN_INFO "TSC: Using PIT calibration value\n");
+ + + ++ +++ + return tsc_pit_min;
}
- - - -- --- -
#ifdef CONFIG_X86_32
/* Only called from the Powernow K7 cpu freq driver */
int recalibrate_cpu_khz(void)
mark_tsc_unstable("cpufreq changes");
}
- - - - --- - set_cyc2ns_scale(tsc_khz_ref, freq->cpu);
+ + + + +++ + set_cyc2ns_scale(tsc_khz, freq->cpu);
return 0;
}
static int __init cpufreq_tsc(void)
{
+ + + + +++ + if (!cpu_has_tsc)
+ + + + +++ + return 0;
+ + + + +++ + if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
+ + + + +++ + return 0;
cpufreq_register_notifier(&time_cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
return 0;
#include <asm/timer.h>
#include <asm/vmi_time.h>
#include <asm/kmap_types.h>
+ #include <asm/setup.h>
/* Convenient for calling VMI functions indirectly in the ROM */
typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
const void *desc)
{
u32 *ldt_entry = (u32 *)desc;
------------------ - vmi_ops.write_idt_entry(dt, entry, ldt_entry[0], ldt_entry[1]);
++++++++++++++++++ + vmi_ops.write_ldt_entry(dt, entry, ldt_entry[0], ldt_entry[1]);
}
static void vmi_load_sp0(struct tss_struct *tss,
}
#endif
--------------- -----static void vmi_allocate_pte(struct mm_struct *mm, u32 pfn)
+++++++++++++++ +++++static void vmi_allocate_pte(struct mm_struct *mm, unsigned long pfn)
{
vmi_set_page_type(pfn, VMI_PAGE_L1);
vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0);
}
--------------- -----static void vmi_allocate_pmd(struct mm_struct *mm, u32 pfn)
+++++++++++++++ +++++static void vmi_allocate_pmd(struct mm_struct *mm, unsigned long pfn)
{
/*
* This call comes in very early, before mem_map is setup.
vmi_ops.allocate_page(pfn, VMI_PAGE_L2, 0, 0, 0);
}
--------------- -----static void vmi_allocate_pmd_clone(u32 pfn, u32 clonepfn, u32 start, u32 count)
+++++++++++++++ +++++static void vmi_allocate_pmd_clone(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count)
{
vmi_set_page_type(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE);
vmi_check_page_type(clonepfn, VMI_PAGE_L2);
vmi_ops.allocate_page(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE, clonepfn, start, count);
}
--------------- -----static void vmi_release_pte(u32 pfn)
+++++++++++++++ +++++static void vmi_release_pte(unsigned long pfn)
{
vmi_ops.release_page(pfn, VMI_PAGE_L1);
vmi_set_page_type(pfn, VMI_PAGE_NORMAL);
}
--------------- -----static void vmi_release_pmd(u32 pfn)
+++++++++++++++ +++++static void vmi_release_pmd(unsigned long pfn)
{
vmi_ops.release_page(pfn, VMI_PAGE_L2);
vmi_set_page_type(pfn, VMI_PAGE_NORMAL);
{
/* We must establish the lowmem mapping for MMU ops to work */
if (vmi_ops.set_linear_mapping)
- vmi_ops.set_linear_mapping(0, (void *)__PAGE_OFFSET, max_low_pfn, 0);
+ vmi_ops.set_linear_mapping(0, (void *)__PAGE_OFFSET, MAXMEM_PFN, 0);
}
/*
int arch_report_meminfo(char *page)
{
- - - - int n = sprintf(page, "DirectMap4k: %8lu\n"
- - - - "DirectMap2M: %8lu\n",
- - - - direct_pages_count[PG_LEVEL_4K],
- - - - direct_pages_count[PG_LEVEL_2M]);
+ + + + int n = sprintf(page, "DirectMap4k: %8lu kB\n",
+ + + + direct_pages_count[PG_LEVEL_4K] << 2);
+ + + + #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+ + + + n += sprintf(page + n, "DirectMap2M: %8lu kB\n",
+ + + + direct_pages_count[PG_LEVEL_2M] << 11);
+ + + + #else
+ + + + n += sprintf(page + n, "DirectMap4M: %8lu kB\n",
+ + + + direct_pages_count[PG_LEVEL_2M] << 12);
+ + + + #endif
#ifdef CONFIG_X86_64
- - - - n += sprintf(page + n, "DirectMap1G: %8lu\n",
- - - - direct_pages_count[PG_LEVEL_1G]);
+ + + + if (direct_gbpages)
+ + + + n += sprintf(page + n, "DirectMap1G: %8lu kB\n",
+ + + + direct_pages_count[PG_LEVEL_1G] << 20);
#endif
return n;
}
static inline unsigned long highmap_end_pfn(void)
{
-- ------------------ return __pa(round_up((unsigned long)_end, PMD_SIZE)) >> PAGE_SHIFT;
++ ++++++++++++++++++ return __pa(roundup((unsigned long)_end, PMD_SIZE)) >> PAGE_SHIFT;
}
#endif
if (!pte_val(old_pte)) {
if (!primary)
return 0;
- - - - printk(KERN_WARNING "CPA: called for zero pte. "
+ + + + WARN(1, KERN_WARNING "CPA: called for zero pte. "
"vaddr = %lx cpa->vaddr = %lx\n", address,
cpa->vaddr);
- - - - WARN_ON(1);
return -EINVAL;
}
/*
* for now UC MINUS. see comments in ioremap_nocache()
*/
- - - - -- - if (reserve_memtype(addr, addr + numpages * PAGE_SIZE,
+ + + + ++ + if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
_PAGE_CACHE_UC_MINUS, NULL))
return -EINVAL;
if (!pat_enabled)
return set_memory_uc(addr, numpages);
- - - - -- - if (reserve_memtype(addr, addr + numpages * PAGE_SIZE,
+ + + + ++ + if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
_PAGE_CACHE_WC, NULL))
return -EINVAL;
int set_memory_wb(unsigned long addr, int numpages)
{
- - - - -- - free_memtype(addr, addr + numpages * PAGE_SIZE);
+ + + + ++ + free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
return _set_memory_wb(addr, numpages);
}
{
return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_RW));
}
++++++++ ++++++++++++EXPORT_SYMBOL_GPL(set_memory_ro);
int set_memory_rw(unsigned long addr, int numpages)
{
return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW));
}
++++++++ ++++++++++++EXPORT_SYMBOL_GPL(set_memory_rw);
int set_memory_np(unsigned long addr, int numpages)
{
/* Early in boot, while setting up the initial pagetable, assume
everything is pinned. */
--------------- -----static __init void xen_alloc_pte_init(struct mm_struct *mm, u32 pfn)
+++++++++++++++ +++++static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
{
#ifdef CONFIG_FLATMEM
BUG_ON(mem_map); /* should only be used early */
/* Early release_pte assumes that all pts are pinned, since there's
only init_mm and anything attached to that is pinned. */
--------------- -----static void xen_release_pte_init(u32 pfn)
+++++++++++++++ +++++static void xen_release_pte_init(unsigned long pfn)
{
make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}
/* This needs to make sure the new pte page is pinned iff its being
attached to a pinned pagetable. */
--------------- -----static void xen_alloc_ptpage(struct mm_struct *mm, u32 pfn, unsigned level)
+++++++++++++++ +++++static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level)
{
struct page *page = pfn_to_page(pfn);
}
}
--------------- -----static void xen_alloc_pte(struct mm_struct *mm, u32 pfn)
+++++++++++++++ +++++static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
xen_alloc_ptpage(mm, pfn, PT_PTE);
}
--------------- -----static void xen_alloc_pmd(struct mm_struct *mm, u32 pfn)
+++++++++++++++ +++++static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
xen_alloc_ptpage(mm, pfn, PT_PMD);
}
}
/* This should never happen until we're OK to use struct page */
--------------- -----static void xen_release_ptpage(u32 pfn, unsigned level)
+++++++++++++++ +++++static void xen_release_ptpage(unsigned long pfn, unsigned level)
{
struct page *page = pfn_to_page(pfn);
}
}
--------------- -----static void xen_release_pte(u32 pfn)
+++++++++++++++ +++++static void xen_release_pte(unsigned long pfn)
{
xen_release_ptpage(pfn, PT_PTE);
}
--------------- -----static void xen_release_pmd(u32 pfn)
+++++++++++++++ +++++static void xen_release_pmd(unsigned long pfn)
{
xen_release_ptpage(pfn, PT_PMD);
}
#if PAGETABLE_LEVELS == 4
--------------- -----static void xen_alloc_pud(struct mm_struct *mm, u32 pfn)
+++++++++++++++ +++++static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
xen_alloc_ptpage(mm, pfn, PT_PUD);
}
--------------- -----static void xen_release_pud(u32 pfn)
+++++++++++++++ +++++static void xen_release_pud(unsigned long pfn)
{
xen_release_ptpage(pfn, PT_PUD);
}
.ptep_modify_prot_commit = __ptep_modify_prot_commit,
.pte_val = xen_pte_val,
- - -- -- --- - - .pte_flags = native_pte_val,
+ + ++ ++ +++ + + .pte_flags = native_pte_flags,
.pgd_val = xen_pgd_val,
.make_pte = xen_make_pte,
--------------------#ifndef _ASM_X86_APIC_H
--------------------#define _ASM_X86_APIC_H
++++++++++++++++++++#ifndef ASM_X86__APIC_H
++++++++++++++++++++#define ASM_X86__APIC_H
#include <linux/pm.h>
#include <linux/delay.h>
#endif
extern int is_vsmp_box(void);
++++++++++++++++++++extern void xapic_wait_icr_idle(void);
++++++++++++++++++++extern u32 safe_xapic_wait_icr_idle(void);
++++++++++++++++++++extern u64 xapic_icr_read(void);
++++++++++++++++++++extern void xapic_icr_write(u32, u32);
++++++++++++++++++++extern int setup_profiling_timer(unsigned int);
static inline void native_apic_write(unsigned long reg, u32 v)
{
static inline void ack_APIC_irq(void)
{
/*
-- ------------------ * ack_APIC_irq() actually gets compiled as a single instruction:
-- ------------------ * - a single rmw on Pentium/82489DX
-- ------------------ * - a single write on P6+ cores (CONFIG_X86_GOOD_APIC)
++ ++++++++++++++++++ * ack_APIC_irq() actually gets compiled as a single instruction
* ... yummie.
*/
#endif /* !CONFIG_X86_LOCAL_APIC */
--------------------#endif /* __ASM_APIC_H */
++++++++++++++++++++#endif /* ASM_X86__APIC_H */
--------------------#ifndef _ASM_X86_ASM_H
--------------------#define _ASM_X86_ASM_H
++++++++++++++++++++#ifndef ASM_X86__ASM_H
++++++++++++++++++++#define ASM_X86__ASM_H
#ifdef __ASSEMBLY__
# define __ASM_FORM(x) x
#define _ASM_PTR __ASM_SEL(.long, .quad)
#define _ASM_ALIGN __ASM_SEL(.balign 4, .balign 8)
-- ------------------#define _ASM_MOV_UL __ASM_SIZE(mov)
++ ++++++++++++++++++#define _ASM_MOV __ASM_SIZE(mov)
#define _ASM_INC __ASM_SIZE(inc)
#define _ASM_DEC __ASM_SIZE(dec)
#define _ASM_ADD __ASM_SIZE(add)
#define _ASM_SUB __ASM_SIZE(sub)
#define _ASM_XADD __ASM_SIZE(xadd)
++ ++++++++++++++++++
#define _ASM_AX __ASM_REG(ax)
#define _ASM_BX __ASM_REG(bx)
#define _ASM_CX __ASM_REG(cx)
#define _ASM_DX __ASM_REG(dx)
++ ++++++++++++++++++#define _ASM_SP __ASM_REG(sp)
++ ++++++++++++++++++#define _ASM_BP __ASM_REG(bp)
++ ++++++++++++++++++#define _ASM_SI __ASM_REG(si)
++ ++++++++++++++++++#define _ASM_DI __ASM_REG(di)
/* Exception table entry */
# define _ASM_EXTABLE(from,to) \
_ASM_PTR #from "," #to "\n" \
" .previous\n"
--------------------#endif /* _ASM_X86_ASM_H */
++++++++++++++++++++#endif /* ASM_X86__ASM_H */
--------------------#ifndef _ASM_X86_ELF_H
--------------------#define _ASM_X86_ELF_H
++++++++++++++++++++#ifndef ASM_X86__ELF_H
++++++++++++++++++++#define ASM_X86__ELF_H
/*
* ELF register definitions..
static inline void start_ia32_thread(struct pt_regs *regs, u32 ip, u32 sp)
{
-- ------------------ asm volatile("movl %0,%%fs" :: "r" (0));
-- ------------------ asm volatile("movl %0,%%es; movl %0,%%ds" : : "r" (__USER32_DS));
++ ++++++++++++++++++ loadsegment(fs, 0);
++ ++++++++++++++++++ loadsegment(ds, __USER32_DS);
++ ++++++++++++++++++ loadsegment(es, __USER32_DS);
load_gs_index(0);
regs->ip = ip;
regs->sp = sp;
extern unsigned long arch_randomize_brk(struct mm_struct *mm);
#define arch_randomize_brk arch_randomize_brk
--------------------#endif
++++++++++++++++++++#endif /* ASM_X86__ELF_H */
--------------------#ifndef _ASM_X86_FUTEX_H
--------------------#define _ASM_X86_FUTEX_H
++++++++++++++++++++#ifndef ASM_X86__FUTEX_H
++++++++++++++++++++#define ASM_X86__FUTEX_H
#ifdef __KERNEL__
asm volatile("1:\tmovl %2, %0\n" \
"\tmovl\t%0, %3\n" \
"\t" insn "\n" \
- ------------------- "2:\tlock; cmpxchgl %3, %2\n" \
+ +++++++++++++++++++ "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
"\tjnz\t1b\n" \
"3:\t.section .fixup,\"ax\"\n" \
"4:\tmov\t%5, %1\n" \
__futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
break;
case FUTEX_OP_ADD:
- ------------------- __futex_atomic_op1("lock; xaddl %0, %2", ret, oldval,
+ +++++++++++++++++++ __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
uaddr, oparg);
break;
case FUTEX_OP_OR:
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
- ------------------- asm volatile("1:\tlock; cmpxchgl %3, %1\n"
+ +++++++++++++++++++ asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
"2:\t.section .fixup, \"ax\"\n"
"3:\tmov %2, %0\n"
"\tjmp 2b\n"
}
#endif
--------------------#endif
++++++++++++++++++++#endif /* ASM_X86__FUTEX_H */
--------------------#ifndef _ASM_X8664_GART_H
--------------------#define _ASM_X8664_GART_H 1
++++++++++++++++++++#ifndef ASM_X86__GART_H
++++++++++++++++++++#define ASM_X86__GART_H
#include <asm/e820.h>
return 0;
if (aper_base + aper_size > 0x100000000ULL) {
----- --------------- printk(KERN_ERR "Aperture beyond 4GB. Ignoring.\n");
+++++ +++++++++++++++ printk(KERN_INFO "Aperture beyond 4GB. Ignoring.\n");
return 0;
}
if (e820_any_mapped(aper_base, aper_base + aper_size, E820_RAM)) {
----- --------------- printk(KERN_ERR "Aperture pointing to e820 RAM. Ignoring.\n");
+++++ +++++++++++++++ printk(KERN_INFO "Aperture pointing to e820 RAM. Ignoring.\n");
return 0;
}
if (aper_size < min_size) {
----- --------------- printk(KERN_ERR "Aperture too small (%d MB) than (%d MB)\n",
+++++ +++++++++++++++ printk(KERN_INFO "Aperture too small (%d MB) than (%d MB)\n",
aper_size>>20, min_size>>20);
return 0;
}
return 1;
}
--------------------#endif
++++++++++++++++++++#endif /* ASM_X86__GART_H */
--------------------#ifndef _RDC321X_GPIO_H
--------------------#define _RDC321X_GPIO_H
++++++++++++++++++++#ifndef ASM_X86__MACH_RDC321X__GPIO_H
++++++++++++++++++++#define ASM_X86__MACH_RDC321X__GPIO_H
++++ +++++++++++++++
+++++ +++++++++++++++#include <linux/kernel.h>
+
extern int rdc_gpio_get_value(unsigned gpio);
extern void rdc_gpio_set_value(unsigned gpio, int value);
extern int rdc_gpio_direction_input(unsigned gpio);
static inline void gpio_free(unsigned gpio)
{
+++++ +++++++++++++++ might_sleep();
rdc_gpio_free(gpio);
}
/* For cansleep */
#include <asm-generic/gpio.h>
--------------------#endif /* _RDC321X_GPIO_H_ */
++++++++++++++++++++#endif /* ASM_X86__MACH_RDC321X__GPIO_H */
--------------------#ifndef _ASM_X86_MMU_H
--------------------#define _ASM_X86_MMU_H
++++++++++++++++++++#ifndef ASM_X86__MMU_H
++++++++++++++++++++#define ASM_X86__MMU_H
#include <linux/spinlock.h>
#include <linux/mutex.h>
/*
* The x86 doesn't have a mmu context, but
* we put the segment information here.
-- ------------------ *
-- ------------------ * cpu_vm_mask is used to optimize ldt flushing.
*/
typedef struct {
void *ldt;
-- ------------------#ifdef CONFIG_X86_64
-- ------------------ rwlock_t ldtlock;
-- ------------------#endif
int size;
struct mutex lock;
void *vdso;
}
#endif
--------------------#endif /* _ASM_X86_MMU_H */
++++++++++++++++++++#endif /* ASM_X86__MMU_H */
--------------------#ifndef __ASM_X86_MSR_H_
--------------------#define __ASM_X86_MSR_H_
++++++++++++++++++++#ifndef ASM_X86__MSR_H
++++++++++++++++++++#define ASM_X86__MSR_H
#include <asm/msr-index.h>
{
DECLARE_ARGS(val, low, high);
- - - -- --- - : "c" (msr), "i" (-EFAULT));
+ + + ++ +++ + asm volatile("2: rdmsr ; xor %[err],%[err]\n"
+ + + ++ +++ + "1:\n\t"
+ + + ++ +++ + ".section .fixup,\"ax\"\n\t"
+ + + ++ +++ + "3: mov %[fault],%[err] ; jmp 1b\n\t"
+ + + ++ +++ + ".previous\n\t"
+ + + ++ +++ + _ASM_EXTABLE(2b, 3b)
+ + + ++ +++ + : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
+ + + ++ +++ + : "c" (msr), [fault] "i" (-EFAULT));
+ + + ++ +++ + return EAX_EDX_VAL(val, low, high);
+ + + ++ +++ +}
+ + + ++ +++ +
+++++ +++++++++++++++static inline unsigned long long native_read_msr_amd_safe(unsigned int msr,
+++++ +++++++++++++++ int *err)
+++++ +++++++++++++++{
+++++ +++++++++++++++ DECLARE_ARGS(val, low, high);
+++++ +++++++++++++++
+ + + + + +++ +++ asm volatile("2: rdmsr ; xor %0,%0\n"
+ + + + + +++ +++ "1:\n\t"
+ + + + + +++ +++ ".section .fixup,\"ax\"\n\t"
+ + + + + +++ +++ "3: mov %3,%0 ; jmp 1b\n\t"
+ + + + + +++ +++ ".previous\n\t"
+ + + + + +++ +++ _ASM_EXTABLE(2b, 3b)
+ + + + + +++ +++ : "=r" (*err), EAX_EDX_RET(val, low, high)
+++++ +++++++++++++++ : "c" (msr), "D" (0x9c5a203a), "i" (-EFAULT));
+ + + + + +++ +++ return EAX_EDX_VAL(val, low, high);
+ + + + + +++ +++ }
+ + + + + +++ +++
static inline void native_write_msr(unsigned int msr,
unsigned low, unsigned high)
{
unsigned low, unsigned high)
{
int err;
- - - -- --- - asm volatile("2: wrmsr ; xor %0,%0\n"
+ + + ++ +++ + asm volatile("2: wrmsr ; xor %[err],%[err]\n"
"1:\n\t"
".section .fixup,\"ax\"\n\t"
- - - -- --- - "3: mov %4,%0 ; jmp 1b\n\t"
+ + + ++ +++ + "3: mov %[fault],%[err] ; jmp 1b\n\t"
".previous\n\t"
_ASM_EXTABLE(2b, 3b)
- - - -- --- - : "=a" (err)
+ + + ++ +++ + : [err] "=a" (err)
: "c" (msr), "0" (low), "d" (high),
- - - -- --- - "i" (-EFAULT)
+ + + ++ +++ + [fault] "i" (-EFAULT)
: "memory");
return err;
}
*p = native_read_msr_safe(msr, &err);
return err;
}
+++++ +++++++++++++++static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
+++++ +++++++++++++++{
+++++ +++++++++++++++ int err;
+++++ +++++++++++++++
+++++ +++++++++++++++ *p = native_read_msr_amd_safe(msr, &err);
+++++ +++++++++++++++ return err;
+++++ +++++++++++++++}
#define rdtscl(low) \
((low) = (u32)native_read_tsc())
#define write_rdtscp_aux(val) wrmsr(0xc0000103, (val), 0)
#ifdef CONFIG_SMP
- - - -- --- -void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
- - - -- --- -void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+ + + ++ +++ +int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
+ + + ++ +++ +int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
- - - -- --- -
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
#else /* CONFIG_SMP */
- - - -- --- -static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+ + + ++ +++ +static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
rdmsr(msr_no, *l, *h);
+ + + ++ +++ + return 0;
}
- - - -- --- -static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
+ + + ++ +++ +static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
wrmsr(msr_no, l, h);
+ + + ++ +++ + return 0;
}
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
u32 *l, u32 *h)
#endif /* __KERNEL__ */
--------------------#endif
++++++++++++++++++++#endif /* ASM_X86__MSR_H */
--------------------#ifndef _ASM_X86_NMI_H_
--------------------#define _ASM_X86_NMI_H_
++++++++++++++++++++#ifndef ASM_X86__NMI_H
++++++++++++++++++++#define ASM_X86__NMI_H
#include <linux/pm.h>
#include <asm/irq.h>
extern void disable_timer_nmi_watchdog(void);
extern void enable_timer_nmi_watchdog(void);
extern int nmi_watchdog_tick(struct pt_regs *regs, unsigned reason);
+++++++++++++ +++++++extern void cpu_nmi_set_wd_enabled(void);
extern atomic_t nmi_active;
extern unsigned int nmi_watchdog;
void stop_nmi(void);
void restart_nmi(void);
--------------------#endif
++++++++++++++++++++#endif /* ASM_X86__NMI_H */
--------------------#ifndef _ASM_X86_PAGE_32_H
--------------------#define _ASM_X86_PAGE_32_H
++++++++++++++++++++#ifndef ASM_X86__PAGE_32_H
++++++++++++++++++++#define ASM_X86__PAGE_32_H
/*
* This handles the memory map.
extern unsigned int __VMALLOC_RESERVE;
extern int sysctl_legacy_va_layout;
-------------------- #define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE)
-------------------- #define MAXMEM (-__PAGE_OFFSET - __VMALLOC_RESERVE)
--------------------
extern void find_low_pfn_range(void);
extern unsigned long init_memory_mapping(unsigned long start,
unsigned long end);
extern void initmem_init(unsigned long, unsigned long);
++++++++++++++++++++extern void free_initmem(void);
extern void setup_bootmem_allocator(void);
#endif /* CONFIG_X86_3DNOW */
#endif /* !__ASSEMBLY__ */
--------------------#endif /* _ASM_X86_PAGE_32_H */
++++++++++++++++++++#endif /* ASM_X86__PAGE_32_H */
--------------------#ifndef __ASM_PARAVIRT_H
--------------------#define __ASM_PARAVIRT_H
++++++++++++++++++++#ifndef ASM_X86__PARAVIRT_H
++++++++++++++++++++#define ASM_X86__PARAVIRT_H
/* Various instructions on x86 need to be replaced for
* para-virtualization: those hooks are defined here. */
/* MSR, PMC and TSR operations.
err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */
+++++ +++++++++++++++ u64 (*read_msr_amd)(unsigned int msr, int *err);
u64 (*read_msr)(unsigned int msr, int *err);
int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
* Hooks for allocating/releasing pagetable pages when they're
* attached to a pagetable
*/
--------------- ----- void (*alloc_pte)(struct mm_struct *mm, u32 pfn);
--------------- ----- void (*alloc_pmd)(struct mm_struct *mm, u32 pfn);
--------------- ----- void (*alloc_pmd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
--------------- ----- void (*alloc_pud)(struct mm_struct *mm, u32 pfn);
--------------- ----- void (*release_pte)(u32 pfn);
--------------- ----- void (*release_pmd)(u32 pfn);
--------------- ----- void (*release_pud)(u32 pfn);
+++++++++++++++ +++++ void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
+++++++++++++++ +++++ void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
+++++++++++++++ +++++ void (*alloc_pmd_clone)(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count);
+++++++++++++++ +++++ void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
+++++++++++++++ +++++ void (*release_pte)(unsigned long pfn);
+++++++++++++++ +++++ void (*release_pmd)(unsigned long pfn);
+++++++++++++++ +++++ void (*release_pud)(unsigned long pfn);
/* Pagetable manipulation functions */
void (*set_pte)(pte_t *ptep, pte_t pteval);
{
return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
}
+++++ +++++++++++++++static inline u64 paravirt_read_msr_amd(unsigned msr, int *err)
+++++ +++++++++++++++{
+++++ +++++++++++++++ return PVOP_CALL2(u64, pv_cpu_ops.read_msr_amd, msr, err);
+++++ +++++++++++++++}
static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
*p = paravirt_read_msr(msr, &err);
return err;
}
+++++ +++++++++++++++static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
+++++ +++++++++++++++{
+++++ +++++++++++++++ int err;
+++++ +++++++++++++++
+++++ +++++++++++++++ *p = paravirt_read_msr_amd(msr, &err);
+++++ +++++++++++++++ return err;
+++++ +++++++++++++++}
static inline u64 paravirt_read_tsc(void)
{
PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
}
--------------- -----static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned pfn)
+++++++++++++++ +++++static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
}
--------------- -----static inline void paravirt_release_pte(unsigned pfn)
+++++++++++++++ +++++static inline void paravirt_release_pte(unsigned long pfn)
{
PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
}
--------------- -----static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned pfn)
+++++++++++++++ +++++static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
}
--------------- -----static inline void paravirt_alloc_pmd_clone(unsigned pfn, unsigned clonepfn,
--------------- ----- unsigned start, unsigned count)
+++++++++++++++ +++++static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
+++++++++++++++ +++++ unsigned long start, unsigned long count)
{
PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count);
}
--------------- -----static inline void paravirt_release_pmd(unsigned pfn)
+++++++++++++++ +++++static inline void paravirt_release_pmd(unsigned long pfn)
{
PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
}
--------------- -----static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned pfn)
+++++++++++++++ +++++static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
}
--------------- -----static inline void paravirt_release_pud(unsigned pfn)
+++++++++++++++ +++++static inline void paravirt_release_pud(unsigned long pfn)
{
PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */
--------------------#endif /* __ASM_PARAVIRT_H */
++++++++++++++++++++#endif /* ASM_X86__PARAVIRT_H */
--------------------#ifndef _I386_PGTABLE_2LEVEL_H
--------------------#define _I386_PGTABLE_2LEVEL_H
++++++++++++++++++++#ifndef ASM_X86__PGTABLE_2LEVEL_H
++++++++++++++++++++#define ASM_X86__PGTABLE_2LEVEL_H
#define pte_ERROR(e) \
printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
#endif
----------- ---------#define pte_page(x) pfn_to_page(pte_pfn(x))
#define pte_none(x) (!(x).pte_low)
----------- ---------#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
/*
* Bits 0, 6 and 7 are taken, split up the 29 bits of offset
#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low })
#define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val })
--------------------#endif /* _I386_PGTABLE_2LEVEL_H */
++++++++++++++++++++#endif /* ASM_X86__PGTABLE_2LEVEL_H */
--------------------#ifndef _I386_PGTABLE_3LEVEL_H
--------------------#define _I386_PGTABLE_3LEVEL_H
++++++++++++++++++++#ifndef ASM_X86__PGTABLE_3LEVEL_H
++++++++++++++++++++#define ASM_X86__PGTABLE_3LEVEL_H
/*
* Intel Physical Address Extension (PAE) Mode - three-level page
return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
}
----------- ---------#define pte_page(x) pfn_to_page(pte_pfn(x))
----------- ---------
static inline int pte_none(pte_t pte)
{
return !pte.pte_low && !pte.pte_high;
}
----------- ---------static inline unsigned long pte_pfn(pte_t pte)
----------- ---------{
----------- --------- return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
----------- ---------}
----------- ---------
/*
* Bits 0, 6 and 7 are taken in the low part of the pte,
* put the 32 bits of offset into the high part.
#define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high })
#define __swp_entry_to_pte(x) ((pte_t){ { .pte_high = (x).val } })
--------------------#endif /* _I386_PGTABLE_3LEVEL_H */
++++++++++++++++++++#endif /* ASM_X86__PGTABLE_3LEVEL_H */
--------------------#ifndef _ASM_X86_PGTABLE_H
--------------------#define _ASM_X86_PGTABLE_H
++++++++++++++++++++#ifndef ASM_X86__PGTABLE_H
++++++++++++++++++++#define ASM_X86__PGTABLE_H
#define FIRST_USER_ADDRESS 0
return pte_val(pte) & _PAGE_SPECIAL;
}
+++++++++++ +++++++++static inline unsigned long pte_pfn(pte_t pte)
+++++++++++ +++++++++{
+++++++++++ +++++++++ return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
+++++++++++ +++++++++}
+++++++++++ +++++++++
+++++++++++ +++++++++#define pte_page(pte) pfn_to_page(pte_pfn(pte))
+++++++++++ +++++++++
static inline int pmd_large(pmd_t pte)
{
return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
static inline void native_pagetable_setup_done(pgd_t *base) {}
#endif
++++++++++++++++++++extern int arch_report_meminfo(char *page);
++++++++++++++++++++
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else /* !CONFIG_PARAVIRT */
#include <asm-generic/pgtable.h>
#endif /* __ASSEMBLY__ */
--------------------#endif /* _ASM_X86_PGTABLE_H */
++++++++++++++++++++#endif /* ASM_X86__PGTABLE_H */
--------------------#ifndef _I386_PGTABLE_H
--------------------#define _I386_PGTABLE_H
++++++++++++++++++++#ifndef ASM_X86__PGTABLE_32_H
++++++++++++++++++++#define ASM_X86__PGTABLE_32_H
/*
static inline void check_pgt_cache(void) { }
void paging_init(void);
++++++++++++++++++++extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
/*
* The Linux x86 paging architecture is 'compile-time dual-mode', it
* area for the same reason. ;)
*/
#define VMALLOC_OFFSET (8 * 1024 * 1024)
-------------------- #define VMALLOC_START (((unsigned long)high_memory + 2 * VMALLOC_OFFSET - 1) \
-------------------- & ~(VMALLOC_OFFSET - 1))
++++++++++++++++++++ #define VMALLOC_START ((unsigned long)high_memory + VMALLOC_OFFSET)
#ifdef CONFIG_X86_PAE
#define LAST_PKMAP 512
#else
# define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
#endif
++++++++++++++++++++ #define MAXMEM (VMALLOC_END - PAGE_OFFSET - __VMALLOC_RESERVE)
++++++++++++++++++++
/*
* Define this if things work differently on an i386 and an i486:
* it will (on an i486) warn about kernel memory accesses that are
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
--------------------#endif /* _I386_PGTABLE_H */
++++++++++++++++++++#endif /* ASM_X86__PGTABLE_32_H */
--------------------#ifndef _X86_64_PGTABLE_H
--------------------#define _X86_64_PGTABLE_H
++++++++++++++++++++#ifndef ASM_X86__PGTABLE_64_H
++++++++++++++++++++#define ASM_X86__PGTABLE_64_H
#include <linux/const.h>
#ifndef __ASSEMBLY__
#define VMALLOC_END _AC(0xffffe1ffffffffff, UL)
#define VMEMMAP_START _AC(0xffffe20000000000, UL)
#define MODULES_VADDR _AC(0xffffffffa0000000, UL)
- - - - #define MODULES_END _AC(0xfffffffffff00000, UL)
+ + + + #define MODULES_END _AC(0xffffffffff000000, UL)
#define MODULES_LEN (MODULES_END - MODULES_VADDR)
#ifndef __ASSEMBLY__
#define pte_present(x) (pte_val((x)) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT)) /* FIXME: is this right? */
----------- ---------#define pte_page(x) pfn_to_page(pte_pfn((x)))
----------- ---------#define pte_pfn(x) ((pte_val((x)) & __PHYSICAL_MASK) >> PAGE_SHIFT)
/*
* Macro to mark a page protection value as "uncacheable".
#define __HAVE_ARCH_PTE_SAME
#endif /* !__ASSEMBLY__ */
--------------------#endif /* _X86_64_PGTABLE_H */
++++++++++++++++++++#endif /* ASM_X86__PGTABLE_64_H */
--------------------#ifndef _ASM_X86_RESUME_TRACE_H
--------------------#define _ASM_X86_RESUME_TRACE_H
++++++++++++++++++++#ifndef ASM_X86__RESUME_TRACE_H
++++++++++++++++++++#define ASM_X86__RESUME_TRACE_H
#include <asm/asm.h>
do { \
if (pm_trace_enabled) { \
const void *tracedata; \
-- ------------------ asm volatile(_ASM_MOV_UL " $1f,%0\n" \
++ ++++++++++++++++++ asm volatile(_ASM_MOV " $1f,%0\n" \
".section .tracedata,\"a\"\n" \
"1:\t.word %c1\n\t" \
_ASM_PTR " %c2\n" \
} \
} while (0)
--------------------#endif
++++++++++++++++++++#endif /* ASM_X86__RESUME_TRACE_H */
--------------------#ifndef _X86_SPINLOCK_H_
--------------------#define _X86_SPINLOCK_H_
++++++++++++++++++++#ifndef ASM_X86__SPINLOCK_H
++++++++++++++++++++#define ASM_X86__SPINLOCK_H
#include <asm/atomic.h>
#include <asm/rwlock.h>
{
int tmp = ACCESS_ONCE(lock->slock);
- - - - return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1;
+ + + + return (((tmp >> 8) - tmp) & 0xff) > 1;
}
static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
"jne 1f\n\t"
"movw %w0,%w1\n\t"
"incb %h1\n\t"
- ------------------- "lock ; cmpxchgw %w1,%2\n\t"
+ +++++++++++++++++++ LOCK_PREFIX "cmpxchgw %w1,%2\n\t"
"1:"
"sete %b1\n\t"
"movzbl %b1,%0\n\t"
{
int tmp = ACCESS_ONCE(lock->slock);
- - - - return (((tmp >> 16) & 0xffff) - (tmp & 0xffff)) > 1;
+ + + + return (((tmp >> 16) - tmp) & 0xffff) > 1;
}
static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
int inc = 0x00010000;
int tmp;
- ------------------- asm volatile("lock ; xaddl %0, %1\n"
+ +++++++++++++++++++ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
"movzwl %w0, %2\n\t"
"shrl $16, %0\n\t"
"1:\t"
"cmpl %0,%1\n\t"
"jne 1f\n\t"
"addl $0x00010000, %1\n\t"
- ------------------- "lock ; cmpxchgl %1,%2\n\t"
+ +++++++++++++++++++ LOCK_PREFIX "cmpxchgl %1,%2\n\t"
"1:"
"sete %b1\n\t"
"movzbl %b1,%0\n\t"
#define _raw_read_relax(lock) cpu_relax()
#define _raw_write_relax(lock) cpu_relax()
--------------------#endif
++++++++++++++++++++#endif /* ASM_X86__SPINLOCK_H */