projects
/
cascardo
/
linux.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
x86, 64bit: Copy struct boot_params early
[cascardo/linux.git]
/
kernel
/
watchdog.c
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index c8c21be..75a2ab3 100644 (file)
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -31,6 +31,7 @@
int watchdog_enabled = 1;
int __read_mostly watchdog_thresh = 10;
static int __read_mostly watchdog_disabled;
int watchdog_enabled = 1;
int __read_mostly watchdog_thresh = 10;
static int __read_mostly watchdog_disabled;
+static u64 __read_mostly sample_period;
static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
@@ -116,7 +117,7 @@ static unsigned long get_timestamp(int this_cpu)
return cpu_clock(this_cpu) >> 30LL; /* 2^30 ~= 10^9 */
}
return cpu_clock(this_cpu) >> 30LL; /* 2^30 ~= 10^9 */
}
-static u64 get_sample_period(void)
+static void set_sample_period(void)
{
/*
* convert watchdog_thresh from seconds to ns
{
/*
* convert watchdog_thresh from seconds to ns
@@ -125,7 +126,7 @@ static u64 get_sample_period(void)
* and hard thresholds) to increment before the
* hardlockup detector generates a warning
*/
* and hard thresholds) to increment before the
* hardlockup detector generates a warning
*/
-	return get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
+	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
}
/* Commands for resetting the watchdog */
}
/* Commands for resetting the watchdog */
@@ -275,7 +276,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
wake_up_process(__this_cpu_read(softlockup_watchdog));
/* .. and repeat */
wake_up_process(__this_cpu_read(softlockup_watchdog));
/* .. and repeat */
-	hrtimer_forward_now(hrtimer, ns_to_ktime(get_sample_period()));
+	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));
if (touch_ts == 0) {
if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
if (touch_ts == 0) {
if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
@@ -343,6 +344,10 @@ static void watchdog_enable(unsigned int cpu)
{
struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
{
struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
+ /* kick off the timer for the hardlockup detector */
+ hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer->function = watchdog_timer_fn;
+
if (!watchdog_enabled) {
kthread_park(current);
return;
if (!watchdog_enabled) {
kthread_park(current);
return;
@@ -351,12 +356,8 @@ static void watchdog_enable(unsigned int cpu)
/* Enable the perf event */
watchdog_nmi_enable(cpu);
/* Enable the perf event */
watchdog_nmi_enable(cpu);
- /* kick off the timer for the hardlockup detector */
- hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hrtimer->function = watchdog_timer_fn;
-
/* done here because hrtimer_start can only pin to smp_processor_id() */
/* done here because hrtimer_start can only pin to smp_processor_id() */
-	hrtimer_start(hrtimer, ns_to_ktime(get_sample_period()),
+	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
HRTIMER_MODE_REL_PINNED);
/* initialize timestamp */
HRTIMER_MODE_REL_PINNED);
/* initialize timestamp */
@@ -368,9 +369,6 @@ static void watchdog_disable(unsigned int cpu)
{
struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
{
struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
- if (!watchdog_enabled)
- return;
-
watchdog_set_prio(SCHED_NORMAL, 0);
hrtimer_cancel(hrtimer);
/* disable the perf event */
watchdog_set_prio(SCHED_NORMAL, 0);
hrtimer_cancel(hrtimer);
/* disable the perf event */
@@ -386,7 +384,7 @@ static int watchdog_should_run(unsigned int cpu)
/*
* The watchdog thread function - touches the timestamp.
*
/*
* The watchdog thread function - touches the timestamp.
*
- * It only runs once every get_sample_period() seconds (4 seconds by
+ * It only runs once every sample_period seconds (4 seconds by
* default) to reset the softlockup timestamp. If this gets delayed
* for more than 2*watchdog_thresh seconds then the debug-printout
* triggers in watchdog_timer_fn().
* default) to reset the softlockup timestamp. If this gets delayed
* for more than 2*watchdog_thresh seconds then the debug-printout
* triggers in watchdog_timer_fn().
@@ -519,6 +517,7 @@ int proc_dowatchdog(struct ctl_table *table, int write,
if (ret || !write)
return ret;
if (ret || !write)
return ret;
+ set_sample_period();
if (watchdog_enabled && watchdog_thresh)
watchdog_enable_all_cpus();
else
if (watchdog_enabled && watchdog_thresh)
watchdog_enable_all_cpus();
else
@@ -540,6 +539,7 @@ static struct smp_hotplug_thread watchdog_threads = {
void __init lockup_detector_init(void)
{
void __init lockup_detector_init(void)
{
+ set_sample_period();
if (smpboot_register_percpu_thread(&watchdog_threads)) {
pr_err("Failed to create watchdog threads, disabled\n");
watchdog_disabled = -ENODEV;
if (smpboot_register_percpu_thread(&watchdog_threads)) {
pr_err("Failed to create watchdog threads, disabled\n");
watchdog_disabled = -ENODEV;