#include <linux/export.h>
#include <linux/sched.h>
#include <linux/tsacct_kern.h>
#include <linux/kernel_stat.h>
#include <linux/static_key.h>
#include <linux/context_tracking.h>
#include "sched.h"
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING

/*
 * There are no locks covering percpu hardirq/softirq time.
 * They are only modified in vtime_account, on the corresponding CPU
 * with interrupts disabled. So, writes are safe.
 * They are read and saved off onto struct rq in update_rq_clock().
 * This may result in another CPU reading this CPU's irq time and can
 * race with irq/vtime_account on this CPU. We would either get old
 * or new value with a side effect of accounting a slice of irq time to the
 * wrong task when an irq is in progress while we read rq->clock. That is a
 * worthy compromise in place of having locks on each irq in account_system_time.
 */
DEFINE_PER_CPU(u64, cpu_hardirq_time);
DEFINE_PER_CPU(u64, cpu_softirq_time);

static DEFINE_PER_CPU(u64, irq_start_time);
static int sched_clock_irqtime;

void enable_sched_clock_irqtime(void)
{
        sched_clock_irqtime = 1;
}

void disable_sched_clock_irqtime(void)
{
        sched_clock_irqtime = 0;
}

#ifndef CONFIG_64BIT
DEFINE_PER_CPU(seqcount_t, irq_time_seq);
#endif /* CONFIG_64BIT */
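
/*
 * Illustrative sketch of how the 32-bit read side is expected to pair
 * with irq_time_seq (the real helpers live in sched.h, not here; this
 * is an assumption for illustration, kept inside a comment):
 *
 *	do {
 *		seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
 *		irq_time = per_cpu(cpu_softirq_time, cpu) +
 *			   per_cpu(cpu_hardirq_time, cpu);
 *	} while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));
 *
 * The writer brackets its u64 updates with sequence count bumps so a
 * 32-bit reader never observes a torn, half-written value.
 */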

/*
 * Called before incrementing preempt_count on {soft,}irq_enter
 * and before decrementing preempt_count on {soft,}irq_exit.
 */
void irqtime_account_irq(struct task_struct *curr)
{
        s64 delta;
        int cpu;

        if (!sched_clock_irqtime)
                return;

        cpu = smp_processor_id();
        delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
        __this_cpu_add(irq_start_time, delta);

        irq_time_write_begin();
        /*
         * We do not account for softirq time from ksoftirqd here.
         * We want to continue accounting softirq time to the ksoftirqd
         * thread in that case, so as not to confuse the scheduler with a
         * special task that does not consume any time, but still wants to run.
         */
        if (hardirq_count())
                __this_cpu_add(cpu_hardirq_time, delta);
        else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
                __this_cpu_add(cpu_softirq_time, delta);

        irq_time_write_end();
}
EXPORT_SYMBOL_GPL(irqtime_account_irq);
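
/*
 * Call-site note (an assumption based on the generic irq entry code of
 * this era, see include/linux/vtime.h): irq_enter() and irq_exit() reach
 * this function through the account_irq_enter_time() /
 * account_irq_exit_time() wrappers, i.e. once just before preempt_count
 * gains the HARDIRQ/SOFTIRQ offset and once just before it loses it, so
 * each delta is charged to the context that ran up to that transition.
 */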

static cputime_t irqtime_account_hi_update(cputime_t maxtime)
{
        u64 *cpustat = kcpustat_this_cpu->cpustat;
        cputime_t irq_cputime;

        irq_cputime = nsecs_to_cputime64(__this_cpu_read(cpu_hardirq_time)) -
                      cpustat[CPUTIME_IRQ];
        irq_cputime = min(irq_cputime, maxtime);
        cpustat[CPUTIME_IRQ] += irq_cputime;

        return irq_cputime;
}

static cputime_t irqtime_account_si_update(cputime_t maxtime)
{
        u64 *cpustat = kcpustat_this_cpu->cpustat;
        cputime_t softirq_cputime;

        softirq_cputime = nsecs_to_cputime64(__this_cpu_read(cpu_softirq_time)) -
                          cpustat[CPUTIME_SOFTIRQ];
        softirq_cputime = min(softirq_cputime, maxtime);
        cpustat[CPUTIME_SOFTIRQ] += softirq_cputime;

        return softirq_cputime;
}
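
/*
 * Worked example of the capping in the two helpers above: if
 * cpu_hardirq_time converts to 7 cputime units while cpustat[CPUTIME_IRQ]
 * already holds 5, the pending delta is 2; with maxtime == 1 only one
 * unit is folded in now and the remaining unit is picked up by a later
 * call.
 */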

#else /* CONFIG_IRQ_TIME_ACCOUNTING */

#define sched_clock_irqtime	(0)

static cputime_t irqtime_account_hi_update(cputime_t dummy)
{
        return 0;
}

static cputime_t irqtime_account_si_update(cputime_t dummy)
{
        return 0;
}

#endif /* !CONFIG_IRQ_TIME_ACCOUNTING */

static inline void task_group_account_field(struct task_struct *p, int index,
                                            u64 tmp)
{
        /*
         * Since all updates are sure to touch the root cgroup, we
         * get ourselves ahead and touch it first. If the root cgroup
         * is the only cgroup, then nothing else should be necessary.
         */
        __this_cpu_add(kernel_cpustat.cpustat[index], tmp);

        cpuacct_account_field(p, index, tmp);
}

/*
 * Account user cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in user space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
void account_user_time(struct task_struct *p, cputime_t cputime,
                       cputime_t cputime_scaled)
{
        int index;

        /* Add user time to process. */
        p->utime += cputime;
        p->utimescaled += cputime_scaled;
        account_group_user_time(p, cputime);

        index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;

        /* Add user time to cpustat. */
        task_group_account_field(p, index, (__force u64) cputime);

        /* Account for user time used */
        acct_account_cputime(p);
}

/*
 * Account guest cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in virtual machine since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
static void account_guest_time(struct task_struct *p, cputime_t cputime,
                               cputime_t cputime_scaled)
{
        u64 *cpustat = kcpustat_this_cpu->cpustat;

        /* Add guest time to process. */
        p->utime += cputime;
        p->utimescaled += cputime_scaled;
        account_group_user_time(p, cputime);
        p->gtime += cputime;

        /* Add guest time to cpustat. */
        if (task_nice(p) > 0) {
                cpustat[CPUTIME_NICE] += (__force u64) cputime;
                cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime;
        } else {
                cpustat[CPUTIME_USER] += (__force u64) cputime;
                cpustat[CPUTIME_GUEST] += (__force u64) cputime;
        }
}
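
/*
 * Note that guest time is deliberately accounted twice above: once in
 * CPUTIME_USER/CPUTIME_NICE and once in CPUTIME_GUEST/CPUTIME_GUEST_NICE.
 * The user field of /proc/stat therefore already includes guest time,
 * and consumers that want pure user time subtract the guest column.
 */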

/*
 * Account system cpu time to a process and desired cpustat field
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in kernel space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 * @index: index of the cpustat field that has to be updated
 */
static inline
void __account_system_time(struct task_struct *p, cputime_t cputime,
                           cputime_t cputime_scaled, int index)
{
        /* Add system time to process. */
        p->stime += cputime;
        p->stimescaled += cputime_scaled;
        account_group_system_time(p, cputime);

        /* Add system time to cpustat. */
        task_group_account_field(p, index, (__force u64) cputime);

        /* Account for system time used */
        acct_account_cputime(p);
}

/*
 * Account system cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @hardirq_offset: the offset to subtract from hardirq_count()
 * @cputime: the cpu time spent in kernel space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
void account_system_time(struct task_struct *p, int hardirq_offset,
                         cputime_t cputime, cputime_t cputime_scaled)
{
        int index;

        if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
                account_guest_time(p, cputime, cputime_scaled);
                return;
        }

        if (hardirq_count() - hardirq_offset)
                index = CPUTIME_IRQ;
        else if (in_serving_softirq())
                index = CPUTIME_SOFTIRQ;
        else
                index = CPUTIME_SYSTEM;

        __account_system_time(p, cputime, cputime_scaled, index);
}
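
/*
 * The periodic tick calls this with hardirq_offset == HARDIRQ_OFFSET so
 * that the timer interrupt's own hardirq context is discounted: only
 * hardirq nesting beyond that offset is charged to CPUTIME_IRQ.
 */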

/*
 * Account for involuntary wait time.
 * @cputime: the cpu time spent in involuntary wait
 */
void account_steal_time(cputime_t cputime)
{
        u64 *cpustat = kcpustat_this_cpu->cpustat;

        cpustat[CPUTIME_STEAL] += (__force u64) cputime;
}

/*
 * Account for idle time.
 * @cputime: the cpu time spent in idle wait
 */
void account_idle_time(cputime_t cputime)
{
        u64 *cpustat = kcpustat_this_cpu->cpustat;
        struct rq *rq = this_rq();

        if (atomic_read(&rq->nr_iowait) > 0)
                cpustat[CPUTIME_IOWAIT] += (__force u64) cputime;
        else
                cpustat[CPUTIME_IDLE] += (__force u64) cputime;
}

/*
 * When a guest is interrupted for a longer amount of time, missed clock
 * ticks are not redelivered later. Due to that, this function may on
 * occasion account more time than the calling functions think elapsed.
 */
static __always_inline cputime_t steal_account_process_time(cputime_t maxtime)
{
#ifdef CONFIG_PARAVIRT
        if (static_key_false(&paravirt_steal_enabled)) {
                cputime_t steal_cputime;
                u64 steal;

                steal = paravirt_steal_clock(smp_processor_id());
                steal -= this_rq()->prev_steal_time;

                steal_cputime = min(nsecs_to_cputime(steal), maxtime);
                account_steal_time(steal_cputime);
                this_rq()->prev_steal_time += cputime_to_nsecs(steal_cputime);

                return steal_cputime;
        }
#endif
        return 0;
}

/*
 * Account how much elapsed time was spent in steal, irq, or softirq time.
 */
static inline cputime_t account_other_time(cputime_t max)
{
        cputime_t accounted;

        /* Shall be converted to a lockdep-enabled lightweight check */
        WARN_ON_ONCE(!irqs_disabled());

        accounted = steal_account_process_time(max);

        if (accounted < max)
                accounted += irqtime_account_hi_update(max - accounted);

        if (accounted < max)
                accounted += irqtime_account_si_update(max - accounted);

        return accounted;
}
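
/*
 * Worked example of the budgeting above: with max == 10, finding 4
 * units of steal leaves a budget of 6 for pending hardirq time, and
 * softirq time only gets whatever remains after that; the running
 * "accounted < max" checks keep the total within the caller's budget.
 */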

#ifdef CONFIG_64BIT
static inline u64 read_sum_exec_runtime(struct task_struct *t)
{
        return t->se.sum_exec_runtime;
}
#else
static u64 read_sum_exec_runtime(struct task_struct *t)
{
        u64 ns;
        struct rq_flags rf;
        struct rq *rq;

        rq = task_rq_lock(t, &rf);
        ns = t->se.sum_exec_runtime;
        task_rq_unlock(rq, t, &rf);

        return ns;
}
#endif
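
/*
 * The split above exists because a u64 load can tear on 32-bit while
 * the scheduler updates sum_exec_runtime concurrently; taking the rq
 * lock buys a consistent snapshot there, whereas on 64-bit the plain
 * load is sufficient.
 */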

/*
 * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
 * tasks (sum on group iteration) belonging to @tsk's group.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
        struct signal_struct *sig = tsk->signal;
        cputime_t utime, stime;
        struct task_struct *t;
        unsigned int seq, nextseq;
        unsigned long flags;

        /*
         * Update current task runtime to account pending time since last
         * scheduler action or thread_group_cputime() call. This thread group
         * might have other running tasks on different CPUs, but updating
         * their runtime can affect syscall performance, so we skip accounting
         * those pending times and rely only on values updated on tick or
         * other scheduler action.
         */
        if (same_thread_group(current, tsk))
                (void) task_sched_runtime(current);

        rcu_read_lock();
        /* Attempt a lockless read on the first round. */
        nextseq = 0;
        do {
                seq = nextseq;
                flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
                times->utime = sig->utime;
                times->stime = sig->stime;
                times->sum_exec_runtime = sig->sum_sched_runtime;

                for_each_thread(tsk, t) {
                        task_cputime(t, &utime, &stime);
                        times->utime += utime;
                        times->stime += stime;
                        times->sum_exec_runtime += read_sum_exec_runtime(t);
                }
                /* If lockless access failed, take the lock. */
                nextseq = 1;
        } while (need_seqretry(&sig->stats_lock, seq));
        done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
        rcu_read_unlock();
}
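
/*
 * The loop above is the read_seqbegin_or_lock() idiom: the first pass
 * runs locklessly with an even sequence count; if a writer raced,
 * need_seqretry() forces a second pass that takes stats_lock outright,
 * so a stream of writers cannot starve this reader indefinitely.
 */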

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * Account a tick to a process and cpustat
 * @p: the process that the cpu time gets accounted to
 * @user_tick: whether the tick is from userspace
 * @rq: the pointer to rq
 *
 * Tick demultiplexing follows the order
 * - pending hardirq update
 * - pending softirq update
 * - user_time
 * - idle_time
 * - system time
 *   - check for guest_time
 *   - else account as system_time
 *
 * The check for hardirq is done for both system and user time, as there
 * is no timer going off while we are in a hardirq and hence we may never
 * get an opportunity to update it solely in system time.
 * p->stime and friends are only updated on system time and not on
 * irq/softirq time, as those do not count in task exec_runtime any more.
 */
static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
                                         struct rq *rq, int ticks)
{
        u64 cputime = (__force u64) cputime_one_jiffy * ticks;
        cputime_t scaled, other;

        /*
         * When returning from idle, many ticks can get accounted at
         * once, including some ticks of steal, irq, and softirq time.
         * Subtract those ticks from the amount of time accounted to
         * idle, or potentially user or system time. Due to rounding,
         * other time can exceed ticks occasionally.
         */
        other = account_other_time(ULONG_MAX);
        if (other >= cputime)
                return;

        cputime -= other;
        scaled = cputime_to_scaled(cputime);

        if (this_cpu_ksoftirqd() == p) {
                /*
                 * ksoftirqd time does not get accounted in cpu_softirq_time.
                 * So, we have to handle it separately here.
                 * Also, p->stime needs to be updated for ksoftirqd.
                 */
                __account_system_time(p, cputime, scaled, CPUTIME_SOFTIRQ);
        } else if (user_tick) {
                account_user_time(p, cputime, scaled);
        } else if (p == rq->idle) {
                account_idle_time(cputime);
        } else if (p->flags & PF_VCPU) { /* System time or guest time */
                account_guest_time(p, cputime, scaled);
        } else {
                __account_system_time(p, cputime, scaled, CPUTIME_SYSTEM);
        }
}
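
/*
 * Worked example of the demultiplexing: returning from nohz idle with
 * ticks == 4 and one tick's worth of pending softirq time,
 * account_other_time() folds that one tick into CPUTIME_SOFTIRQ and the
 * remaining three ticks fall through the ladder above (ksoftirqd, user,
 * idle, guest or system).
 */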

static void irqtime_account_idle_ticks(int ticks)
{
        struct rq *rq = this_rq();

        irqtime_account_process_tick(current, 0, rq, ticks);
}
#else /* CONFIG_IRQ_TIME_ACCOUNTING */
static inline void irqtime_account_idle_ticks(int ticks) { }
static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
                                                struct rq *rq, int nr_ticks) { }
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */

/*
 * Use precise platform statistics if available:
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING

#ifndef __ARCH_HAS_VTIME_TASK_SWITCH
void vtime_common_task_switch(struct task_struct *prev)
{
        if (is_idle_task(prev))
                vtime_account_idle(prev);
        else
                vtime_account_system(prev);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
        vtime_account_user(prev);
#endif
        arch_vtime_task_switch(prev);
}
#endif /* __ARCH_HAS_VTIME_TASK_SWITCH */

#endif /* CONFIG_VIRT_CPU_ACCOUNTING */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
 * Archs that account the whole time spent in the idle task
 * (outside irq) as idle time can rely on this and just implement
 * vtime_account_system() and vtime_account_idle(). Archs that
 * have another meaning of the idle time (s390 only includes the
 * time spent by the CPU when it's in low power mode) must override
 * vtime_account().
 */
#ifndef __ARCH_HAS_VTIME_ACCOUNT
void vtime_account_irq_enter(struct task_struct *tsk)
{
        if (!in_interrupt() && is_idle_task(tsk))
                vtime_account_idle(tsk);
        else
                vtime_account_system(tsk);
}
EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
#endif /* __ARCH_HAS_VTIME_ACCOUNT */

void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
        *ut = p->utime;
        *st = p->stime;
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);

void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
        struct task_cputime cputime;

        thread_group_cputime(p, &cputime);

        *ut = cputime.utime;
        *st = cputime.stime;
}
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
/*
 * Account a single tick of cpu time.
 * @p: the process that the cpu time gets accounted to
 * @user_tick: indicates if the tick is a user or a system tick
 */
void account_process_tick(struct task_struct *p, int user_tick)
{
        cputime_t cputime, scaled, steal;
        struct rq *rq = this_rq();

        if (vtime_accounting_cpu_enabled())
                return;

        if (sched_clock_irqtime) {
                irqtime_account_process_tick(p, user_tick, rq, 1);
                return;
        }

        cputime = cputime_one_jiffy;
        steal = steal_account_process_time(ULONG_MAX);

        if (steal >= cputime)
                return;

        cputime -= steal;
        scaled = cputime_to_scaled(cputime);

        if (user_tick)
                account_user_time(p, cputime, scaled);
        else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
                account_system_time(p, HARDIRQ_OFFSET, cputime, scaled);
        else
                account_idle_time(cputime);
}
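
/*
 * The idle branch above is deliberately narrow: p == rq->idle alone is
 * not enough, because the idle task may itself have been interrupted by
 * a nested hardirq (irq_count() != HARDIRQ_OFFSET at tick time), in
 * which case the tick is charged as system time rather than idle time.
 */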

/*
 * Account multiple ticks of idle time.
 * @ticks: number of stolen ticks
 */
void account_idle_ticks(unsigned long ticks)
{
        cputime_t cputime, steal;

        if (sched_clock_irqtime) {
                irqtime_account_idle_ticks(ticks);
                return;
        }

        cputime = jiffies_to_cputime(ticks);
        steal = steal_account_process_time(ULONG_MAX);

        if (steal >= cputime)
                return;

        cputime -= steal;
        account_idle_time(cputime);
}

/*
 * Perform (stime * rtime) / total, but avoid multiplication overflow by
 * losing precision when the numbers are big.
 */
static cputime_t scale_stime(u64 stime, u64 rtime, u64 total)
{
        u64 scaled;

        for (;;) {
                /* Make sure "rtime" is the bigger of stime/rtime */
                if (stime > rtime)
                        swap(rtime, stime);

                /* Make sure 'total' fits in 32 bits */
                if (total >> 32)
                        goto drop_precision;

                /* Does rtime (and thus stime) fit in 32 bits? */
                if (!(rtime >> 32))
                        break;

                /* Can we just balance rtime/stime rather than dropping bits? */
                if (stime >> 31)
                        goto drop_precision;

                /* We can grow stime and shrink rtime and try to make them both fit */
                stime <<= 1;
                rtime >>= 1;
                continue;

drop_precision:
                /* We drop from rtime, it has more bits than stime */
                rtime >>= 1;
                total >>= 1;
        }

        /*
         * Make sure gcc understands that this is a 32x32->64 multiply,
         * followed by a 64/32->64 divide.
         */
        scaled = div_u64((u64) (u32) stime * (u64) (u32) rtime, (u32)total);
        return (__force cputime_t) scaled;
}
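
/*
 * Worked example: stime == 300, utime == 700, rtime == 2000 gives
 * scale_stime(300, 2000, 1000) == 600, preserving the 30/70 tick ratio
 * across the 2000 scheduler-observed units; all values fit in 32 bits,
 * so the loop exits on the first iteration without dropping precision.
 */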

/*
 * Adjust tick based cputime random precision against scheduler runtime
 * accounting.
 *
 * Tick based cputime accounting depends on random scheduling timeslices
 * of a task being interrupted or not by the timer. Depending on these
 * circumstances, the number of these interrupts may be over or
 * under-optimistic, matching the real user and system cputime with a
 * variable precision.
 *
 * Fix this by scaling these tick based values against the total runtime
 * accounted by the CFS scheduler.
 *
 * This code provides the following guarantees:
 *
 *   stime + utime == rtime
 *   stime_i+1 >= stime_i, utime_i+1 >= utime_i
 *
 * Assuming that rtime_i+1 >= rtime_i.
 */
static void cputime_adjust(struct task_cputime *curr,
                           struct prev_cputime *prev,
                           cputime_t *ut, cputime_t *st)
{
        cputime_t rtime, stime, utime;
        unsigned long flags;

        /* Serialize concurrent callers such that we can honour our guarantees */
        raw_spin_lock_irqsave(&prev->lock, flags);
        rtime = nsecs_to_cputime(curr->sum_exec_runtime);

        /*
         * This is possible under two circumstances:
         *  - rtime isn't monotonic after all (a bug);
         *  - we got reordered by the lock.
         *
         * In both cases this acts as a filter such that the rest of the code
         * can assume it is monotonic regardless of anything else.
         */
        if (prev->stime + prev->utime >= rtime)
                goto out;

        stime = curr->stime;
        utime = curr->utime;

        /*
         * If either stime or both stime and utime are 0, assume all runtime
         * is userspace. Once a task gets some ticks, the monotonicity code
         * at 'update' will ensure things converge to the observed ratio.
         */
        if (stime == 0) {
                utime = rtime;
                goto update;
        }

        if (utime == 0) {
                stime = rtime;
                goto update;
        }

        stime = scale_stime((__force u64)stime, (__force u64)rtime,
                            (__force u64)(stime + utime));

update:
        /*
         * Make sure stime doesn't go backwards; this preserves monotonicity
         * for utime because rtime is monotonic.
         *
         *  utime_i+1 = rtime_i+1 - stime_i
         *            = rtime_i+1 - (rtime_i - utime_i)
         *            = (rtime_i+1 - rtime_i) + utime_i
         *            >= utime_i
         */
        if (stime < prev->stime)
                stime = prev->stime;
        utime = rtime - stime;

        /*
         * Make sure utime doesn't go backwards; this still preserves
         * monotonicity for stime, analogous argument to above.
         */
        if (utime < prev->utime) {
                utime = prev->utime;
                stime = rtime - utime;
        }

        prev->stime = stime;
        prev->utime = utime;
out:
        *ut = prev->utime;
        *st = prev->stime;
        raw_spin_unlock_irqrestore(&prev->lock, flags);
}
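
/*
 * Worked example of the clamping: with prev == {stime 60, utime 40} and
 * a new rtime == 110 whose scaled stime comes out at 55, stime is
 * clamped to prev->stime == 60 and utime becomes 110 - 60 == 50, so
 * both fields stay monotonic and still sum to rtime.
 */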

void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
        struct task_cputime cputime = {
                .sum_exec_runtime = p->se.sum_exec_runtime,
        };

        task_cputime(p, &cputime.utime, &cputime.stime);
        cputime_adjust(&cputime, &p->prev_cputime, ut, st);
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);

void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
        struct task_cputime cputime;

        thread_group_cputime(p, &cputime);
        cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
}
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
static cputime_t vtime_delta(struct task_struct *tsk)
{
        unsigned long now = READ_ONCE(jiffies);

        if (time_before(now, (unsigned long)tsk->vtime_snap))
                return 0;

        return jiffies_to_cputime(now - tsk->vtime_snap);
}

static cputime_t get_vtime_delta(struct task_struct *tsk)
{
        unsigned long now = READ_ONCE(jiffies);
        cputime_t delta, other;

        /*
         * Unlike tick based timing, vtime based timing never has lost
         * ticks, and no need for steal time accounting to make up for
         * lost ticks. Vtime accounts a rounded version of actual
         * elapsed time. Limit account_other_time to prevent rounding
         * errors from causing elapsed vtime to go negative.
         */
        delta = jiffies_to_cputime(now - tsk->vtime_snap);
        other = account_other_time(delta);
        WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_INACTIVE);
        tsk->vtime_snap = now;

        return delta - other;
}

static void __vtime_account_system(struct task_struct *tsk)
{
        cputime_t delta_cpu = get_vtime_delta(tsk);

        account_system_time(tsk, irq_count(), delta_cpu, cputime_to_scaled(delta_cpu));
}

void vtime_account_system(struct task_struct *tsk)
{
        if (!vtime_delta(tsk))
                return;

        write_seqcount_begin(&tsk->vtime_seqcount);
        __vtime_account_system(tsk);
        write_seqcount_end(&tsk->vtime_seqcount);
}

void vtime_account_user(struct task_struct *tsk)
{
        cputime_t delta_cpu;

        write_seqcount_begin(&tsk->vtime_seqcount);
        tsk->vtime_snap_whence = VTIME_SYS;
        if (vtime_delta(tsk)) {
                delta_cpu = get_vtime_delta(tsk);
                account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
        }
        write_seqcount_end(&tsk->vtime_seqcount);
}

void vtime_user_enter(struct task_struct *tsk)
{
        write_seqcount_begin(&tsk->vtime_seqcount);
        if (vtime_delta(tsk))
                __vtime_account_system(tsk);
        tsk->vtime_snap_whence = VTIME_USER;
        write_seqcount_end(&tsk->vtime_seqcount);
}

void vtime_guest_enter(struct task_struct *tsk)
{
        /*
         * The flags must be updated under the lock with
         * the vtime_snap flush and update.
         * That enforces the right ordering and update sequence
         * synchronization against the reader (task_gtime())
         * that can thus safely catch up with a tickless delta.
         */
        write_seqcount_begin(&tsk->vtime_seqcount);
        if (vtime_delta(tsk))
                __vtime_account_system(tsk);
        current->flags |= PF_VCPU;
        write_seqcount_end(&tsk->vtime_seqcount);
}
EXPORT_SYMBOL_GPL(vtime_guest_enter);

void vtime_guest_exit(struct task_struct *tsk)
{
        write_seqcount_begin(&tsk->vtime_seqcount);
        __vtime_account_system(tsk);
        current->flags &= ~PF_VCPU;
        write_seqcount_end(&tsk->vtime_seqcount);
}
EXPORT_SYMBOL_GPL(vtime_guest_exit);

void vtime_account_idle(struct task_struct *tsk)
{
        cputime_t delta_cpu = get_vtime_delta(tsk);

        account_idle_time(delta_cpu);
}

void arch_vtime_task_switch(struct task_struct *prev)
{
        write_seqcount_begin(&prev->vtime_seqcount);
        prev->vtime_snap_whence = VTIME_INACTIVE;
        write_seqcount_end(&prev->vtime_seqcount);

        write_seqcount_begin(&current->vtime_seqcount);
        current->vtime_snap_whence = VTIME_SYS;
        current->vtime_snap = jiffies;
        write_seqcount_end(&current->vtime_seqcount);
}

void vtime_init_idle(struct task_struct *t, int cpu)
{
        unsigned long flags;

        local_irq_save(flags);
        write_seqcount_begin(&t->vtime_seqcount);
        t->vtime_snap_whence = VTIME_SYS;
        t->vtime_snap = jiffies;
        write_seqcount_end(&t->vtime_seqcount);
        local_irq_restore(flags);
}

cputime_t task_gtime(struct task_struct *t)
{
        unsigned int seq;
        cputime_t gtime;

        if (!vtime_accounting_enabled())
                return t->gtime;

        do {
                seq = read_seqcount_begin(&t->vtime_seqcount);

                gtime = t->gtime;
                if (t->vtime_snap_whence == VTIME_SYS && t->flags & PF_VCPU)
                        gtime += vtime_delta(t);

        } while (read_seqcount_retry(&t->vtime_seqcount, seq));

        return gtime;
}

/*
 * Fetch cputime raw values from fields of task_struct and
 * add up the pending nohz execution time since the last
 * cputime snapshot.
 */
static void
fetch_task_cputime(struct task_struct *t,
                   cputime_t *u_dst, cputime_t *s_dst,
                   cputime_t *u_src, cputime_t *s_src,
                   cputime_t *udelta, cputime_t *sdelta)
{
        unsigned int seq;
        unsigned long long delta;

        do {
                *udelta = 0;
                *sdelta = 0;

                seq = read_seqcount_begin(&t->vtime_seqcount);

                if (u_dst)
                        *u_dst = *u_src;
                if (s_dst)
                        *s_dst = *s_src;

                /* Task is sleeping, nothing to add */
                if (t->vtime_snap_whence == VTIME_INACTIVE ||
                    is_idle_task(t))
                        continue;

                delta = vtime_delta(t);

                /*
                 * Task runs either in user or kernel space, add pending nohz
                 * time to the right place.
                 */
                if (t->vtime_snap_whence == VTIME_USER || t->flags & PF_VCPU) {
                        *udelta = delta;
                } else {
                        if (t->vtime_snap_whence == VTIME_SYS)
                                *sdelta = delta;
                }
        } while (read_seqcount_retry(&t->vtime_seqcount, seq));
}

void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime)
{
        cputime_t udelta, sdelta;

        if (!vtime_accounting_enabled()) {
                if (utime)
                        *utime = t->utime;
                if (stime)
                        *stime = t->stime;
                return;
        }

        fetch_task_cputime(t, utime, stime, &t->utime,
                           &t->stime, &udelta, &sdelta);
        if (utime)
                *utime += udelta;
        if (stime)
                *stime += sdelta;
}

void task_cputime_scaled(struct task_struct *t,
                         cputime_t *utimescaled, cputime_t *stimescaled)
{
        cputime_t udelta, sdelta;

        if (!vtime_accounting_enabled()) {
                if (utimescaled)
                        *utimescaled = t->utimescaled;
                if (stimescaled)
                        *stimescaled = t->stimescaled;
                return;
        }

        fetch_task_cputime(t, utimescaled, stimescaled,
                           &t->utimescaled, &t->stimescaled, &udelta, &sdelta);
        if (utimescaled)
                *utimescaled += cputime_to_scaled(udelta);
        if (stimescaled)
                *stimescaled += cputime_to_scaled(sdelta);
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */