Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
authorLinus Torvalds <torvalds@linux-foundation.org>
Wed, 6 Aug 2014 00:46:42 +0000 (17:46 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Wed, 6 Aug 2014 00:46:42 +0000 (17:46 -0700)
Pull timer and time updates from Thomas Gleixner:
 "A rather large update of timers, timekeeping & co

   - Core timekeeping code is year-2038 safe now for 32bit machines.
     Now we just need to fix all in kernel users and the gazillion of
     user space interfaces which rely on timespec/timeval :)

   - Better cache layout for the timekeeping internal data structures.

   - Proper nanosecond based interfaces for in kernel users.

   - Tree wide cleanup of code which wants nanoseconds but does hoops
     and loops to convert back and forth from timespecs.  Some of it
     definitely belongs into the ugly code museum.

   - Consolidation of the timekeeping interface zoo.

   - A fast NMI safe accessor to clock monotonic for tracing.  This is a
     long standing request to support correlated user/kernel space
     traces.  With proper NTP frequency correction it's also suitable
     for correlation of traces across separate machines.

   - Checkpoint/restart support for timerfd.

   - A few NOHZ[_FULL] improvements in the [hr]timer code.

   - Code move from kernel to kernel/time of all time* related code.

   - New clocksource/event drivers from the ARM universe.  I'm really
     impressed that despite an architected timer in the newer chips SoC
     manufacturers insist on inventing new and differently broken SoC
     specific timers.

[ Ed. "Impressed"? I don't think that word means what you think it means ]

   - Another round of code move from arch to drivers.  Looks like most
     of the legacy mess in ARM regarding timers is sorted out except for
     a few obnoxious strongholds.

   - The usual updates and fixlets all over the place"

* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (114 commits)
  timekeeping: Fixup typo in update_vsyscall_old definition
  clocksource: document some basic timekeeping concepts
  timekeeping: Use cached ntp_tick_length when accumulating error
  timekeeping: Rework frequency adjustments to work better w/ nohz
  timekeeping: Minor fixup for timespec64->timespec assignment
  ftrace: Provide trace clocks monotonic
  timekeeping: Provide fast and NMI safe access to CLOCK_MONOTONIC
  seqcount: Add raw_write_seqcount_latch()
  seqcount: Provide raw_read_seqcount()
  timekeeping: Use tk_read_base as argument for timekeeping_get_ns()
  timekeeping: Create struct tk_read_base and use it in struct timekeeper
  timekeeping: Restructure the timekeeper some more
  clocksource: Get rid of cycle_last
  clocksource: Move cycle_last validation to core code
  clocksource: Make delta calculation a function
  wireless: ath9k: Get rid of timespec conversions
  drm: vmwgfx: Use nsec based interfaces
  drm: i915: Use nsec based interfaces
  timekeeping: Provide ktime_get_raw()
  hangcheck-timer: Use ktime_get_ns()
  ...

115 files changed:
Documentation/DocBook/device-drivers.tmpl
Documentation/devicetree/bindings/timer/cirrus,clps711x-timer.txt [new file with mode: 0644]
Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt [new file with mode: 0644]
Documentation/devicetree/bindings/timer/renesas,cmt.txt [new file with mode: 0644]
Documentation/devicetree/bindings/timer/renesas,mtu2.txt [new file with mode: 0644]
Documentation/devicetree/bindings/timer/renesas,tmu.txt [new file with mode: 0644]
Documentation/devicetree/bindings/vendor-prefixes.txt
Documentation/filesystems/proc.txt
Documentation/timers/00-INDEX
Documentation/timers/timekeeping.txt [new file with mode: 0644]
MAINTAINERS
arch/arm/Kconfig
arch/arm/common/bL_switcher.c
arch/arm/mach-pxa/Makefile
arch/arm/mach-pxa/generic.c
arch/arm/mach-pxa/time.c [deleted file]
arch/arm64/kernel/vdso.c
arch/hexagon/Kconfig
arch/ia64/kernel/time.c
arch/powerpc/kernel/time.c
arch/powerpc/platforms/cell/spu_base.c
arch/powerpc/platforms/cell/spufs/context.c
arch/powerpc/platforms/cell/spufs/file.c
arch/powerpc/platforms/cell/spufs/sched.c
arch/s390/Kconfig
arch/s390/kernel/time.c
arch/tile/kernel/time.c
arch/tile/kernel/vdso/vgettimeofday.c
arch/x86/Kconfig
arch/x86/kernel/tsc.c
arch/x86/kernel/vsyscall_gtod.c
arch/x86/kvm/x86.c
drivers/char/hangcheck-timer.c
drivers/clocksource/Kconfig
drivers/clocksource/Makefile
drivers/clocksource/clps711x-timer.c [new file with mode: 0644]
drivers/clocksource/exynos_mct.c
drivers/clocksource/mtk_timer.c [new file with mode: 0644]
drivers/clocksource/pxa_timer.c [new file with mode: 0644]
drivers/clocksource/sh_cmt.c
drivers/clocksource/sh_mtu2.c
drivers/clocksource/sh_tmu.c
drivers/clocksource/timer-marco.c
drivers/clocksource/timer-prima2.c
drivers/connector/cn_proc.c
drivers/firewire/core-cdev.c
drivers/gpu/drm/drm_irq.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
drivers/hwmon/ibmaem.c
drivers/input/evdev.c
drivers/mfd/cros_ec_spi.c
drivers/misc/ioc4.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/wireless/ath/ath9k/hw.c
drivers/of/address.c
fs/lockd/mon.c
fs/proc/array.c
fs/timerfd.c
include/clocksource/pxa.h [new file with mode: 0644]
include/linux/clocksource.h
include/linux/hrtimer.h
include/linux/iio/iio.h
include/linux/io.h
include/linux/ktime.h
include/linux/mlx5/driver.h
include/linux/of_address.h
include/linux/sched.h
include/linux/seqlock.h
include/linux/sh_timer.h
include/linux/time.h
include/linux/time64.h [new file with mode: 0644]
include/linux/timekeeper_internal.h
include/linux/timekeeping.h [new file with mode: 0644]
include/linux/timerfd.h
kernel/Makefile
kernel/acct.c
kernel/debug/kdb/kdb_main.c
kernel/delayacct.c
kernel/fork.c
kernel/hrtimer.c [deleted file]
kernel/itimer.c [deleted file]
kernel/posix-cpu-timers.c [deleted file]
kernel/posix-timers.c [deleted file]
kernel/time.c [deleted file]
kernel/time/Kconfig
kernel/time/Makefile
kernel/time/clocksource.c
kernel/time/hrtimer.c [new file with mode: 0644]
kernel/time/itimer.c [new file with mode: 0644]
kernel/time/ntp.c
kernel/time/ntp_internal.h
kernel/time/posix-cpu-timers.c [new file with mode: 0644]
kernel/time/posix-timers.c [new file with mode: 0644]
kernel/time/tick-internal.h
kernel/time/time.c [new file with mode: 0644]
kernel/time/timeconst.bc [new file with mode: 0644]
kernel/time/timekeeping.c
kernel/time/timekeeping.h [new file with mode: 0644]
kernel/time/timekeeping_debug.c
kernel/time/timekeeping_internal.h
kernel/time/timer.c [new file with mode: 0644]
kernel/time/udelay_test.c [new file with mode: 0644]
kernel/timeconst.bc [deleted file]
kernel/timer.c [deleted file]
kernel/trace/trace.c
kernel/tsacct.c
lib/Kconfig.debug
lib/devres.c
security/tomoyo/audit.c
security/tomoyo/common.c
tools/time/udelay_test.sh [new file with mode: 0755]

index dd3f278..f213058 100644 (file)
@@ -54,7 +54,7 @@
 !Ikernel/sched/cpupri.c
 !Ikernel/sched/fair.c
 !Iinclude/linux/completion.h
-!Ekernel/timer.c
+!Ekernel/time/timer.c
      </sect1>
      <sect1><title>Wait queues and Wake events</title>
 !Iinclude/linux/wait.h
@@ -63,7 +63,7 @@
      <sect1><title>High-resolution timers</title>
 !Iinclude/linux/ktime.h
 !Iinclude/linux/hrtimer.h
-!Ekernel/hrtimer.c
+!Ekernel/time/hrtimer.c
      </sect1>
      <sect1><title>Workqueues and Kevents</title>
 !Ekernel/workqueue.c
diff --git a/Documentation/devicetree/bindings/timer/cirrus,clps711x-timer.txt b/Documentation/devicetree/bindings/timer/cirrus,clps711x-timer.txt
new file mode 100644 (file)
index 0000000..cd55b52
--- /dev/null
@@ -0,0 +1,29 @@
+* Cirrus Logic CLPS711X Timer Counter
+
+Required properties:
+- compatible: Shall contain "cirrus,clps711x-timer".
+- reg       : Address and length of the register set.
+- interrupts: The interrupt number of the timer.
+- clocks    : phandle of timer reference clock.
+
+Note: Each timer should have an alias correctly numbered in "aliases" node.
+
+Example:
+       aliases {
+               timer0 = &timer1;
+               timer1 = &timer2;
+       };
+
+       timer1: timer@80000300 {
+               compatible = "cirrus,ep7312-timer", "cirrus,clps711x-timer";
+               reg = <0x80000300 0x4>;
+               interrupts = <8>;
+               clocks = <&clks 5>;
+       };
+
+       timer2: timer@80000340 {
+               compatible = "cirrus,ep7312-timer", "cirrus,clps711x-timer";
+               reg = <0x80000340 0x4>;
+               interrupts = <9>;
+               clocks = <&clks 6>;
+       };
diff --git a/Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt b/Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt
new file mode 100644 (file)
index 0000000..7c4408f
--- /dev/null
@@ -0,0 +1,17 @@
+Mediatek MT6577, MT6572 and MT6589 Timers
+---------------------------------------
+
+Required properties:
+- compatible: Should be "mediatek,mt6577-timer"
+- reg: Should contain location and length for timers register.
+- clocks: Clocks driving the timer hardware. This list should include two
+       clocks. The order is the system clock first, followed by the RTC clock.
+
+Examples:
+
+       timer@10008000 {
+               compatible = "mediatek,mt6577-timer";
+               reg = <0x10008000 0x80>;
+               interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_LOW>;
+               clocks = <&system_clk>, <&rtc_clk>;
+       };
diff --git a/Documentation/devicetree/bindings/timer/renesas,cmt.txt b/Documentation/devicetree/bindings/timer/renesas,cmt.txt
new file mode 100644 (file)
index 0000000..a17418b
--- /dev/null
@@ -0,0 +1,47 @@
+* Renesas R-Car Compare Match Timer (CMT)
+
+The CMT is a multi-channel 16/32/48-bit timer/counter with configurable clock
+inputs and programmable compare match.
+
+Channels share hardware resources but their counter and compare match value
+are independent. A particular CMT instance can implement only a subset of the
+channels supported by the CMT model. Channel indices represent the hardware
+position of the channel in the CMT and don't match the channel numbers in the
+datasheets.
+
+Required Properties:
+
+  - compatible: must contain one of the following.
+    - "renesas,cmt-32" for the 32-bit CMT
+               (CMT0 on sh7372, sh73a0 and r8a7740)
+    - "renesas,cmt-32-fast" for the 32-bit CMT with fast clock support
+               (CMT[234] on sh7372, sh73a0 and r8a7740)
+    - "renesas,cmt-48" for the 48-bit CMT
+               (CMT1 on sh7372, sh73a0 and r8a7740)
+    - "renesas,cmt-48-gen2" for the second generation 48-bit CMT
+               (CMT[01] on r8a73a4, r8a7790 and r8a7791)
+
+  - reg: base address and length of the registers block for the timer module.
+  - interrupts: interrupt-specifier for the timer, one per channel.
+  - clocks: a list of phandle + clock-specifier pairs, one for each entry
+    in clock-names.
+  - clock-names: must contain "fck" for the functional clock.
+
+  - renesas,channels-mask: bitmask of the available channels.
+
+
+Example: R8A7790 (R-Car H2) CMT0 node
+
+       CMT0 on R8A7790 implements hardware channels 5 and 6 only and names
+       them channels 0 and 1 in the documentation.
+
+       cmt0: timer@ffca0000 {
+               compatible = "renesas,cmt-48-gen2";
+               reg = <0 0xffca0000 0 0x1004>;
+               interrupts = <0 142 IRQ_TYPE_LEVEL_HIGH>,
+                            <0 142 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp1_clks R8A7790_CLK_CMT0>;
+               clock-names = "fck";
+
+               renesas,channels-mask = <0x60>;
+       };
diff --git a/Documentation/devicetree/bindings/timer/renesas,mtu2.txt b/Documentation/devicetree/bindings/timer/renesas,mtu2.txt
new file mode 100644 (file)
index 0000000..917453f
--- /dev/null
@@ -0,0 +1,39 @@
+* Renesas R-Car Multi-Function Timer Pulse Unit 2 (MTU2)
+
+The MTU2 is a multi-purpose, multi-channel timer/counter with configurable
+clock inputs and programmable compare match.
+
+Channels share hardware resources but their counter and compare match value
+are independent. The MTU2 hardware supports five channels indexed from 0 to 4.
+
+Required Properties:
+
+  - compatible: must contain "renesas,mtu2"
+
+  - reg: base address and length of the registers block for the timer module.
+
+  - interrupts: interrupt specifiers for the timer, one for each entry in
+    interrupt-names.
+  - interrupt-names: must contain one entry named "tgi?a" for each enabled
+    channel, where "?" is the channel index expressed as one digit from "0" to
+    "4".
+
+  - clocks: a list of phandle + clock-specifier pairs, one for each entry
+    in clock-names.
+  - clock-names: must contain "fck" for the functional clock.
+
+
+Example: R7S72100 (RZ/A1H) MTU2 node
+
+       mtu2: timer@fcff0000 {
+               compatible = "renesas,mtu2";
+               reg = <0xfcff0000 0x400>;
+               interrupts = <0 139 IRQ_TYPE_LEVEL_HIGH>,
+                            <0 146 IRQ_TYPE_LEVEL_HIGH>,
+                            <0 150 IRQ_TYPE_LEVEL_HIGH>,
+                            <0 154 IRQ_TYPE_LEVEL_HIGH>,
+                            <0 159 IRQ_TYPE_LEVEL_HIGH>;
+               interrupt-names = "tgi0a", "tgi1a", "tgi2a", "tgi3a", "tgi4a";
+               clocks = <&mstp3_clks R7S72100_CLK_MTU2>;
+               clock-names = "fck";
+       };
diff --git a/Documentation/devicetree/bindings/timer/renesas,tmu.txt b/Documentation/devicetree/bindings/timer/renesas,tmu.txt
new file mode 100644 (file)
index 0000000..425d0c5
--- /dev/null
@@ -0,0 +1,39 @@
+* Renesas R-Car Timer Unit (TMU)
+
+The TMU is a 32-bit timer/counter with configurable clock inputs and
+programmable compare match.
+
+Channels share hardware resources but their counter and compare match value
+are independent. The TMU hardware supports up to three channels.
+
+Required Properties:
+
+  - compatible: must contain "renesas,tmu"
+
+  - reg: base address and length of the registers block for the timer module.
+
+  - interrupts: interrupt-specifier for the timer, one per channel.
+
+  - clocks: a list of phandle + clock-specifier pairs, one for each entry
+    in clock-names.
+  - clock-names: must contain "fck" for the functional clock.
+
+Optional Properties:
+
+  - #renesas,channels: number of channels implemented by the timer, must be 2
+    or 3 (if not specified the value defaults to 3).
+
+
+Example: R8A7779 (R-Car H1) TMU0 node
+
+       tmu0: timer@ffd80000 {
+               compatible = "renesas,tmu";
+               reg = <0xffd80000 0x30>;
+               interrupts = <0 32 IRQ_TYPE_LEVEL_HIGH>,
+                            <0 33 IRQ_TYPE_LEVEL_HIGH>,
+                            <0 34 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp0_clks R8A7779_CLK_TMU0>;
+               clock-names = "fck";
+
+               #renesas,channels = <3>;
+       };
index 97c9c06..d415b38 100644 (file)
@@ -78,6 +78,7 @@ lsi   LSI Corp. (LSI Logic)
 lltc   Linear Technology Corporation
 marvell        Marvell Technology Group Ltd.
 maxim  Maxim Integrated Products
+mediatek       MediaTek Inc.
 micrel Micrel Inc.
 microchip      Microchip Technology Inc.
 mosaixtech     Mosaix Technologies, Inc.
index ddc531a..eb8a10e 100644 (file)
@@ -1743,6 +1743,25 @@ pair provide additional information particular to the objects they represent.
        While the first three lines are mandatory and always printed, the rest is
        optional and may be omitted if no marks created yet.
 
+       Timerfd files
+       ~~~~~~~~~~~~~
+
+       pos:    0
+       flags:  02
+       mnt_id: 9
+       clockid: 0
+       ticks: 0
+       settime flags: 01
+       it_value: (0, 49406829)
+       it_interval: (1, 0)
+
+       where 'clockid' is the clock type and 'ticks' is the number of the timer expirations
+       that have occurred [see timerfd_create(2) for details]. 'settime flags' are
+       flags in octal form used to set up the timer [see timerfd_settime(2) for
+       details]. 'it_value' is the remaining time until the timer expiration.
+       'it_interval' is the interval for the timer. Note the timer might be set up
+       with TIMER_ABSTIME option which will be shown in 'settime flags', but 'it_value'
+       still exhibits timer's remaining time.
 
 ------------------------------------------------------------------------------
 Configuring procfs
index 6d042dc..ee212a2 100644 (file)
@@ -12,6 +12,8 @@ Makefile
        - Build and link hpet_example
 NO_HZ.txt
        - Summary of the different methods for the scheduler clock-interrupts management.
+timekeeping.txt
+       - Clock sources, clock events, sched_clock() and delay timer notes
 timers-howto.txt
        - how to insert delays in the kernel the right (tm) way.
 timer_stats.txt
diff --git a/Documentation/timers/timekeeping.txt b/Documentation/timers/timekeeping.txt
new file mode 100644 (file)
index 0000000..f3a8cf2
--- /dev/null
@@ -0,0 +1,179 @@
+Clock sources, Clock events, sched_clock() and delay timers
+-----------------------------------------------------------
+
+This document tries to briefly explain some basic kernel timekeeping
+abstractions. It partly pertains to the drivers usually found in
+drivers/clocksource in the kernel tree, but the code may be spread out
+across the kernel.
+
+If you grep through the kernel source you will find a number of architecture-
+specific implementations of clock sources, clockevents and several likewise
+architecture-specific overrides of the sched_clock() function and some
+delay timers.
+
+To provide timekeeping for your platform, the clock source provides
+the basic timeline, whereas clock events shoot interrupts on certain points
+on this timeline, providing facilities such as high-resolution timers.
+sched_clock() is used for scheduling and timestamping, and delay timers
+provide an accurate delay source using hardware counters.
+
+
+Clock sources
+-------------
+
+The purpose of the clock source is to provide a timeline for the system that
+tells you where you are in time. For example issuing the command 'date' on
+a Linux system will eventually read the clock source to determine exactly
+what time it is.
+
+Typically the clock source is a monotonic, atomic counter which will provide
+n bits which count from 0 to 2^n - 1 and then wrap around to 0 and start over.
+It will ideally NEVER stop ticking as long as the system is running. It
+may stop during system suspend.
+
+The clock source shall have as high resolution as possible, and the frequency
+shall be as stable and correct as possible as compared to a real-world wall
+clock. It should not move unpredictably back and forth in time or miss a few
+cycles here and there.
+
+It must be immune to the kind of effects that occur in hardware where e.g.
+the counter register is read in two phases on the bus lowest 16 bits first
+and the higher 16 bits in a second bus cycle with the counter bits
+potentially being updated in between leading to the risk of very strange
+values from the counter.
+
+When the wall-clock accuracy of the clock source isn't satisfactory, there
+are various quirks and layers in the timekeeping code for e.g. synchronizing
+the user-visible time to RTC clocks in the system or against networked time
+servers using NTP, but all they do basically is update an offset against
+the clock source, which provides the fundamental timeline for the system.
+These measures do not affect the clock source per se; they only adapt the
+system to the shortcomings of it.
+
+The clock source struct shall provide means to translate the provided counter
+into a nanosecond value as an unsigned long long (unsigned 64 bit) number.
+Since this operation may be invoked very often, doing this in a strict
+mathematical sense is not desirable: instead the number is taken as close as
+possible to a nanosecond value using only the arithmetic operations
+multiply and shift, so in clocksource_cyc2ns() you find:
+
+  ns ~= (clocksource * mult) >> shift
+
+You will find a number of helper functions in the clock source code intended
+to aid in providing these mult and shift values, such as
+clocksource_khz2mult(), clocksource_hz2mult() that help determine the
+mult factor from a fixed shift, and clocksource_register_hz() and
+clocksource_register_khz() which will help out assigning both shift and mult
+factors using the frequency of the clock source as the only input.
+
+For really simple clock sources accessed from a single I/O memory location
+there is nowadays even clocksource_mmio_init() which will take a memory
+location, bit width, a parameter telling whether the counter in the
+register counts up or down, and the timer clock rate, and then conjure all
+necessary parameters.
+
+Since a 32-bit counter at say 100 MHz will wrap around to zero after some 43
+seconds, the code handling the clock source will have to compensate for this.
+That is the reason why the clock source struct also contains a 'mask'
+member telling how many bits of the source are valid. This way the timekeeping
+code knows when the counter will wrap around and can insert the necessary
+compensation code on both sides of the wrap point so that the system timeline
+remains monotonic.
+
+
+Clock events
+------------
+
+Clock events are the conceptual reverse of clock sources: they take a
+desired time specification value and calculate the values to poke into
+hardware timer registers.
+
+Clock events are orthogonal to clock sources. The same hardware
+and register range may be used for the clock event, but it is essentially
+a different thing. The hardware driving clock events has to be able to
+fire interrupts, so as to trigger events on the system timeline. On an SMP
+system, it is ideal (and customary) to have one such event driving timer per
+CPU core, so that each core can trigger events independently of any other
+core.
+
+You will notice that the clock event device code is based on the same basic
+idea about translating counters to nanoseconds using mult and shift
+arithmetic, and you find the same family of helper functions again for
+assigning these values. The clock event driver does not need a 'mask'
+attribute however: the system will not try to plan events beyond the time
+horizon of the clock event.
+
+
+sched_clock()
+-------------
+
+In addition to the clock sources and clock events there is a special weak
+function in the kernel called sched_clock(). This function shall return the
+number of nanoseconds since the system was started. An architecture may or
+may not provide an implementation of sched_clock() on its own. If a local
+implementation is not provided, the system jiffy counter will be used as
+sched_clock().
+
+As the name suggests, sched_clock() is used for scheduling the system,
+determining the absolute timeslice for a certain process in the CFS scheduler
+for example. It is also used for printk timestamps when you have selected to
+include time information in printk for things like bootcharts.
+
+Compared to clock sources, sched_clock() has to be very fast: it is called
+much more often, especially by the scheduler. If you have to do trade-offs
+between accuracy compared to the clock source, you may sacrifice accuracy
+for speed in sched_clock(). It however requires some of the same basic
+characteristics as the clock source, i.e. it should be monotonic.
+
+The sched_clock() function may wrap only on unsigned long long boundaries,
+i.e. after 64 bits. Since this is a nanosecond value this will mean it wraps
+after circa 585 years. (For most practical systems this means "never".)
+
+If an architecture does not provide its own implementation of this function,
+it will fall back to using jiffies, making its maximum resolution 1/HZ of the
+jiffy frequency for the architecture. This will affect scheduling accuracy
+and will likely show up in system benchmarks.
+
+The clock driving sched_clock() may stop or reset to zero during system
+suspend/sleep. This does not matter for its purpose of scheduling
+events on the system. However it may result in interesting timestamps in
+printk().
+
+The sched_clock() function should be callable in any context, IRQ- and
+NMI-safe and return a sane value in any context.
+
+Some architectures may have a limited set of time sources and lack a nice
+counter to derive a 64-bit nanosecond value, so for example on the ARM
+architecture, special helper functions have been created to provide a
+sched_clock() nanosecond base from a 16- or 32-bit counter. Sometimes the
+same counter that is also used as clock source is used for this purpose.
+
+On SMP systems, it is crucial for performance that sched_clock() can be called
+independently on each CPU without any synchronization performance hits.
+Some hardware (such as the x86 TSC) will cause the sched_clock() function to
+drift between the CPUs on the system. The kernel can work around this by
+enabling the CONFIG_HAVE_UNSTABLE_SCHED_CLOCK option. This is another aspect
+that makes sched_clock() different from the ordinary clock source.
+
+
+Delay timers (some architectures only)
+--------------------------------------
+
+On systems with variable CPU frequency, the various kernel delay() functions
+will sometimes behave strangely. Basically these delays usually use a hard
+loop to delay a certain number of jiffy fractions using a "lpj" (loops per
+jiffy) value, calibrated on boot.
+
+Let's hope that your system is running on maximum frequency when this value
+is calibrated: as an effect when the frequency is geared down to half the
+full frequency, any delay() will be twice as long. Usually this does not
+hurt, as you're commonly requesting that amount of delay *or more*. But
+basically the semantics are quite unpredictable on such systems.
+
+Enter timer-based delays. Using these, a timer read may be used instead of
+a hard-coded loop for providing the desired delay.
+
+This is done by declaring a struct delay_timer and assigning the appropriate
+function pointers and rate settings for this delay timer.
+
+This is available on some architectures like OpenRISC or ARM.
index d5a78f2..f777763 100644 (file)
@@ -4237,7 +4237,7 @@ L:        linux-kernel@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
 S:     Maintained
 F:     Documentation/timers/
-F:     kernel/hrtimer.c
+F:     kernel/time/hrtimer.c
 F:     kernel/time/clockevents.c
 F:     kernel/time/tick*.*
 F:     kernel/time/timer_*.c
@@ -7053,10 +7053,10 @@ POSIX CLOCKS and TIMERS
 M:     Thomas Gleixner <tglx@linutronix.de>
 L:     linux-kernel@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
-S:     Supported
+S:     Maintained
 F:     fs/timerfd.c
 F:     include/linux/timer*
-F:     kernel/*timer*
+F:     kernel/time/*timer*
 
 POWER SUPPLY CLASS/SUBSYSTEM and DRIVERS
 M:     Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
index 1e14b90..d31c500 100644 (file)
@@ -65,7 +65,6 @@ config ARM
        select HAVE_UID16
        select HAVE_VIRT_CPU_ACCOUNTING_GEN
        select IRQ_FORCED_THREADING
-       select KTIME_SCALAR
        select MODULES_USE_ELF_REL
        select NO_BOOTMEM
        select OLD_SIGACTION
@@ -648,6 +647,7 @@ config ARCH_PXA
        select AUTO_ZRELADDR
        select CLKDEV_LOOKUP
        select CLKSRC_MMIO
+       select CLKSRC_OF
        select GENERIC_CLOCKEVENTS
        select GPIO_PXA
        select HAVE_IDE
index 490f3dc..6eaddc4 100644 (file)
@@ -57,16 +57,6 @@ static int read_mpidr(void)
        return id & MPIDR_HWID_BITMASK;
 }
 
-/*
- * Get a global nanosecond time stamp for tracing.
- */
-static s64 get_ns(void)
-{
-       struct timespec ts;
-       getnstimeofday(&ts);
-       return timespec_to_ns(&ts);
-}
-
 /*
  * bL switcher core code.
  */
@@ -224,7 +214,7 @@ static int bL_switch_to(unsigned int new_cluster_id)
         */
        local_irq_disable();
        local_fiq_disable();
-       trace_cpu_migrate_begin(get_ns(), ob_mpidr);
+       trace_cpu_migrate_begin(ktime_get_real_ns(), ob_mpidr);
 
        /* redirect GIC's SGIs to our counterpart */
        gic_migrate_target(bL_gic_id[ib_cpu][ib_cluster]);
@@ -267,7 +257,7 @@ static int bL_switch_to(unsigned int new_cluster_id)
                                          tdev->evtdev->next_event, 1);
        }
 
-       trace_cpu_migrate_finish(get_ns(), ib_mpidr);
+       trace_cpu_migrate_finish(ktime_get_real_ns(), ib_mpidr);
        local_fiq_enable();
        local_irq_enable();
 
@@ -558,7 +548,7 @@ int bL_switcher_get_logical_index(u32 mpidr)
 
 static void bL_switcher_trace_trigger_cpu(void *__always_unused info)
 {
-       trace_cpu_migrate_current(get_ns(), read_mpidr());
+       trace_cpu_migrate_current(ktime_get_real_ns(), read_mpidr());
 }
 
 int bL_switcher_trace_trigger(void)
index 648867a..2fe1824 100644 (file)
@@ -4,7 +4,7 @@
 
 # Common support (must be linked before board specific support)
 obj-y                          += clock.o devices.o generic.o irq.o \
-                                  time.o reset.o
+                                  reset.o
 obj-$(CONFIG_PM)               += pm.o sleep.o standby.o
 
 # Generic drivers that other drivers may depend upon
index 4225417..6f38e1a 100644 (file)
 #include <asm/mach/map.h>
 #include <asm/mach-types.h>
 
+#include <mach/irqs.h>
 #include <mach/reset.h>
 #include <mach/smemc.h>
 #include <mach/pxa3xx-regs.h>
 
 #include "generic.h"
+#include <clocksource/pxa.h>
 
 void clear_reset_status(unsigned int mask)
 {
@@ -56,6 +58,15 @@ unsigned long get_clock_tick_rate(void)
 }
 EXPORT_SYMBOL(get_clock_tick_rate);
 
+/*
+ * For non device-tree builds, keep legacy timer init
+ */
+void pxa_timer_init(void)
+{
+       pxa_timer_nodt_init(IRQ_OST0, io_p2v(0x40a00000),
+                           get_clock_tick_rate());
+}
+
 /*
  * Get the clock frequency as reflected by CCCR and the turbo flag.
  * We assume these values have been applied via a fcs.
diff --git a/arch/arm/mach-pxa/time.c b/arch/arm/mach-pxa/time.c
deleted file mode 100644 (file)
index fca174e..0000000
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * arch/arm/mach-pxa/time.c
- *
- * PXA clocksource, clockevents, and OST interrupt handlers.
- * Copyright (c) 2007 by Bill Gatliff <bgat@billgatliff.com>.
- *
- * Derived from Nicolas Pitre's PXA timer handler Copyright (c) 2001
- * by MontaVista Software, Inc.  (Nico, your code rocks!)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/clockchips.h>
-#include <linux/sched_clock.h>
-
-#include <asm/div64.h>
-#include <asm/mach/irq.h>
-#include <asm/mach/time.h>
-#include <mach/regs-ost.h>
-#include <mach/irqs.h>
-
-/*
- * This is PXA's sched_clock implementation. This has a resolution
- * of at least 308 ns and a maximum value of 208 days.
- *
- * The return value is guaranteed to be monotonic in that range as
- * long as there is always less than 582 seconds between successive
- * calls to sched_clock() which should always be the case in practice.
- */
-
-static u64 notrace pxa_read_sched_clock(void)
-{
-       return readl_relaxed(OSCR);
-}
-
-
-#define MIN_OSCR_DELTA 16
-
-static irqreturn_t
-pxa_ost0_interrupt(int irq, void *dev_id)
-{
-       struct clock_event_device *c = dev_id;
-
-       /* Disarm the compare/match, signal the event. */
-       writel_relaxed(readl_relaxed(OIER) & ~OIER_E0, OIER);
-       writel_relaxed(OSSR_M0, OSSR);
-       c->event_handler(c);
-
-       return IRQ_HANDLED;
-}
-
-static int
-pxa_osmr0_set_next_event(unsigned long delta, struct clock_event_device *dev)
-{
-       unsigned long next, oscr;
-
-       writel_relaxed(readl_relaxed(OIER) | OIER_E0, OIER);
-       next = readl_relaxed(OSCR) + delta;
-       writel_relaxed(next, OSMR0);
-       oscr = readl_relaxed(OSCR);
-
-       return (signed)(next - oscr) <= MIN_OSCR_DELTA ? -ETIME : 0;
-}
-
-static void
-pxa_osmr0_set_mode(enum clock_event_mode mode, struct clock_event_device *dev)
-{
-       switch (mode) {
-       case CLOCK_EVT_MODE_ONESHOT:
-               writel_relaxed(readl_relaxed(OIER) & ~OIER_E0, OIER);
-               writel_relaxed(OSSR_M0, OSSR);
-               break;
-
-       case CLOCK_EVT_MODE_UNUSED:
-       case CLOCK_EVT_MODE_SHUTDOWN:
-               /* initializing, released, or preparing for suspend */
-               writel_relaxed(readl_relaxed(OIER) & ~OIER_E0, OIER);
-               writel_relaxed(OSSR_M0, OSSR);
-               break;
-
-       case CLOCK_EVT_MODE_RESUME:
-       case CLOCK_EVT_MODE_PERIODIC:
-               break;
-       }
-}
-
-#ifdef CONFIG_PM
-static unsigned long osmr[4], oier, oscr;
-
-static void pxa_timer_suspend(struct clock_event_device *cedev)
-{
-       osmr[0] = readl_relaxed(OSMR0);
-       osmr[1] = readl_relaxed(OSMR1);
-       osmr[2] = readl_relaxed(OSMR2);
-       osmr[3] = readl_relaxed(OSMR3);
-       oier = readl_relaxed(OIER);
-       oscr = readl_relaxed(OSCR);
-}
-
-static void pxa_timer_resume(struct clock_event_device *cedev)
-{
-       /*
-        * Ensure that we have at least MIN_OSCR_DELTA between match
-        * register 0 and the OSCR, to guarantee that we will receive
-        * the one-shot timer interrupt.  We adjust OSMR0 in preference
-        * to OSCR to guarantee that OSCR is monotonically incrementing.
-        */
-       if (osmr[0] - oscr < MIN_OSCR_DELTA)
-               osmr[0] += MIN_OSCR_DELTA;
-
-       writel_relaxed(osmr[0], OSMR0);
-       writel_relaxed(osmr[1], OSMR1);
-       writel_relaxed(osmr[2], OSMR2);
-       writel_relaxed(osmr[3], OSMR3);
-       writel_relaxed(oier, OIER);
-       writel_relaxed(oscr, OSCR);
-}
-#else
-#define pxa_timer_suspend NULL
-#define pxa_timer_resume NULL
-#endif
-
-static struct clock_event_device ckevt_pxa_osmr0 = {
-       .name           = "osmr0",
-       .features       = CLOCK_EVT_FEAT_ONESHOT,
-       .rating         = 200,
-       .set_next_event = pxa_osmr0_set_next_event,
-       .set_mode       = pxa_osmr0_set_mode,
-       .suspend        = pxa_timer_suspend,
-       .resume         = pxa_timer_resume,
-};
-
-static struct irqaction pxa_ost0_irq = {
-       .name           = "ost0",
-       .flags          = IRQF_TIMER | IRQF_IRQPOLL,
-       .handler        = pxa_ost0_interrupt,
-       .dev_id         = &ckevt_pxa_osmr0,
-};
-
-void __init pxa_timer_init(void)
-{
-       unsigned long clock_tick_rate = get_clock_tick_rate();
-
-       writel_relaxed(0, OIER);
-       writel_relaxed(OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3, OSSR);
-
-       sched_clock_register(pxa_read_sched_clock, 32, clock_tick_rate);
-
-       ckevt_pxa_osmr0.cpumask = cpumask_of(0);
-
-       setup_irq(IRQ_OST0, &pxa_ost0_irq);
-
-       clocksource_mmio_init(OSCR, "oscr0", clock_tick_rate, 200, 32,
-               clocksource_mmio_readl_up);
-       clockevents_config_and_register(&ckevt_pxa_osmr0, clock_tick_rate,
-               MIN_OSCR_DELTA * 2, 0x7fffffff);
-}
index 24f2e8c..a81a446 100644 (file)
@@ -219,7 +219,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
 void update_vsyscall(struct timekeeper *tk)
 {
        struct timespec xtime_coarse;
-       u32 use_syscall = strcmp(tk->clock->name, "arch_sys_counter");
+       u32 use_syscall = strcmp(tk->tkr.clock->name, "arch_sys_counter");
 
        ++vdso_data->tb_seq_count;
        smp_wmb();
@@ -232,11 +232,11 @@ void update_vsyscall(struct timekeeper *tk)
        vdso_data->wtm_clock_nsec               = tk->wall_to_monotonic.tv_nsec;
 
        if (!use_syscall) {
-               vdso_data->cs_cycle_last        = tk->clock->cycle_last;
+               vdso_data->cs_cycle_last        = tk->tkr.cycle_last;
                vdso_data->xtime_clock_sec      = tk->xtime_sec;
-               vdso_data->xtime_clock_nsec     = tk->xtime_nsec;
-               vdso_data->cs_mult              = tk->mult;
-               vdso_data->cs_shift             = tk->shift;
+               vdso_data->xtime_clock_nsec     = tk->tkr.xtime_nsec;
+               vdso_data->cs_mult              = tk->tkr.mult;
+               vdso_data->cs_shift             = tk->tkr.shift;
        }
 
        smp_wmb();
index 0fd6138..4dc89d1 100644 (file)
@@ -23,7 +23,6 @@ config HEXAGON
        select GENERIC_IOMAP
        select GENERIC_SMP_IDLE_THREAD
        select STACKTRACE_SUPPORT
-       select KTIME_SCALAR
        select GENERIC_CLOCKEVENTS
        select GENERIC_CLOCKEVENTS_BROADCAST
        select MODULES_USE_ELF_RELA
index 71c52bc..3e71ef8 100644 (file)
@@ -441,7 +441,7 @@ void update_vsyscall_tz(void)
 }
 
 void update_vsyscall_old(struct timespec *wall, struct timespec *wtm,
-                       struct clocksource *c, u32 mult)
+                        struct clocksource *c, u32 mult, cycle_t cycle_last)
 {
        write_seqcount_begin(&fsyscall_gtod_data.seq);
 
@@ -450,7 +450,7 @@ void update_vsyscall_old(struct timespec *wall, struct timespec *wtm,
         fsyscall_gtod_data.clk_mult = mult;
         fsyscall_gtod_data.clk_shift = c->shift;
         fsyscall_gtod_data.clk_fsys_mmio = c->archdata.fsys_mmio;
-        fsyscall_gtod_data.clk_cycle_last = c->cycle_last;
+        fsyscall_gtod_data.clk_cycle_last = cycle_last;
 
        /* copy kernel time structures */
         fsyscall_gtod_data.wall_time.tv_sec = wall->tv_sec;
index 9fff9cd..368ab37 100644 (file)
@@ -741,7 +741,7 @@ static cycle_t timebase_read(struct clocksource *cs)
 }
 
 void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm,
-                       struct clocksource *clock, u32 mult)
+                        struct clocksource *clock, u32 mult, cycle_t cycle_last)
 {
        u64 new_tb_to_xs, new_stamp_xsec;
        u32 frac_sec;
@@ -774,7 +774,7 @@ void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm,
         * We expect the caller to have done the first increment of
         * vdso_data->tb_update_count already.
         */
-       vdso_data->tb_orig_stamp = clock->cycle_last;
+       vdso_data->tb_orig_stamp = cycle_last;
        vdso_data->stamp_xsec = new_stamp_xsec;
        vdso_data->tb_to_xs = new_tb_to_xs;
        vdso_data->wtom_clock_sec = wtm->tv_sec;
index f85db3a..2930d1e 100644 (file)
@@ -611,7 +611,6 @@ static int __init create_spu(void *data)
        int ret;
        static int number;
        unsigned long flags;
-       struct timespec ts;
 
        ret = -ENOMEM;
        spu = kzalloc(sizeof (*spu), GFP_KERNEL);
@@ -652,8 +651,7 @@ static int __init create_spu(void *data)
        mutex_unlock(&spu_full_list_mutex);
 
        spu->stats.util_state = SPU_UTIL_IDLE_LOADED;
-       ktime_get_ts(&ts);
-       spu->stats.tstamp = timespec_to_ns(&ts);
+       spu->stats.tstamp = ktime_get_ns();
 
        INIT_LIST_HEAD(&spu->aff_list);
 
@@ -676,7 +674,6 @@ static const char *spu_state_names[] = {
 static unsigned long long spu_acct_time(struct spu *spu,
                enum spu_utilization_state state)
 {
-       struct timespec ts;
        unsigned long long time = spu->stats.times[state];
 
        /*
@@ -684,10 +681,8 @@ static unsigned long long spu_acct_time(struct spu *spu,
         * statistics are not updated.  Apply the time delta from the
         * last recorded state of the spu.
         */
-       if (spu->stats.util_state == state) {
-               ktime_get_ts(&ts);
-               time += timespec_to_ns(&ts) - spu->stats.tstamp;
-       }
+       if (spu->stats.util_state == state)
+               time += ktime_get_ns() - spu->stats.tstamp;
 
        return time / NSEC_PER_MSEC;
 }
index 9c6790d..3b4152f 100644 (file)
@@ -36,7 +36,6 @@ atomic_t nr_spu_contexts = ATOMIC_INIT(0);
 struct spu_context *alloc_spu_context(struct spu_gang *gang)
 {
        struct spu_context *ctx;
-       struct timespec ts;
 
        ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
        if (!ctx)
@@ -67,8 +66,7 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
        __spu_update_sched_info(ctx);
        spu_set_timeslice(ctx);
        ctx->stats.util_state = SPU_UTIL_IDLE_LOADED;
-       ktime_get_ts(&ts);
-       ctx->stats.tstamp = timespec_to_ns(&ts);
+       ctx->stats.tstamp = ktime_get_ns();
 
        atomic_inc(&nr_spu_contexts);
        goto out;
index 9098692..d966bbe 100644 (file)
@@ -2338,7 +2338,6 @@ static const char *ctx_state_names[] = {
 static unsigned long long spufs_acct_time(struct spu_context *ctx,
                enum spu_utilization_state state)
 {
-       struct timespec ts;
        unsigned long long time = ctx->stats.times[state];
 
        /*
@@ -2351,8 +2350,7 @@ static unsigned long long spufs_acct_time(struct spu_context *ctx,
         * of the spu context.
         */
        if (ctx->spu && ctx->stats.util_state == state) {
-               ktime_get_ts(&ts);
-               time += timespec_to_ns(&ts) - ctx->stats.tstamp;
+               time += ktime_get_ns() - ctx->stats.tstamp;
        }
 
        return time / NSEC_PER_MSEC;
index 4a0a64f..998f632 100644 (file)
@@ -1039,13 +1039,11 @@ void spuctx_switch_state(struct spu_context *ctx,
 {
        unsigned long long curtime;
        signed long long delta;
-       struct timespec ts;
        struct spu *spu;
        enum spu_utilization_state old_state;
        int node;
 
-       ktime_get_ts(&ts);
-       curtime = timespec_to_ns(&ts);
+       curtime = ktime_get_ns();
        delta = curtime - ctx->stats.tstamp;
 
        WARN_ON(!mutex_is_locked(&ctx->state_mutex));
index f5af5f6..720a11d 100644 (file)
@@ -136,7 +136,6 @@ config S390
        select HAVE_SYSCALL_TRACEPOINTS
        select HAVE_UID16 if 32BIT
        select HAVE_VIRT_CPU_ACCOUNTING
-       select KTIME_SCALAR if 32BIT
        select MODULES_USE_ELF_RELA
        select NO_BOOTMEM
        select OLD_SIGACTION
index 0931b11..4cef607 100644 (file)
@@ -214,26 +214,26 @@ void update_vsyscall(struct timekeeper *tk)
 {
        u64 nsecps;
 
-       if (tk->clock != &clocksource_tod)
+       if (tk->tkr.clock != &clocksource_tod)
                return;
 
        /* Make userspace gettimeofday spin until we're done. */
        ++vdso_data->tb_update_count;
        smp_wmb();
-       vdso_data->xtime_tod_stamp = tk->clock->cycle_last;
+       vdso_data->xtime_tod_stamp = tk->tkr.cycle_last;
        vdso_data->xtime_clock_sec = tk->xtime_sec;
-       vdso_data->xtime_clock_nsec = tk->xtime_nsec;
+       vdso_data->xtime_clock_nsec = tk->tkr.xtime_nsec;
        vdso_data->wtom_clock_sec =
                tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
-       vdso_data->wtom_clock_nsec = tk->xtime_nsec +
-               + ((u64) tk->wall_to_monotonic.tv_nsec << tk->shift);
-       nsecps = (u64) NSEC_PER_SEC << tk->shift;
+       vdso_data->wtom_clock_nsec = tk->tkr.xtime_nsec +
+               + ((u64) tk->wall_to_monotonic.tv_nsec << tk->tkr.shift);
+       nsecps = (u64) NSEC_PER_SEC << tk->tkr.shift;
        while (vdso_data->wtom_clock_nsec >= nsecps) {
                vdso_data->wtom_clock_nsec -= nsecps;
                vdso_data->wtom_clock_sec++;
        }
-       vdso_data->tk_mult = tk->mult;
-       vdso_data->tk_shift = tk->shift;
+       vdso_data->tk_mult = tk->tkr.mult;
+       vdso_data->tk_shift = tk->tkr.shift;
        smp_wmb();
        ++vdso_data->tb_update_count;
 }
index 462dcd0..d8fbc28 100644 (file)
@@ -260,9 +260,8 @@ void update_vsyscall_tz(void)
 
 void update_vsyscall(struct timekeeper *tk)
 {
-       struct timespec wall_time = tk_xtime(tk);
        struct timespec *wtm = &tk->wall_to_monotonic;
-       struct clocksource *clock = tk->clock;
+       struct clocksource *clock = tk->tkr.clock;
 
        if (clock != &cycle_counter_cs)
                return;
@@ -270,13 +269,13 @@ void update_vsyscall(struct timekeeper *tk)
        /* Userspace gettimeofday will spin while this value is odd. */
        ++vdso_data->tb_update_count;
        smp_wmb();
-       vdso_data->xtime_tod_stamp = clock->cycle_last;
-       vdso_data->xtime_clock_sec = wall_time.tv_sec;
-       vdso_data->xtime_clock_nsec = wall_time.tv_nsec;
+       vdso_data->xtime_tod_stamp = tk->tkr.cycle_last;
+       vdso_data->xtime_clock_sec = tk->xtime_sec;
+       vdso_data->xtime_clock_nsec = tk->tkr.xtime_nsec;
        vdso_data->wtom_clock_sec = wtm->tv_sec;
        vdso_data->wtom_clock_nsec = wtm->tv_nsec;
-       vdso_data->mult = clock->mult;
-       vdso_data->shift = clock->shift;
+       vdso_data->mult = tk->tkr.mult;
+       vdso_data->shift = tk->tkr.shift;
        smp_wmb();
        ++vdso_data->tb_update_count;
 }
index 51ec8e4..e933fb9 100644 (file)
@@ -83,10 +83,11 @@ int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
                if (count & 1)
                        continue;
 
-               cycles = (get_cycles() - vdso_data->xtime_tod_stamp);
-               ns = (cycles * vdso_data->mult) >> vdso_data->shift;
                sec = vdso_data->xtime_clock_sec;
-               ns += vdso_data->xtime_clock_nsec;
+               cycles = get_cycles() - vdso_data->xtime_tod_stamp;
+               ns = (cycles * vdso_data->mult) + vdso_data->xtime_clock_nsec;
+               ns >>= vdso_data->shift;
+
                if (ns >= NSEC_PER_SEC) {
                        ns -= NSEC_PER_SEC;
                        sec += 1;
index 6b71f04..6cfeb08 100644 (file)
@@ -108,9 +108,9 @@ config X86
        select CLOCKSOURCE_WATCHDOG
        select GENERIC_CLOCKEVENTS
        select ARCH_CLOCKSOURCE_DATA
+       select CLOCKSOURCE_VALIDATE_LAST_CYCLE
        select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC)
        select GENERIC_TIME_VSYSCALL
-       select KTIME_SCALAR if X86_32
        select GENERIC_STRNCPY_FROM_USER
        select GENERIC_STRNLEN_USER
        select HAVE_CONTEXT_TRACKING if X86_64
index 56b0c33..b6025f9 100644 (file)
@@ -950,7 +950,7 @@ core_initcall(cpufreq_tsc);
 static struct clocksource clocksource_tsc;
 
 /*
- * We compare the TSC to the cycle_last value in the clocksource
+ * We used to compare the TSC to the cycle_last value in the clocksource
  * structure to avoid a nasty time-warp. This can be observed in a
  * very small window right after one CPU updated cycle_last under
  * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
@@ -960,26 +960,23 @@ static struct clocksource clocksource_tsc;
  * due to the unsigned delta calculation of the time keeping core
  * code, which is necessary to support wrapping clocksources like pm
  * timer.
+ *
+ * This sanity check is now done in the core timekeeping code.
+ * checking the result of read_tsc() - cycle_last for being negative.
+ * That works because CLOCKSOURCE_MASK(64) does not mask out any bit.
  */
 static cycle_t read_tsc(struct clocksource *cs)
 {
-       cycle_t ret = (cycle_t)get_cycles();
-
-       return ret >= clocksource_tsc.cycle_last ?
-               ret : clocksource_tsc.cycle_last;
-}
-
-static void resume_tsc(struct clocksource *cs)
-{
-       if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
-               clocksource_tsc.cycle_last = 0;
+       return (cycle_t)get_cycles();
 }
 
+/*
+ * .mask MUST be CLOCKSOURCE_MASK(64). See comment above read_tsc()
+ */
 static struct clocksource clocksource_tsc = {
        .name                   = "tsc",
        .rating                 = 300,
        .read                   = read_tsc,
-       .resume                 = resume_tsc,
        .mask                   = CLOCKSOURCE_MASK(64),
        .flags                  = CLOCK_SOURCE_IS_CONTINUOUS |
                                  CLOCK_SOURCE_MUST_VERIFY,
index 9531fbb..c7d791f 100644 (file)
@@ -31,29 +31,30 @@ void update_vsyscall(struct timekeeper *tk)
        gtod_write_begin(vdata);
 
        /* copy vsyscall data */
-       vdata->vclock_mode      = tk->clock->archdata.vclock_mode;
-       vdata->cycle_last       = tk->clock->cycle_last;
-       vdata->mask             = tk->clock->mask;
-       vdata->mult             = tk->mult;
-       vdata->shift            = tk->shift;
+       vdata->vclock_mode      = tk->tkr.clock->archdata.vclock_mode;
+       vdata->cycle_last       = tk->tkr.cycle_last;
+       vdata->mask             = tk->tkr.mask;
+       vdata->mult             = tk->tkr.mult;
+       vdata->shift            = tk->tkr.shift;
 
        vdata->wall_time_sec            = tk->xtime_sec;
-       vdata->wall_time_snsec          = tk->xtime_nsec;
+       vdata->wall_time_snsec          = tk->tkr.xtime_nsec;
 
        vdata->monotonic_time_sec       = tk->xtime_sec
                                        + tk->wall_to_monotonic.tv_sec;
-       vdata->monotonic_time_snsec     = tk->xtime_nsec
+       vdata->monotonic_time_snsec     = tk->tkr.xtime_nsec
                                        + ((u64)tk->wall_to_monotonic.tv_nsec
-                                               << tk->shift);
+                                               << tk->tkr.shift);
        while (vdata->monotonic_time_snsec >=
-                                       (((u64)NSEC_PER_SEC) << tk->shift)) {
+                                       (((u64)NSEC_PER_SEC) << tk->tkr.shift)) {
                vdata->monotonic_time_snsec -=
-                                       ((u64)NSEC_PER_SEC) << tk->shift;
+                                       ((u64)NSEC_PER_SEC) << tk->tkr.shift;
                vdata->monotonic_time_sec++;
        }
 
        vdata->wall_time_coarse_sec     = tk->xtime_sec;
-       vdata->wall_time_coarse_nsec    = (long)(tk->xtime_nsec >> tk->shift);
+       vdata->wall_time_coarse_nsec    = (long)(tk->tkr.xtime_nsec >>
+                                                tk->tkr.shift);
 
        vdata->monotonic_time_coarse_sec =
                vdata->wall_time_coarse_sec + tk->wall_to_monotonic.tv_sec;
index b86d329..ca3d760 100644 (file)
@@ -1020,9 +1020,8 @@ struct pvclock_gtod_data {
                u32     shift;
        } clock;
 
-       /* open coded 'struct timespec' */
-       u64             monotonic_time_snsec;
-       time_t          monotonic_time_sec;
+       u64             boot_ns;
+       u64             nsec_base;
 };
 
 static struct pvclock_gtod_data pvclock_gtod_data;
@@ -1030,27 +1029,21 @@ static struct pvclock_gtod_data pvclock_gtod_data;
 static void update_pvclock_gtod(struct timekeeper *tk)
 {
        struct pvclock_gtod_data *vdata = &pvclock_gtod_data;
+       u64 boot_ns;
+
+       boot_ns = ktime_to_ns(ktime_add(tk->tkr.base_mono, tk->offs_boot));
 
        write_seqcount_begin(&vdata->seq);
 
        /* copy pvclock gtod data */
-       vdata->clock.vclock_mode        = tk->clock->archdata.vclock_mode;
-       vdata->clock.cycle_last         = tk->clock->cycle_last;
-       vdata->clock.mask               = tk->clock->mask;
-       vdata->clock.mult               = tk->mult;
-       vdata->clock.shift              = tk->shift;
-
-       vdata->monotonic_time_sec       = tk->xtime_sec
-                                       + tk->wall_to_monotonic.tv_sec;
-       vdata->monotonic_time_snsec     = tk->xtime_nsec
-                                       + (tk->wall_to_monotonic.tv_nsec
-                                               << tk->shift);
-       while (vdata->monotonic_time_snsec >=
-                                       (((u64)NSEC_PER_SEC) << tk->shift)) {
-               vdata->monotonic_time_snsec -=
-                                       ((u64)NSEC_PER_SEC) << tk->shift;
-               vdata->monotonic_time_sec++;
-       }
+       vdata->clock.vclock_mode        = tk->tkr.clock->archdata.vclock_mode;
+       vdata->clock.cycle_last         = tk->tkr.cycle_last;
+       vdata->clock.mask               = tk->tkr.mask;
+       vdata->clock.mult               = tk->tkr.mult;
+       vdata->clock.shift              = tk->tkr.shift;
+
+       vdata->boot_ns                  = boot_ns;
+       vdata->nsec_base                = tk->tkr.xtime_nsec;
 
        write_seqcount_end(&vdata->seq);
 }
@@ -1145,11 +1138,7 @@ static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz,
 
 static inline u64 get_kernel_ns(void)
 {
-       struct timespec ts;
-
-       ktime_get_ts(&ts);
-       monotonic_to_bootbased(&ts);
-       return timespec_to_ns(&ts);
+       return ktime_get_boot_ns();
 }
 
 #ifdef CONFIG_X86_64
@@ -1414,23 +1403,22 @@ static inline u64 vgettsc(cycle_t *cycle_now)
        return v * gtod->clock.mult;
 }
 
-static int do_monotonic(struct timespec *ts, cycle_t *cycle_now)
+static int do_monotonic_boot(s64 *t, cycle_t *cycle_now)
 {
+       struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
        unsigned long seq;
-       u64 ns;
        int mode;
-       struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
+       u64 ns;
 
-       ts->tv_nsec = 0;
        do {
                seq = read_seqcount_begin(&gtod->seq);
                mode = gtod->clock.vclock_mode;
-               ts->tv_sec = gtod->monotonic_time_sec;
-               ns = gtod->monotonic_time_snsec;
+               ns = gtod->nsec_base;
                ns += vgettsc(cycle_now);
                ns >>= gtod->clock.shift;
+               ns += gtod->boot_ns;
        } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
-       timespec_add_ns(ts, ns);
+       *t = ns;
 
        return mode;
 }
@@ -1438,19 +1426,11 @@ static int do_monotonic(struct timespec *ts, cycle_t *cycle_now)
 /* returns true if host is using tsc clocksource */
 static bool kvm_get_time_and_clockread(s64 *kernel_ns, cycle_t *cycle_now)
 {
-       struct timespec ts;
-
        /* checked again under seqlock below */
        if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC)
                return false;
 
-       if (do_monotonic(&ts, cycle_now) != VCLOCK_TSC)
-               return false;
-
-       monotonic_to_bootbased(&ts);
-       *kernel_ns = timespec_to_ns(&ts);
-
-       return true;
+       return do_monotonic_boot(kernel_ns, cycle_now) == VCLOCK_TSC;
 }
 #endif
 
index f953c96..ebc4c73 100644 (file)
@@ -49,7 +49,7 @@
 #include <asm/uaccess.h>
 #include <linux/sysrq.h>
 #include <linux/timer.h>
-#include <linux/time.h>
+#include <linux/hrtimer.h>
 
 #define VERSION_STR "0.9.1"
 
@@ -117,24 +117,7 @@ __setup("hcheck_reboot", hangcheck_parse_reboot);
 __setup("hcheck_dump_tasks", hangcheck_parse_dump_tasks);
 #endif /* not MODULE */
 
-#if defined(CONFIG_S390)
-# define HAVE_MONOTONIC
-# define TIMER_FREQ 1000000000ULL
-#else
-# define TIMER_FREQ 1000000000ULL
-#endif
-
-#ifdef HAVE_MONOTONIC
-extern unsigned long long monotonic_clock(void);
-#else
-static inline unsigned long long monotonic_clock(void)
-{
-       struct timespec ts;
-       getrawmonotonic(&ts);
-       return timespec_to_ns(&ts);
-}
-#endif  /* HAVE_MONOTONIC */
-
+#define TIMER_FREQ 1000000000ULL
 
 /* Last time scheduled */
 static unsigned long long hangcheck_tsc, hangcheck_tsc_margin;
@@ -143,12 +126,11 @@ static void hangcheck_fire(unsigned long);
 
 static DEFINE_TIMER(hangcheck_ticktock, hangcheck_fire, 0, 0);
 
-
 static void hangcheck_fire(unsigned long data)
 {
        unsigned long long cur_tsc, tsc_diff;
 
-       cur_tsc = monotonic_clock();
+       cur_tsc = ktime_get_ns();
 
        if (cur_tsc > hangcheck_tsc)
                tsc_diff = cur_tsc - hangcheck_tsc;
@@ -177,7 +159,7 @@ static void hangcheck_fire(unsigned long data)
                        tsc_diff, tsc_diff - hangcheck_tick*TIMER_FREQ);
 #endif
        mod_timer(&hangcheck_ticktock, jiffies + (hangcheck_tick*HZ));
-       hangcheck_tsc = monotonic_clock();
+       hangcheck_tsc = ktime_get_ns();
 }
 
 
@@ -185,16 +167,11 @@ static int __init hangcheck_init(void)
 {
        printk("Hangcheck: starting hangcheck timer %s (tick is %d seconds, margin is %d seconds).\n",
               VERSION_STR, hangcheck_tick, hangcheck_margin);
-#if defined (HAVE_MONOTONIC)
-       printk("Hangcheck: Using monotonic_clock().\n");
-#else
-       printk("Hangcheck: Using getrawmonotonic().\n");
-#endif  /* HAVE_MONOTONIC */
        hangcheck_tsc_margin =
                (unsigned long long)(hangcheck_margin + hangcheck_tick);
        hangcheck_tsc_margin *= (unsigned long long)TIMER_FREQ;
 
-       hangcheck_tsc = monotonic_clock();
+       hangcheck_tsc = ktime_get_ns();
        mod_timer(&hangcheck_ticktock, jiffies + (hangcheck_tick*HZ));
 
        return 0;
index 065131c..cfd6519 100644 (file)
@@ -1,3 +1,5 @@
+menu "Clock Source drivers"
+
 config CLKSRC_OF
        bool
 
@@ -125,6 +127,7 @@ config CLKSRC_METAG_GENERIC
 
 config CLKSRC_EXYNOS_MCT
        def_bool y if ARCH_EXYNOS
+       depends on !ARM64
        help
          Support for Multi Core Timer controller on Exynos SoCs.
 
@@ -149,6 +152,11 @@ config VF_PIT_TIMER
 config SYS_SUPPORTS_SH_CMT
         bool
 
+config MTK_TIMER
+       select CLKSRC_OF
+       select CLKSRC_MMIO
+       bool
+
 config SYS_SUPPORTS_SH_MTU2
         bool
 
@@ -173,7 +181,7 @@ config SH_TIMER_MTU2
        default SYS_SUPPORTS_SH_MTU2
        help
          This enables build of a clockevent driver for the Multi-Function
-         Timer Pulse Unit 2 (TMU2) hardware available on SoCs from Renesas.
+         Timer Pulse Unit 2 (MTU2) hardware available on SoCs from Renesas.
          This hardware comes with 16 bit-timer registers.
 
 config SH_TIMER_TMU
@@ -187,7 +195,7 @@ config SH_TIMER_TMU
 
 config EM_TIMER_STI
        bool "Renesas STI timer driver" if COMPILE_TEST
-       depends on GENERIC_CLOCKEVENTS
+       depends on GENERIC_CLOCKEVENTS && HAS_IOMEM
        default SYS_SUPPORTS_EM_STI
        help
          This enables build of a clocksource and clockevent driver for
@@ -207,3 +215,5 @@ config CLKSRC_VERSATILE
          counter available in the "System Registers" block of
          ARM Versatile, RealView and Versatile Express reference
          platforms.
+
+endmenu
index 800b130..7fd9fd1 100644 (file)
@@ -16,9 +16,11 @@ obj-$(CONFIG_CLKSRC_DBX500_PRCMU)    += clksrc-dbx500-prcmu.o
 obj-$(CONFIG_ARMADA_370_XP_TIMER)      += time-armada-370-xp.o
 obj-$(CONFIG_ORION_TIMER)      += time-orion.o
 obj-$(CONFIG_ARCH_BCM2835)     += bcm2835_timer.o
+obj-$(CONFIG_ARCH_CLPS711X)    += clps711x-timer.o
 obj-$(CONFIG_ARCH_MARCO)       += timer-marco.o
 obj-$(CONFIG_ARCH_MOXART)      += moxart_timer.o
 obj-$(CONFIG_ARCH_MXS)         += mxs_timer.o
+obj-$(CONFIG_ARCH_PXA)         += pxa_timer.o
 obj-$(CONFIG_ARCH_PRIMA2)      += timer-prima2.o
 obj-$(CONFIG_ARCH_U300)                += timer-u300.o
 obj-$(CONFIG_SUN4I_TIMER)      += sun4i_timer.o
@@ -34,6 +36,7 @@ obj-$(CONFIG_CLKSRC_SAMSUNG_PWM)      += samsung_pwm_timer.o
 obj-$(CONFIG_FSL_FTM_TIMER)    += fsl_ftm_timer.o
 obj-$(CONFIG_VF_PIT_TIMER)     += vf_pit_timer.o
 obj-$(CONFIG_CLKSRC_QCOM)      += qcom-timer.o
+obj-$(CONFIG_MTK_TIMER)                += mtk_timer.o
 
 obj-$(CONFIG_ARM_ARCH_TIMER)           += arm_arch_timer.o
 obj-$(CONFIG_ARM_GLOBAL_TIMER)         += arm_global_timer.o
diff --git a/drivers/clocksource/clps711x-timer.c b/drivers/clocksource/clps711x-timer.c
new file mode 100644 (file)
index 0000000..d83ec1f
--- /dev/null
@@ -0,0 +1,131 @@
+/*
+ *  Cirrus Logic CLPS711X clocksource driver
+ *
+ *  Copyright (C) 2014 Alexander Shiyan <shc_work@mail.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/sched_clock.h>
+#include <linux/slab.h>
+
+enum {
+       CLPS711X_CLKSRC_CLOCKSOURCE,
+       CLPS711X_CLKSRC_CLOCKEVENT,
+};
+
+static void __iomem *tcd;
+
+static u64 notrace clps711x_sched_clock_read(void)
+{
+       return ~readw(tcd);
+}
+
+static int __init _clps711x_clksrc_init(struct clk *clock, void __iomem *base)
+{
+       unsigned long rate;
+
+       if (!base)
+               return -ENOMEM;
+       if (IS_ERR(clock))
+               return PTR_ERR(clock);
+
+       rate = clk_get_rate(clock);
+
+       tcd = base;
+
+       clocksource_mmio_init(tcd, "clps711x-clocksource", rate, 300, 16,
+                             clocksource_mmio_readw_down);
+
+       sched_clock_register(clps711x_sched_clock_read, 16, rate);
+
+       return 0;
+}
+
+static irqreturn_t clps711x_timer_interrupt(int irq, void *dev_id)
+{
+       struct clock_event_device *evt = dev_id;
+
+       evt->event_handler(evt);
+
+       return IRQ_HANDLED;
+}
+
+static void clps711x_clockevent_set_mode(enum clock_event_mode mode,
+                                        struct clock_event_device *evt)
+{
+}
+
+static int __init _clps711x_clkevt_init(struct clk *clock, void __iomem *base,
+                                       unsigned int irq)
+{
+       struct clock_event_device *clkevt;
+       unsigned long rate;
+
+       if (!irq)
+               return -EINVAL;
+       if (!base)
+               return -ENOMEM;
+       if (IS_ERR(clock))
+               return PTR_ERR(clock);
+
+       clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL);
+       if (!clkevt)
+               return -ENOMEM;
+
+       rate = clk_get_rate(clock);
+
+       /* Set Timer prescaler */
+       writew(DIV_ROUND_CLOSEST(rate, HZ), base);
+
+       clkevt->name = "clps711x-clockevent";
+       clkevt->rating = 300;
+       clkevt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_C3STOP;
+       clkevt->set_mode = clps711x_clockevent_set_mode;
+       clkevt->cpumask = cpumask_of(0);
+       clockevents_config_and_register(clkevt, HZ, 0, 0);
+
+       return request_irq(irq, clps711x_timer_interrupt, IRQF_TIMER,
+                          "clps711x-timer", clkevt);
+}
+
+void __init clps711x_clksrc_init(void __iomem *tc1_base, void __iomem *tc2_base,
+                                unsigned int irq)
+{
+       struct clk *tc1 = clk_get_sys("clps711x-timer.0", NULL);
+       struct clk *tc2 = clk_get_sys("clps711x-timer.1", NULL);
+
+       BUG_ON(_clps711x_clksrc_init(tc1, tc1_base));
+       BUG_ON(_clps711x_clkevt_init(tc2, tc2_base, irq));
+}
+
+#ifdef CONFIG_CLKSRC_OF
+static void __init clps711x_timer_init(struct device_node *np)
+{
+       unsigned int irq = irq_of_parse_and_map(np, 0);
+       struct clk *clock = of_clk_get(np, 0);
+       void __iomem *base = of_iomap(np, 0);
+
+       switch (of_alias_get_id(np, "timer")) {
+       case CLPS711X_CLKSRC_CLOCKSOURCE:
+               BUG_ON(_clps711x_clksrc_init(clock, base));
+               break;
+       case CLPS711X_CLKSRC_CLOCKEVENT:
+               BUG_ON(_clps711x_clkevt_init(clock, base, irq));
+               break;
+       default:
+               break;
+       }
+}
+CLOCKSOURCE_OF_DECLARE(clps711x, "cirrus,clps711x-timer", clps711x_timer_init);
+#endif
index ab51bf2..9403061 100644 (file)
@@ -94,7 +94,7 @@ static void exynos4_mct_write(unsigned int value, unsigned long offset)
        u32 mask;
        u32 i;
 
-       __raw_writel(value, reg_base + offset);
+       writel_relaxed(value, reg_base + offset);
 
        if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) {
                stat_addr = (offset & ~EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
@@ -144,8 +144,8 @@ static void exynos4_mct_write(unsigned int value, unsigned long offset)
 
        /* Wait maximum 1 ms until written values are applied */
        for (i = 0; i < loops_per_jiffy / 1000 * HZ; i++)
-               if (__raw_readl(reg_base + stat_addr) & mask) {
-                       __raw_writel(mask, reg_base + stat_addr);
+               if (readl_relaxed(reg_base + stat_addr) & mask) {
+                       writel_relaxed(mask, reg_base + stat_addr);
                        return;
                }
 
@@ -157,28 +157,51 @@ static void exynos4_mct_frc_start(void)
 {
        u32 reg;
 
-       reg = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON);
+       reg = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON);
        reg |= MCT_G_TCON_START;
        exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON);
 }
 
-static cycle_t notrace _exynos4_frc_read(void)
+/**
+ * exynos4_read_count_64 - Read all 64-bits of the global counter
+ *
+ * This will read all 64-bits of the global counter taking care to make sure
+ * that the upper and lower half match.  Note that reading the MCT can be quite
+ * slow (hundreds of nanoseconds) so you should use the 32-bit (lower half
+ * only) version when possible.
+ *
+ * Returns the number of cycles in the global counter.
+ */
+static u64 exynos4_read_count_64(void)
 {
        unsigned int lo, hi;
-       u32 hi2 = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_U);
+       u32 hi2 = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_U);
 
        do {
                hi = hi2;
-               lo = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_L);
-               hi2 = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_U);
+               lo = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_L);
+               hi2 = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_U);
        } while (hi != hi2);
 
        return ((cycle_t)hi << 32) | lo;
 }
 
+/**
+ * exynos4_read_count_32 - Read the lower 32-bits of the global counter
+ *
+ * This will read just the lower 32-bits of the global counter.  This is marked
+ * as notrace so it can be used by the scheduler clock.
+ *
+ * Returns the number of cycles in the global counter (lower 32 bits).
+ */
+static u32 notrace exynos4_read_count_32(void)
+{
+       return readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_L);
+}
+
 static cycle_t exynos4_frc_read(struct clocksource *cs)
 {
-       return _exynos4_frc_read();
+       return exynos4_read_count_32();
 }
 
 static void exynos4_frc_resume(struct clocksource *cs)
@@ -190,21 +213,23 @@ struct clocksource mct_frc = {
        .name           = "mct-frc",
        .rating         = 400,
        .read           = exynos4_frc_read,
-       .mask           = CLOCKSOURCE_MASK(64),
+       .mask           = CLOCKSOURCE_MASK(32),
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
        .resume         = exynos4_frc_resume,
 };
 
 static u64 notrace exynos4_read_sched_clock(void)
 {
-       return _exynos4_frc_read();
+       return exynos4_read_count_32();
 }
 
 static struct delay_timer exynos4_delay_timer;
 
 static cycles_t exynos4_read_current_timer(void)
 {
-       return _exynos4_frc_read();
+       BUILD_BUG_ON_MSG(sizeof(cycles_t) != sizeof(u32),
+                        "cycles_t needs to move to 32-bit for ARM64 usage");
+       return exynos4_read_count_32();
 }
 
 static void __init exynos4_clocksource_init(void)
@@ -218,14 +243,14 @@ static void __init exynos4_clocksource_init(void)
        if (clocksource_register_hz(&mct_frc, clk_rate))
                panic("%s: can't register clocksource\n", mct_frc.name);
 
-       sched_clock_register(exynos4_read_sched_clock, 64, clk_rate);
+       sched_clock_register(exynos4_read_sched_clock, 32, clk_rate);
 }
 
 static void exynos4_mct_comp0_stop(void)
 {
        unsigned int tcon;
 
-       tcon = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON);
+       tcon = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON);
        tcon &= ~(MCT_G_TCON_COMP0_ENABLE | MCT_G_TCON_COMP0_AUTO_INC);
 
        exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON);
@@ -238,14 +263,14 @@ static void exynos4_mct_comp0_start(enum clock_event_mode mode,
        unsigned int tcon;
        cycle_t comp_cycle;
 
-       tcon = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON);
+       tcon = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON);
 
        if (mode == CLOCK_EVT_MODE_PERIODIC) {
                tcon |= MCT_G_TCON_COMP0_AUTO_INC;
                exynos4_mct_write(cycles, EXYNOS4_MCT_G_COMP0_ADD_INCR);
        }
 
-       comp_cycle = exynos4_frc_read(&mct_frc) + cycles;
+       comp_cycle = exynos4_read_count_64() + cycles;
        exynos4_mct_write((u32)comp_cycle, EXYNOS4_MCT_G_COMP0_L);
        exynos4_mct_write((u32)(comp_cycle >> 32), EXYNOS4_MCT_G_COMP0_U);
 
@@ -327,7 +352,7 @@ static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt)
        unsigned long mask = MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START;
        unsigned long offset = mevt->base + MCT_L_TCON_OFFSET;
 
-       tmp = __raw_readl(reg_base + offset);
+       tmp = readl_relaxed(reg_base + offset);
        if (tmp & mask) {
                tmp &= ~mask;
                exynos4_mct_write(tmp, offset);
@@ -349,7 +374,7 @@ static void exynos4_mct_tick_start(unsigned long cycles,
        /* enable MCT tick interrupt */
        exynos4_mct_write(0x1, mevt->base + MCT_L_INT_ENB_OFFSET);
 
-       tmp = __raw_readl(reg_base + mevt->base + MCT_L_TCON_OFFSET);
+       tmp = readl_relaxed(reg_base + mevt->base + MCT_L_TCON_OFFSET);
        tmp |= MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START |
               MCT_L_TCON_INTERVAL_MODE;
        exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET);
@@ -401,7 +426,7 @@ static int exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
                exynos4_mct_tick_stop(mevt);
 
        /* Clear the MCT tick interrupt */
-       if (__raw_readl(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1) {
+       if (readl_relaxed(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1) {
                exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
                return 1;
        } else {
diff --git a/drivers/clocksource/mtk_timer.c b/drivers/clocksource/mtk_timer.c
new file mode 100644 (file)
index 0000000..32a3d25
--- /dev/null
@@ -0,0 +1,261 @@
+/*
+ * Mediatek SoCs General-Purpose Timer handling.
+ *
+ * Copyright (C) 2014 Matthias Brugger
+ *
+ * Matthias Brugger <matthias.bgg@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqreturn.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+
+#define GPT_IRQ_EN_REG         0x00
+#define GPT_IRQ_ENABLE(val)    BIT((val) - 1)
+#define GPT_IRQ_ACK_REG                0x08
+#define GPT_IRQ_ACK(val)       BIT((val) - 1)
+
+#define TIMER_CTRL_REG(val)    (0x10 * (val))
+#define TIMER_CTRL_OP(val)     (((val) & 0x3) << 4)
+#define TIMER_CTRL_OP_ONESHOT  (0)
+#define TIMER_CTRL_OP_REPEAT   (1)
+#define TIMER_CTRL_OP_FREERUN  (3)
+#define TIMER_CTRL_CLEAR       (2)
+#define TIMER_CTRL_ENABLE      (1)
+#define TIMER_CTRL_DISABLE     (0)
+
+#define TIMER_CLK_REG(val)     (0x04 + (0x10 * (val)))
+#define TIMER_CLK_SRC(val)     (((val) & 0x1) << 4)
+#define TIMER_CLK_SRC_SYS13M   (0)
+#define TIMER_CLK_SRC_RTC32K   (1)
+#define TIMER_CLK_DIV1         (0x0)
+#define TIMER_CLK_DIV2         (0x1)
+
+#define TIMER_CNT_REG(val)     (0x08 + (0x10 * (val)))
+#define TIMER_CMP_REG(val)     (0x0C + (0x10 * (val)))
+
+#define GPT_CLK_EVT    1
+#define GPT_CLK_SRC    2
+
+/* Per-device state for the MTK general-purpose timer block. */
+struct mtk_clock_event_device {
+       void __iomem *gpt_base;         /* mapped GPT register base */
+       u32 ticks_per_jiffy;            /* reload value for periodic mode */
+       struct clock_event_device dev;  /* embedded clockevent; see to_mtk_clk() */
+};
+
+/* Recover the wrapping mtk_clock_event_device from the embedded clockevent. */
+static inline struct mtk_clock_event_device *to_mtk_clk(
+                               struct clock_event_device *c)
+{
+       return container_of(c, struct mtk_clock_event_device, dev);
+}
+
+/* Stop @timer by clearing its enable bit; other control bits are kept. */
+static void mtk_clkevt_time_stop(struct mtk_clock_event_device *evt, u8 timer)
+{
+       u32 val;
+
+       val = readl(evt->gpt_base + TIMER_CTRL_REG(timer));
+       writel(val & ~TIMER_CTRL_ENABLE, evt->gpt_base +
+                       TIMER_CTRL_REG(timer));
+}
+
+/* Program the compare register of @timer with @delay (in timer ticks). */
+static void mtk_clkevt_time_setup(struct mtk_clock_event_device *evt,
+                               unsigned long delay, u8 timer)
+{
+       writel(delay, evt->gpt_base + TIMER_CMP_REG(timer));
+}
+
+/*
+ * Start @timer in either repeat (periodic) or one-shot mode.  Any pending
+ * interrupt is acknowledged first, and the counter is cleared on enable so
+ * the programmed compare value measures a full interval.
+ */
+static void mtk_clkevt_time_start(struct mtk_clock_event_device *evt,
+               bool periodic, u8 timer)
+{
+       u32 val;
+
+       /* Acknowledge interrupt */
+       writel(GPT_IRQ_ACK(timer), evt->gpt_base + GPT_IRQ_ACK_REG);
+
+       val = readl(evt->gpt_base + TIMER_CTRL_REG(timer));
+
+       /* Clear 2 bit timer operation mode field */
+       val &= ~TIMER_CTRL_OP(0x3);
+
+       if (periodic)
+               val |= TIMER_CTRL_OP(TIMER_CTRL_OP_REPEAT);
+       else
+               val |= TIMER_CTRL_OP(TIMER_CTRL_OP_ONESHOT);
+
+       writel(val | TIMER_CTRL_ENABLE | TIMER_CTRL_CLEAR,
+              evt->gpt_base + TIMER_CTRL_REG(timer));
+}
+
+/*
+ * clockevents set_mode callback.  The event timer is always stopped first;
+ * periodic mode re-arms it with one jiffy worth of ticks, while one-shot
+ * mode leaves it stopped until set_next_event programs a deadline.
+ */
+static void mtk_clkevt_mode(enum clock_event_mode mode,
+                               struct clock_event_device *clk)
+{
+       struct mtk_clock_event_device *evt = to_mtk_clk(clk);
+
+       mtk_clkevt_time_stop(evt, GPT_CLK_EVT);
+
+       switch (mode) {
+       case CLOCK_EVT_MODE_PERIODIC:
+               mtk_clkevt_time_setup(evt, evt->ticks_per_jiffy, GPT_CLK_EVT);
+               mtk_clkevt_time_start(evt, true, GPT_CLK_EVT);
+               break;
+       case CLOCK_EVT_MODE_ONESHOT:
+               /* Timer is enabled in set_next_event */
+               break;
+       case CLOCK_EVT_MODE_UNUSED:
+       case CLOCK_EVT_MODE_SHUTDOWN:
+       default:
+               /* No more interrupts will occur as source is disabled */
+               break;
+       }
+}
+
+/*
+ * clockevents set_next_event callback: arm a one-shot expiry @event ticks
+ * from now.  Stop/setup/start ordering avoids racing a running counter.
+ * Always reports success; the hardware clears the counter on start.
+ */
+static int mtk_clkevt_next_event(unsigned long event,
+                                  struct clock_event_device *clk)
+{
+       struct mtk_clock_event_device *evt = to_mtk_clk(clk);
+
+       mtk_clkevt_time_stop(evt, GPT_CLK_EVT);
+       mtk_clkevt_time_setup(evt, event, GPT_CLK_EVT);
+       mtk_clkevt_time_start(evt, false, GPT_CLK_EVT);
+
+       return 0;
+}
+
+/* Timer IRQ handler: ack the event-timer interrupt and run the tick. */
+static irqreturn_t mtk_timer_interrupt(int irq, void *dev_id)
+{
+       struct mtk_clock_event_device *evt = dev_id;
+
+       /* Acknowledge timer0 irq */
+       writel(GPT_IRQ_ACK(GPT_CLK_EVT), evt->gpt_base + GPT_IRQ_ACK_REG);
+       evt->dev.event_handler(&evt->dev);
+
+       return IRQ_HANDLED;
+}
+
+/* Quiesce the GPT block: mask and acknowledge all timer interrupts. */
+static void mtk_timer_global_reset(struct mtk_clock_event_device *evt)
+{
+       /* Disable all interrupts */
+       writel(0x0, evt->gpt_base + GPT_IRQ_EN_REG);
+       /* Acknowledge all interrupts */
+       writel(0x3f, evt->gpt_base + GPT_IRQ_ACK_REG);
+}
+
+/*
+ * Bring up @timer in operating mode @option: disable and clear it, select
+ * the 13 MHz system clock undivided, zero the compare register, then enable.
+ */
+static void
+mtk_timer_setup(struct mtk_clock_event_device *evt, u8 timer, u8 option)
+{
+       writel(TIMER_CTRL_CLEAR | TIMER_CTRL_DISABLE,
+               evt->gpt_base + TIMER_CTRL_REG(timer));
+
+       writel(TIMER_CLK_SRC(TIMER_CLK_SRC_SYS13M) | TIMER_CLK_DIV1,
+                       evt->gpt_base + TIMER_CLK_REG(timer));
+
+       writel(0x0, evt->gpt_base + TIMER_CMP_REG(timer));
+
+       writel(TIMER_CTRL_OP(option) | TIMER_CTRL_ENABLE,
+                       evt->gpt_base + TIMER_CTRL_REG(timer));
+}
+
+/* Unmask the interrupt of @timer in the shared GPT enable register. */
+static void mtk_timer_enable_irq(struct mtk_clock_event_device *evt, u8 timer)
+{
+       u32 val;
+
+       val = readl(evt->gpt_base + GPT_IRQ_EN_REG);
+       writel(val | GPT_IRQ_ENABLE(timer),
+                       evt->gpt_base + GPT_IRQ_EN_REG);
+}
+
+/*
+ * mtk_timer_init - DT probe for the MTK general-purpose timer block.
+ *
+ * Maps the GPT registers, acquires IRQ and clock, then registers GPT2 as
+ * a free-running 32-bit clocksource and GPT1 as the tick clockevent.
+ *
+ * Fixes over the original: every error path now also frees @evt (it was
+ * leaked on all of them, including the of_io_request_and_map() failure
+ * which unwound nothing), and the pr_warn() messages carry their missing
+ * trailing newlines.
+ */
+static void __init mtk_timer_init(struct device_node *node)
+{
+       struct mtk_clock_event_device *evt;
+       struct resource res;
+       unsigned long rate = 0;
+       struct clk *clk;
+
+       evt = kzalloc(sizeof(*evt), GFP_KERNEL);
+       if (!evt) {
+               pr_warn("Can't allocate mtk clock event driver struct\n");
+               return;
+       }
+
+       evt->dev.name = "mtk_tick";
+       evt->dev.rating = 300;
+       evt->dev.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+       evt->dev.set_mode = mtk_clkevt_mode;
+       evt->dev.set_next_event = mtk_clkevt_next_event;
+       evt->dev.cpumask = cpu_possible_mask;
+
+       evt->gpt_base = of_io_request_and_map(node, 0, "mtk-timer");
+       if (IS_ERR(evt->gpt_base)) {
+               pr_warn("Can't get resource\n");
+               goto err_kfree;
+       }
+
+       evt->dev.irq = irq_of_parse_and_map(node, 0);
+       if (evt->dev.irq <= 0) {
+               pr_warn("Can't parse IRQ\n");
+               goto err_mem;
+       }
+
+       clk = of_clk_get(node, 0);
+       if (IS_ERR(clk)) {
+               pr_warn("Can't get timer clock\n");
+               goto err_irq;
+       }
+
+       if (clk_prepare_enable(clk)) {
+               pr_warn("Can't prepare clock\n");
+               goto err_clk_put;
+       }
+       rate = clk_get_rate(clk);
+
+       if (request_irq(evt->dev.irq, mtk_timer_interrupt,
+                       IRQF_TIMER | IRQF_IRQPOLL, "mtk_timer", evt)) {
+               pr_warn("failed to setup irq %d\n", evt->dev.irq);
+               goto err_clk_disable;
+       }
+
+       evt->ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
+
+       mtk_timer_global_reset(evt);
+
+       /* Configure clock source */
+       mtk_timer_setup(evt, GPT_CLK_SRC, TIMER_CTRL_OP_FREERUN);
+       clocksource_mmio_init(evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC),
+                       node->name, rate, 300, 32, clocksource_mmio_readl_up);
+
+       /* Configure clock event */
+       mtk_timer_setup(evt, GPT_CLK_EVT, TIMER_CTRL_OP_REPEAT);
+       mtk_timer_enable_irq(evt, GPT_CLK_EVT);
+
+       clockevents_config_and_register(&evt->dev, rate, 0x3,
+                                       0xffffffff);
+       return;
+
+err_clk_disable:
+       clk_disable_unprepare(clk);
+err_clk_put:
+       clk_put(clk);
+err_irq:
+       irq_dispose_mapping(evt->dev.irq);
+err_mem:
+       iounmap(evt->gpt_base);
+       of_address_to_resource(node, 0, &res);
+       release_mem_region(res.start, resource_size(&res));
+err_kfree:
+       kfree(evt);
+}
+CLOCKSOURCE_OF_DECLARE(mtk_mt6577, "mediatek,mt6577-timer", mtk_timer_init);
diff --git a/drivers/clocksource/pxa_timer.c b/drivers/clocksource/pxa_timer.c
new file mode 100644 (file)
index 0000000..941f3f3
--- /dev/null
@@ -0,0 +1,227 @@
+/*
+ * arch/arm/mach-pxa/time.c
+ *
+ * PXA clocksource, clockevents, and OST interrupt handlers.
+ * Copyright (c) 2007 by Bill Gatliff <bgat@billgatliff.com>.
+ *
+ * Derived from Nicolas Pitre's PXA timer handler Copyright (c) 2001
+ * by MontaVista Software, Inc.  (Nico, your code rocks!)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/sched_clock.h>
+
+#include <asm/div64.h>
+
+#define OSMR0          0x00    /* OS Timer 0 Match Register */
+#define OSMR1          0x04    /* OS Timer 1 Match Register */
+#define OSMR2          0x08    /* OS Timer 2 Match Register */
+#define OSMR3          0x0C    /* OS Timer 3 Match Register */
+
+#define OSCR           0x10    /* OS Timer Counter Register */
+#define OSSR           0x14    /* OS Timer Status Register */
+#define OWER           0x18    /* OS Timer Watchdog Enable Register */
+#define OIER           0x1C    /* OS Timer Interrupt Enable Register */
+
+#define OSSR_M3                (1 << 3)        /* Match status channel 3 */
+#define OSSR_M2                (1 << 2)        /* Match status channel 2 */
+#define OSSR_M1                (1 << 1)        /* Match status channel 1 */
+#define OSSR_M0                (1 << 0)        /* Match status channel 0 */
+
+#define OIER_E0                (1 << 0)        /* Interrupt enable channel 0 */
+
+/*
+ * This is PXA's sched_clock implementation. This has a resolution
+ * of at least 308 ns and a maximum value of 208 days.
+ *
+ * The return value is guaranteed to be monotonic in that range as
+ * long as there is always less than 582 seconds between successive
+ * calls to sched_clock() which should always be the case in practice.
+ */
+
+#define timer_readl(reg) readl_relaxed(timer_base + (reg))
+#define timer_writel(val, reg) writel_relaxed((val), timer_base + (reg))
+
+static void __iomem *timer_base;
+
+/* sched_clock read hook: lower 32 bits of the free-running OS counter. */
+static u64 notrace pxa_read_sched_clock(void)
+{
+       return timer_readl(OSCR);
+}
+
+
+#define MIN_OSCR_DELTA 16
+
+/*
+ * OS timer 0 match interrupt: disarm channel 0 (one-shot semantics),
+ * clear its match status, then deliver the clockevent.
+ */
+static irqreturn_t
+pxa_ost0_interrupt(int irq, void *dev_id)
+{
+       struct clock_event_device *c = dev_id;
+
+       /* Disarm the compare/match, signal the event. */
+       timer_writel(timer_readl(OIER) & ~OIER_E0, OIER);
+       timer_writel(OSSR_M0, OSSR);
+       c->event_handler(c);
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * Arm OSMR0 to fire @delta ticks from now.  The OSCR is re-read after
+ * programming the match: if the deadline is already within MIN_OSCR_DELTA
+ * (or has passed), the interrupt could be missed, so -ETIME tells the
+ * clockevents core to retry with a larger delta.
+ */
+static int
+pxa_osmr0_set_next_event(unsigned long delta, struct clock_event_device *dev)
+{
+       unsigned long next, oscr;
+
+       timer_writel(timer_readl(OIER) | OIER_E0, OIER);
+       next = timer_readl(OSCR) + delta;
+       timer_writel(next, OSMR0);
+       oscr = timer_readl(OSCR);
+
+       return (signed)(next - oscr) <= MIN_OSCR_DELTA ? -ETIME : 0;
+}
+
+/*
+ * clockevents set_mode callback.  Only one-shot operation is supported;
+ * every transition that could leave a stale match pending disables the
+ * channel 0 interrupt and clears its status.  PERIODIC is never selected
+ * (the device does not advertise CLOCK_EVT_FEAT_PERIODIC).
+ */
+static void
+pxa_osmr0_set_mode(enum clock_event_mode mode, struct clock_event_device *dev)
+{
+       switch (mode) {
+       case CLOCK_EVT_MODE_ONESHOT:
+               timer_writel(timer_readl(OIER) & ~OIER_E0, OIER);
+               timer_writel(OSSR_M0, OSSR);
+               break;
+
+       case CLOCK_EVT_MODE_UNUSED:
+       case CLOCK_EVT_MODE_SHUTDOWN:
+               /* initializing, released, or preparing for suspend */
+               timer_writel(timer_readl(OIER) & ~OIER_E0, OIER);
+               timer_writel(OSSR_M0, OSSR);
+               break;
+
+       case CLOCK_EVT_MODE_RESUME:
+       case CLOCK_EVT_MODE_PERIODIC:
+               break;
+       }
+}
+
+#ifdef CONFIG_PM
+static unsigned long osmr[4], oier, oscr;
+
+/* Save all OS-timer state (match registers, IRQ enables, counter)
+ * so it can be restored by pxa_timer_resume() after suspend. */
+static void pxa_timer_suspend(struct clock_event_device *cedev)
+{
+       osmr[0] = timer_readl(OSMR0);
+       osmr[1] = timer_readl(OSMR1);
+       osmr[2] = timer_readl(OSMR2);
+       osmr[3] = timer_readl(OSMR3);
+       oier = timer_readl(OIER);
+       oscr = timer_readl(OSCR);
+}
+
+/* Restore the state captured by pxa_timer_suspend(), nudging OSMR0
+ * forward if needed so the pending one-shot cannot be missed. */
+static void pxa_timer_resume(struct clock_event_device *cedev)
+{
+       /*
+        * Ensure that we have at least MIN_OSCR_DELTA between match
+        * register 0 and the OSCR, to guarantee that we will receive
+        * the one-shot timer interrupt.  We adjust OSMR0 in preference
+        * to OSCR to guarantee that OSCR is monotonically incrementing.
+        */
+       if (osmr[0] - oscr < MIN_OSCR_DELTA)
+               osmr[0] += MIN_OSCR_DELTA;
+
+       timer_writel(osmr[0], OSMR0);
+       timer_writel(osmr[1], OSMR1);
+       timer_writel(osmr[2], OSMR2);
+       timer_writel(osmr[3], OSMR3);
+       timer_writel(oier, OIER);
+       timer_writel(oscr, OSCR);
+}
+#else
+#define pxa_timer_suspend NULL
+#define pxa_timer_resume NULL
+#endif
+
+/* One-shot clockevent backed by OS-timer match channel 0. */
+static struct clock_event_device ckevt_pxa_osmr0 = {
+       .name           = "osmr0",
+       .features       = CLOCK_EVT_FEAT_ONESHOT,
+       .rating         = 200,
+       .set_next_event = pxa_osmr0_set_next_event,
+       .set_mode       = pxa_osmr0_set_mode,
+       .suspend        = pxa_timer_suspend,
+       .resume         = pxa_timer_resume,
+};
+
+/* IRQ action for the OST0 match interrupt; dev_id carries the clockevent. */
+static struct irqaction pxa_ost0_irq = {
+       .name           = "ost0",
+       .flags          = IRQF_TIMER | IRQF_IRQPOLL,
+       .handler        = pxa_ost0_interrupt,
+       .dev_id         = &ckevt_pxa_osmr0,
+};
+
+/*
+ * Shared bring-up for both DT and legacy boards: quiesce the timer block,
+ * register the sched_clock hook, the OST0 interrupt, the OSCR-based
+ * clocksource and the OSMR0 clockevent.  Assumes timer_base is mapped.
+ */
+static void pxa_timer_common_init(int irq, unsigned long clock_tick_rate)
+{
+       timer_writel(0, OIER);
+       timer_writel(OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3, OSSR);
+
+       sched_clock_register(pxa_read_sched_clock, 32, clock_tick_rate);
+
+       ckevt_pxa_osmr0.cpumask = cpumask_of(0);
+
+       setup_irq(irq, &pxa_ost0_irq);
+
+       clocksource_mmio_init(timer_base + OSCR, "oscr0", clock_tick_rate, 200,
+                             32, clocksource_mmio_readl_up);
+       clockevents_config_and_register(&ckevt_pxa_osmr0, clock_tick_rate,
+                                       MIN_OSCR_DELTA * 2, 0x7fffffff);
+}
+
+/*
+ * DT probe: map the (watchdog-shared) timer registers, enable the timer
+ * clock and hand off to the common init with OS-timer0's irq.
+ *
+ * NOTE(review): the clk/irq failure paths return without unmapping
+ * timer_base or releasing the clock; since boot cannot proceed usefully
+ * without this timer, that is tolerated here — confirm before reuse.
+ */
+static void __init pxa_timer_dt_init(struct device_node *np)
+{
+       struct clk *clk;
+       int irq;
+
+       /* timer registers are shared with watchdog timer */
+       timer_base = of_iomap(np, 0);
+       if (!timer_base)
+               panic("%s: unable to map resource\n", np->name);
+
+       clk = of_clk_get(np, 0);
+       if (IS_ERR(clk)) {
+               pr_crit("%s: unable to get clk\n", np->name);
+               return;
+       }
+       clk_prepare_enable(clk);
+
+       /* we are only interested in OS-timer0 irq */
+       irq = irq_of_parse_and_map(np, 0);
+       if (irq <= 0) {
+               pr_crit("%s: unable to parse OS-timer0 irq\n", np->name);
+               return;
+       }
+
+       pxa_timer_common_init(irq, clk_get_rate(clk));
+}
+CLOCKSOURCE_OF_DECLARE(pxa_timer, "marvell,pxa-timer", pxa_timer_dt_init);
+
+/*
+ * Legacy timer init for non device-tree boards.
+ */
+/*
+ * Legacy (board-file) entry point: the caller supplies an already-mapped
+ * register base and the tick rate.  The clock is enabled on a best-effort
+ * basis; init continues even if "OSTIMER0" cannot be obtained.
+ */
+void __init pxa_timer_nodt_init(int irq, void __iomem *base,
+       unsigned long clock_tick_rate)
+{
+       struct clk *clk;
+
+       timer_base = base;
+       clk = clk_get(NULL, "OSTIMER0");
+       if (clk && !IS_ERR(clk))
+               clk_prepare_enable(clk);
+       else
+               pr_crit("%s: unable to get clk\n", __func__);
+
+       pxa_timer_common_init(irq, clock_tick_rate);
+}
index dfa7803..2bd13b5 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/ioport.h>
 #include <linux/irq.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/pm_domain.h>
 #include <linux/pm_runtime.h>
@@ -114,14 +115,15 @@ struct sh_cmt_device {
        struct platform_device *pdev;
 
        const struct sh_cmt_info *info;
-       bool legacy;
 
-       void __iomem *mapbase_ch;
        void __iomem *mapbase;
        struct clk *clk;
 
+       raw_spinlock_t lock; /* Protect the shared start/stop register */
+
        struct sh_cmt_channel *channels;
        unsigned int num_channels;
+       unsigned int hw_channels;
 
        bool has_clockevent;
        bool has_clocksource;
@@ -301,14 +303,12 @@ static unsigned long sh_cmt_get_counter(struct sh_cmt_channel *ch,
        return v2;
 }
 
-static DEFINE_RAW_SPINLOCK(sh_cmt_lock);
-
 static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start)
 {
        unsigned long flags, value;
 
        /* start stop register shared by multiple timer channels */
-       raw_spin_lock_irqsave(&sh_cmt_lock, flags);
+       raw_spin_lock_irqsave(&ch->cmt->lock, flags);
        value = sh_cmt_read_cmstr(ch);
 
        if (start)
@@ -317,7 +317,7 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start)
                value &= ~(1 << ch->timer_bit);
 
        sh_cmt_write_cmstr(ch, value);
-       raw_spin_unlock_irqrestore(&sh_cmt_lock, flags);
+       raw_spin_unlock_irqrestore(&ch->cmt->lock, flags);
 }
 
 static int sh_cmt_enable(struct sh_cmt_channel *ch, unsigned long *rate)
@@ -792,7 +792,7 @@ static int sh_cmt_register_clockevent(struct sh_cmt_channel *ch,
        int irq;
        int ret;
 
-       irq = platform_get_irq(ch->cmt->pdev, ch->cmt->legacy ? 0 : ch->index);
+       irq = platform_get_irq(ch->cmt->pdev, ch->index);
        if (irq < 0) {
                dev_err(&ch->cmt->pdev->dev, "ch%u: failed to get irq\n",
                        ch->index);
@@ -863,33 +863,26 @@ static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index,
         * Compute the address of the channel control register block. For the
         * timers with a per-channel start/stop register, compute its address
         * as well.
-        *
-        * For legacy configuration the address has been mapped explicitly.
         */
-       if (cmt->legacy) {
-               ch->ioctrl = cmt->mapbase_ch;
-       } else {
-               switch (cmt->info->model) {
-               case SH_CMT_16BIT:
-                       ch->ioctrl = cmt->mapbase + 2 + ch->hwidx * 6;
-                       break;
-               case SH_CMT_32BIT:
-               case SH_CMT_48BIT:
-                       ch->ioctrl = cmt->mapbase + 0x10 + ch->hwidx * 0x10;
-                       break;
-               case SH_CMT_32BIT_FAST:
-                       /*
-                        * The 32-bit "fast" timer has a single channel at hwidx
-                        * 5 but is located at offset 0x40 instead of 0x60 for
-                        * some reason.
-                        */
-                       ch->ioctrl = cmt->mapbase + 0x40;
-                       break;
-               case SH_CMT_48BIT_GEN2:
-                       ch->iostart = cmt->mapbase + ch->hwidx * 0x100;
-                       ch->ioctrl = ch->iostart + 0x10;
-                       break;
-               }
+       switch (cmt->info->model) {
+       case SH_CMT_16BIT:
+               ch->ioctrl = cmt->mapbase + 2 + ch->hwidx * 6;
+               break;
+       case SH_CMT_32BIT:
+       case SH_CMT_48BIT:
+               ch->ioctrl = cmt->mapbase + 0x10 + ch->hwidx * 0x10;
+               break;
+       case SH_CMT_32BIT_FAST:
+               /*
+                * The 32-bit "fast" timer has a single channel at hwidx 5 but
+                * is located at offset 0x40 instead of 0x60 for some reason.
+                */
+               ch->ioctrl = cmt->mapbase + 0x40;
+               break;
+       case SH_CMT_48BIT_GEN2:
+               ch->iostart = cmt->mapbase + ch->hwidx * 0x100;
+               ch->ioctrl = ch->iostart + 0x10;
+               break;
        }
 
        if (cmt->info->width == (sizeof(ch->max_match_value) * 8))
@@ -900,12 +893,7 @@ static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index,
        ch->match_value = ch->max_match_value;
        raw_spin_lock_init(&ch->lock);
 
-       if (cmt->legacy) {
-               ch->timer_bit = ch->hwidx;
-       } else {
-               ch->timer_bit = cmt->info->model == SH_CMT_48BIT_GEN2
-                             ? 0 : ch->hwidx;
-       }
+       ch->timer_bit = cmt->info->model == SH_CMT_48BIT_GEN2 ? 0 : ch->hwidx;
 
        ret = sh_cmt_register(ch, dev_name(&cmt->pdev->dev),
                              clockevent, clocksource);
@@ -938,75 +926,65 @@ static int sh_cmt_map_memory(struct sh_cmt_device *cmt)
        return 0;
 }
 
-static int sh_cmt_map_memory_legacy(struct sh_cmt_device *cmt)
-{
-       struct sh_timer_config *cfg = cmt->pdev->dev.platform_data;
-       struct resource *res, *res2;
-
-       /* map memory, let mapbase_ch point to our channel */
-       res = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&cmt->pdev->dev, "failed to get I/O memory\n");
-               return -ENXIO;
-       }
-
-       cmt->mapbase_ch = ioremap_nocache(res->start, resource_size(res));
-       if (cmt->mapbase_ch == NULL) {
-               dev_err(&cmt->pdev->dev, "failed to remap I/O memory\n");
-               return -ENXIO;
-       }
-
-       /* optional resource for the shared timer start/stop register */
-       res2 = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 1);
-
-       /* map second resource for CMSTR */
-       cmt->mapbase = ioremap_nocache(res2 ? res2->start :
-                                      res->start - cfg->channel_offset,
-                                      res2 ? resource_size(res2) : 2);
-       if (cmt->mapbase == NULL) {
-               dev_err(&cmt->pdev->dev, "failed to remap I/O second memory\n");
-               iounmap(cmt->mapbase_ch);
-               return -ENXIO;
-       }
-
-       /* identify the model based on the resources */
-       if (resource_size(res) == 6)
-               cmt->info = &sh_cmt_info[SH_CMT_16BIT];
-       else if (res2 && (resource_size(res2) == 4))
-               cmt->info = &sh_cmt_info[SH_CMT_48BIT_GEN2];
-       else
-               cmt->info = &sh_cmt_info[SH_CMT_32BIT];
+static const struct platform_device_id sh_cmt_id_table[] = {
+       { "sh-cmt-16", (kernel_ulong_t)&sh_cmt_info[SH_CMT_16BIT] },
+       { "sh-cmt-32", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT] },
+       { "sh-cmt-32-fast", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT_FAST] },
+       { "sh-cmt-48", (kernel_ulong_t)&sh_cmt_info[SH_CMT_48BIT] },
+       { "sh-cmt-48-gen2", (kernel_ulong_t)&sh_cmt_info[SH_CMT_48BIT_GEN2] },
+       { }
+};
+MODULE_DEVICE_TABLE(platform, sh_cmt_id_table);
 
-       return 0;
-}
+static const struct of_device_id sh_cmt_of_table[] __maybe_unused = {
+       { .compatible = "renesas,cmt-32", .data = &sh_cmt_info[SH_CMT_32BIT] },
+       { .compatible = "renesas,cmt-32-fast", .data = &sh_cmt_info[SH_CMT_32BIT_FAST] },
+       { .compatible = "renesas,cmt-48", .data = &sh_cmt_info[SH_CMT_48BIT] },
+       { .compatible = "renesas,cmt-48-gen2", .data = &sh_cmt_info[SH_CMT_48BIT_GEN2] },
+       { }
+};
+MODULE_DEVICE_TABLE(of, sh_cmt_of_table);
 
-static void sh_cmt_unmap_memory(struct sh_cmt_device *cmt)
+static int sh_cmt_parse_dt(struct sh_cmt_device *cmt)
 {
-       iounmap(cmt->mapbase);
-       if (cmt->mapbase_ch)
-               iounmap(cmt->mapbase_ch);
+       struct device_node *np = cmt->pdev->dev.of_node;
+
+       return of_property_read_u32(np, "renesas,channels-mask",
+                                   &cmt->hw_channels);
 }
 
 static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
 {
-       struct sh_timer_config *cfg = pdev->dev.platform_data;
-       const struct platform_device_id *id = pdev->id_entry;
-       unsigned int hw_channels;
+       unsigned int mask;
+       unsigned int i;
        int ret;
 
        memset(cmt, 0, sizeof(*cmt));
        cmt->pdev = pdev;
+       raw_spin_lock_init(&cmt->lock);
+
+       if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
+               const struct of_device_id *id;
+
+               id = of_match_node(sh_cmt_of_table, pdev->dev.of_node);
+               cmt->info = id->data;
 
-       if (!cfg) {
+               ret = sh_cmt_parse_dt(cmt);
+               if (ret < 0)
+                       return ret;
+       } else if (pdev->dev.platform_data) {
+               struct sh_timer_config *cfg = pdev->dev.platform_data;
+               const struct platform_device_id *id = pdev->id_entry;
+
+               cmt->info = (const struct sh_cmt_info *)id->driver_data;
+               cmt->hw_channels = cfg->channels_mask;
+       } else {
                dev_err(&cmt->pdev->dev, "missing platform data\n");
                return -ENXIO;
        }
 
-       cmt->info = (const struct sh_cmt_info *)id->driver_data;
-       cmt->legacy = cmt->info ? false : true;
-
        /* Get hold of clock. */
-       cmt->clk = clk_get(&cmt->pdev->dev, cmt->legacy ? "cmt_fck" : "fck");
+       cmt->clk = clk_get(&cmt->pdev->dev, "fck");
        if (IS_ERR(cmt->clk)) {
                dev_err(&cmt->pdev->dev, "cannot get clock\n");
                return PTR_ERR(cmt->clk);
@@ -1016,28 +994,13 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
        if (ret < 0)
                goto err_clk_put;
 
-       /*
-        * Map the memory resource(s). We need to support both the legacy
-        * platform device configuration (with one device per channel) and the
-        * new version (with multiple channels per device).
-        */
-       if (cmt->legacy)
-               ret = sh_cmt_map_memory_legacy(cmt);
-       else
-               ret = sh_cmt_map_memory(cmt);
-
+       /* Map the memory resource(s). */
+       ret = sh_cmt_map_memory(cmt);
        if (ret < 0)
                goto err_clk_unprepare;
 
        /* Allocate and setup the channels. */
-       if (cmt->legacy) {
-               cmt->num_channels = 1;
-               hw_channels = 0;
-       } else {
-               cmt->num_channels = hweight8(cfg->channels_mask);
-               hw_channels = cfg->channels_mask;
-       }
-
+       cmt->num_channels = hweight8(cmt->hw_channels);
        cmt->channels = kzalloc(cmt->num_channels * sizeof(*cmt->channels),
                                GFP_KERNEL);
        if (cmt->channels == NULL) {
@@ -1045,35 +1008,21 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
                goto err_unmap;
        }
 
-       if (cmt->legacy) {
-               ret = sh_cmt_setup_channel(&cmt->channels[0],
-                                          cfg->timer_bit, cfg->timer_bit,
-                                          cfg->clockevent_rating != 0,
-                                          cfg->clocksource_rating != 0, cmt);
+       /*
+        * Use the first channel as a clock event device and the second channel
+        * as a clock source. If only one channel is available use it for both.
+        */
+       for (i = 0, mask = cmt->hw_channels; i < cmt->num_channels; ++i) {
+               unsigned int hwidx = ffs(mask) - 1;
+               bool clocksource = i == 1 || cmt->num_channels == 1;
+               bool clockevent = i == 0;
+
+               ret = sh_cmt_setup_channel(&cmt->channels[i], i, hwidx,
+                                          clockevent, clocksource, cmt);
                if (ret < 0)
                        goto err_unmap;
-       } else {
-               unsigned int mask = hw_channels;
-               unsigned int i;
 
-               /*
-                * Use the first channel as a clock event device and the second
-                * channel as a clock source. If only one channel is available
-                * use it for both.
-                */
-               for (i = 0; i < cmt->num_channels; ++i) {
-                       unsigned int hwidx = ffs(mask) - 1;
-                       bool clocksource = i == 1 || cmt->num_channels == 1;
-                       bool clockevent = i == 0;
-
-                       ret = sh_cmt_setup_channel(&cmt->channels[i], i, hwidx,
-                                                  clockevent, clocksource,
-                                                  cmt);
-                       if (ret < 0)
-                               goto err_unmap;
-
-                       mask &= ~(1 << hwidx);
-               }
+               mask &= ~(1 << hwidx);
        }
 
        platform_set_drvdata(pdev, cmt);
@@ -1082,7 +1031,7 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
 
 err_unmap:
        kfree(cmt->channels);
-       sh_cmt_unmap_memory(cmt);
+       iounmap(cmt->mapbase);
 err_clk_unprepare:
        clk_unprepare(cmt->clk);
 err_clk_put:
@@ -1132,22 +1081,12 @@ static int sh_cmt_remove(struct platform_device *pdev)
        return -EBUSY; /* cannot unregister clockevent and clocksource */
 }
 
-static const struct platform_device_id sh_cmt_id_table[] = {
-       { "sh_cmt", 0 },
-       { "sh-cmt-16", (kernel_ulong_t)&sh_cmt_info[SH_CMT_16BIT] },
-       { "sh-cmt-32", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT] },
-       { "sh-cmt-32-fast", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT_FAST] },
-       { "sh-cmt-48", (kernel_ulong_t)&sh_cmt_info[SH_CMT_48BIT] },
-       { "sh-cmt-48-gen2", (kernel_ulong_t)&sh_cmt_info[SH_CMT_48BIT_GEN2] },
-       { }
-};
-MODULE_DEVICE_TABLE(platform, sh_cmt_id_table);
-
 static struct platform_driver sh_cmt_device_driver = {
        .probe          = sh_cmt_probe,
        .remove         = sh_cmt_remove,
        .driver         = {
                .name   = "sh_cmt",
+               .of_match_table = of_match_ptr(sh_cmt_of_table),
        },
        .id_table       = sh_cmt_id_table,
 };
index 188d4e0..3d88698 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/ioport.h>
 #include <linux/irq.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/pm_domain.h>
 #include <linux/pm_runtime.h>
@@ -37,7 +38,6 @@ struct sh_mtu2_channel {
        unsigned int index;
 
        void __iomem *base;
-       int irq;
 
        struct clock_event_device ced;
 };
@@ -48,15 +48,14 @@ struct sh_mtu2_device {
        void __iomem *mapbase;
        struct clk *clk;
 
+       raw_spinlock_t lock; /* Protect the shared registers */
+
        struct sh_mtu2_channel *channels;
        unsigned int num_channels;
 
-       bool legacy;
        bool has_clockevent;
 };
 
-static DEFINE_RAW_SPINLOCK(sh_mtu2_lock);
-
 #define TSTR -1 /* shared register */
 #define TCR  0 /* channel register */
 #define TMDR 1 /* channel register */
@@ -162,12 +161,8 @@ static inline unsigned long sh_mtu2_read(struct sh_mtu2_channel *ch, int reg_nr)
 {
        unsigned long offs;
 
-       if (reg_nr == TSTR) {
-               if (ch->mtu->legacy)
-                       return ioread8(ch->mtu->mapbase);
-               else
-                       return ioread8(ch->mtu->mapbase + 0x280);
-       }
+       if (reg_nr == TSTR)
+               return ioread8(ch->mtu->mapbase + 0x280);
 
        offs = mtu2_reg_offs[reg_nr];
 
@@ -182,12 +177,8 @@ static inline void sh_mtu2_write(struct sh_mtu2_channel *ch, int reg_nr,
 {
        unsigned long offs;
 
-       if (reg_nr == TSTR) {
-               if (ch->mtu->legacy)
-                       return iowrite8(value, ch->mtu->mapbase);
-               else
-                       return iowrite8(value, ch->mtu->mapbase + 0x280);
-       }
+       if (reg_nr == TSTR)
+               return iowrite8(value, ch->mtu->mapbase + 0x280);
 
        offs = mtu2_reg_offs[reg_nr];
 
@@ -202,7 +193,7 @@ static void sh_mtu2_start_stop_ch(struct sh_mtu2_channel *ch, int start)
        unsigned long flags, value;
 
        /* start stop register shared by multiple timer channels */
-       raw_spin_lock_irqsave(&sh_mtu2_lock, flags);
+       raw_spin_lock_irqsave(&ch->mtu->lock, flags);
        value = sh_mtu2_read(ch, TSTR);
 
        if (start)
@@ -211,7 +202,7 @@ static void sh_mtu2_start_stop_ch(struct sh_mtu2_channel *ch, int start)
                value &= ~(1 << ch->index);
 
        sh_mtu2_write(ch, TSTR, value);
-       raw_spin_unlock_irqrestore(&sh_mtu2_lock, flags);
+       raw_spin_unlock_irqrestore(&ch->mtu->lock, flags);
 }
 
 static int sh_mtu2_enable(struct sh_mtu2_channel *ch)
@@ -331,7 +322,6 @@ static void sh_mtu2_register_clockevent(struct sh_mtu2_channel *ch,
                                        const char *name)
 {
        struct clock_event_device *ced = &ch->ced;
-       int ret;
 
        ced->name = name;
        ced->features = CLOCK_EVT_FEAT_PERIODIC;
@@ -344,24 +334,12 @@ static void sh_mtu2_register_clockevent(struct sh_mtu2_channel *ch,
        dev_info(&ch->mtu->pdev->dev, "ch%u: used for clock events\n",
                 ch->index);
        clockevents_register_device(ced);
-
-       ret = request_irq(ch->irq, sh_mtu2_interrupt,
-                         IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
-                         dev_name(&ch->mtu->pdev->dev), ch);
-       if (ret) {
-               dev_err(&ch->mtu->pdev->dev, "ch%u: failed to request irq %d\n",
-                       ch->index, ch->irq);
-               return;
-       }
 }
 
-static int sh_mtu2_register(struct sh_mtu2_channel *ch, const char *name,
-                           bool clockevent)
+static int sh_mtu2_register(struct sh_mtu2_channel *ch, const char *name)
 {
-       if (clockevent) {
-               ch->mtu->has_clockevent = true;
-               sh_mtu2_register_clockevent(ch, name);
-       }
+       ch->mtu->has_clockevent = true;
+       sh_mtu2_register_clockevent(ch, name);
 
        return 0;
 }
@@ -372,40 +350,32 @@ static int sh_mtu2_setup_channel(struct sh_mtu2_channel *ch, unsigned int index,
        static const unsigned int channel_offsets[] = {
                0x300, 0x380, 0x000,
        };
-       bool clockevent;
+       char name[6];
+       int irq;
+       int ret;
 
        ch->mtu = mtu;
 
-       if (mtu->legacy) {
-               struct sh_timer_config *cfg = mtu->pdev->dev.platform_data;
-
-               clockevent = cfg->clockevent_rating != 0;
-
-               ch->irq = platform_get_irq(mtu->pdev, 0);
-               ch->base = mtu->mapbase - cfg->channel_offset;
-               ch->index = cfg->timer_bit;
-       } else {
-               char name[6];
-
-               clockevent = true;
-
-               sprintf(name, "tgi%ua", index);
-               ch->irq = platform_get_irq_byname(mtu->pdev, name);
-               ch->base = mtu->mapbase + channel_offsets[index];
-               ch->index = index;
-       }
-
-       if (ch->irq < 0) {
+       sprintf(name, "tgi%ua", index);
+       irq = platform_get_irq_byname(mtu->pdev, name);
+       if (irq < 0) {
                /* Skip channels with no declared interrupt. */
-               if (!mtu->legacy)
-                       return 0;
+               return 0;
+       }
 
-               dev_err(&mtu->pdev->dev, "ch%u: failed to get irq\n",
-                       ch->index);
-               return ch->irq;
+       ret = request_irq(irq, sh_mtu2_interrupt,
+                         IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
+                         dev_name(&ch->mtu->pdev->dev), ch);
+       if (ret) {
+               dev_err(&ch->mtu->pdev->dev, "ch%u: failed to request irq %d\n",
+                       index, irq);
+               return ret;
        }
 
-       return sh_mtu2_register(ch, dev_name(&mtu->pdev->dev), clockevent);
+       ch->base = mtu->mapbase + channel_offsets[index];
+       ch->index = index;
+
+       return sh_mtu2_register(ch, dev_name(&mtu->pdev->dev));
 }
 
 static int sh_mtu2_map_memory(struct sh_mtu2_device *mtu)
@@ -422,46 +392,21 @@ static int sh_mtu2_map_memory(struct sh_mtu2_device *mtu)
        if (mtu->mapbase == NULL)
                return -ENXIO;
 
-       /*
-        * In legacy platform device configuration (with one device per channel)
-        * the resource points to the channel base address.
-        */
-       if (mtu->legacy) {
-               struct sh_timer_config *cfg = mtu->pdev->dev.platform_data;
-               mtu->mapbase += cfg->channel_offset;
-       }
-
        return 0;
 }
 
-static void sh_mtu2_unmap_memory(struct sh_mtu2_device *mtu)
-{
-       if (mtu->legacy) {
-               struct sh_timer_config *cfg = mtu->pdev->dev.platform_data;
-               mtu->mapbase -= cfg->channel_offset;
-       }
-
-       iounmap(mtu->mapbase);
-}
-
 static int sh_mtu2_setup(struct sh_mtu2_device *mtu,
                         struct platform_device *pdev)
 {
-       struct sh_timer_config *cfg = pdev->dev.platform_data;
-       const struct platform_device_id *id = pdev->id_entry;
        unsigned int i;
        int ret;
 
        mtu->pdev = pdev;
-       mtu->legacy = id->driver_data;
 
-       if (mtu->legacy && !cfg) {
-               dev_err(&mtu->pdev->dev, "missing platform data\n");
-               return -ENXIO;
-       }
+       raw_spin_lock_init(&mtu->lock);
 
        /* Get hold of clock. */
-       mtu->clk = clk_get(&mtu->pdev->dev, mtu->legacy ? "mtu2_fck" : "fck");
+       mtu->clk = clk_get(&mtu->pdev->dev, "fck");
        if (IS_ERR(mtu->clk)) {
                dev_err(&mtu->pdev->dev, "cannot get clock\n");
                return PTR_ERR(mtu->clk);
@@ -479,10 +424,7 @@ static int sh_mtu2_setup(struct sh_mtu2_device *mtu,
        }
 
        /* Allocate and setup the channels. */
-       if (mtu->legacy)
-               mtu->num_channels = 1;
-       else
-               mtu->num_channels = 3;
+       mtu->num_channels = 3;
 
        mtu->channels = kzalloc(sizeof(*mtu->channels) * mtu->num_channels,
                                GFP_KERNEL);
@@ -491,16 +433,10 @@ static int sh_mtu2_setup(struct sh_mtu2_device *mtu,
                goto err_unmap;
        }
 
-       if (mtu->legacy) {
-               ret = sh_mtu2_setup_channel(&mtu->channels[0], 0, mtu);
+       for (i = 0; i < mtu->num_channels; ++i) {
+               ret = sh_mtu2_setup_channel(&mtu->channels[i], i, mtu);
                if (ret < 0)
                        goto err_unmap;
-       } else {
-               for (i = 0; i < mtu->num_channels; ++i) {
-                       ret = sh_mtu2_setup_channel(&mtu->channels[i], i, mtu);
-                       if (ret < 0)
-                               goto err_unmap;
-               }
        }
 
        platform_set_drvdata(pdev, mtu);
@@ -509,7 +445,7 @@ static int sh_mtu2_setup(struct sh_mtu2_device *mtu,
 
 err_unmap:
        kfree(mtu->channels);
-       sh_mtu2_unmap_memory(mtu);
+       iounmap(mtu->mapbase);
 err_clk_unprepare:
        clk_unprepare(mtu->clk);
 err_clk_put:
@@ -560,17 +496,23 @@ static int sh_mtu2_remove(struct platform_device *pdev)
 }
 
 static const struct platform_device_id sh_mtu2_id_table[] = {
-       { "sh_mtu2", 1 },
        { "sh-mtu2", 0 },
        { },
 };
 MODULE_DEVICE_TABLE(platform, sh_mtu2_id_table);
 
+static const struct of_device_id sh_mtu2_of_table[] __maybe_unused = {
+       { .compatible = "renesas,mtu2" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, sh_mtu2_of_table);
+
 static struct platform_driver sh_mtu2_device_driver = {
        .probe          = sh_mtu2_probe,
        .remove         = sh_mtu2_remove,
        .driver         = {
                .name   = "sh_mtu2",
+               .of_match_table = of_match_ptr(sh_mtu2_of_table),
        },
        .id_table       = sh_mtu2_id_table,
 };
index 6bd17a8..0f665b8 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/ioport.h>
 #include <linux/irq.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/pm_domain.h>
 #include <linux/pm_runtime.h>
@@ -32,7 +33,6 @@
 #include <linux/spinlock.h>
 
 enum sh_tmu_model {
-       SH_TMU_LEGACY,
        SH_TMU,
        SH_TMU_SH3,
 };
@@ -62,6 +62,8 @@ struct sh_tmu_device {
 
        enum sh_tmu_model model;
 
+       raw_spinlock_t lock; /* Protect the shared start/stop register */
+
        struct sh_tmu_channel *channels;
        unsigned int num_channels;
 
@@ -69,8 +71,6 @@ struct sh_tmu_device {
        bool has_clocksource;
 };
 
-static DEFINE_RAW_SPINLOCK(sh_tmu_lock);
-
 #define TSTR -1 /* shared register */
 #define TCOR  0 /* channel register */
 #define TCNT 1 /* channel register */
@@ -91,8 +91,6 @@ static inline unsigned long sh_tmu_read(struct sh_tmu_channel *ch, int reg_nr)
 
        if (reg_nr == TSTR) {
                switch (ch->tmu->model) {
-               case SH_TMU_LEGACY:
-                       return ioread8(ch->tmu->mapbase);
                case SH_TMU_SH3:
                        return ioread8(ch->tmu->mapbase + 2);
                case SH_TMU:
@@ -115,8 +113,6 @@ static inline void sh_tmu_write(struct sh_tmu_channel *ch, int reg_nr,
 
        if (reg_nr == TSTR) {
                switch (ch->tmu->model) {
-               case SH_TMU_LEGACY:
-                       return iowrite8(value, ch->tmu->mapbase);
                case SH_TMU_SH3:
                        return iowrite8(value, ch->tmu->mapbase + 2);
                case SH_TMU:
@@ -137,7 +133,7 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start)
        unsigned long flags, value;
 
        /* start stop register shared by multiple timer channels */
-       raw_spin_lock_irqsave(&sh_tmu_lock, flags);
+       raw_spin_lock_irqsave(&ch->tmu->lock, flags);
        value = sh_tmu_read(ch, TSTR);
 
        if (start)
@@ -146,7 +142,7 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start)
                value &= ~(1 << ch->index);
 
        sh_tmu_write(ch, TSTR, value);
-       raw_spin_unlock_irqrestore(&sh_tmu_lock, flags);
+       raw_spin_unlock_irqrestore(&ch->tmu->lock, flags);
 }
 
 static int __sh_tmu_enable(struct sh_tmu_channel *ch)
@@ -476,27 +472,12 @@ static int sh_tmu_channel_setup(struct sh_tmu_channel *ch, unsigned int index,
                return 0;
 
        ch->tmu = tmu;
+       ch->index = index;
 
-       if (tmu->model == SH_TMU_LEGACY) {
-               struct sh_timer_config *cfg = tmu->pdev->dev.platform_data;
-
-               /*
-                * The SH3 variant (SH770x, SH7705, SH7710 and SH7720) maps
-                * channel registers blocks at base + 2 + 12 * index, while all
-                * other variants map them at base + 4 + 12 * index. We can
-                * compute the index by just dividing by 12, the 2 bytes or 4
-                * bytes offset being hidden by the integer division.
-                */
-               ch->index = cfg->channel_offset / 12;
-               ch->base = tmu->mapbase + cfg->channel_offset;
-       } else {
-               ch->index = index;
-
-               if (tmu->model == SH_TMU_SH3)
-                       ch->base = tmu->mapbase + 4 + ch->index * 12;
-               else
-                       ch->base = tmu->mapbase + 8 + ch->index * 12;
-       }
+       if (tmu->model == SH_TMU_SH3)
+               ch->base = tmu->mapbase + 4 + ch->index * 12;
+       else
+               ch->base = tmu->mapbase + 8 + ch->index * 12;
 
        ch->irq = platform_get_irq(tmu->pdev, index);
        if (ch->irq < 0) {
@@ -526,46 +507,53 @@ static int sh_tmu_map_memory(struct sh_tmu_device *tmu)
        if (tmu->mapbase == NULL)
                return -ENXIO;
 
-       /*
-        * In legacy platform device configuration (with one device per channel)
-        * the resource points to the channel base address.
-        */
-       if (tmu->model == SH_TMU_LEGACY) {
-               struct sh_timer_config *cfg = tmu->pdev->dev.platform_data;
-               tmu->mapbase -= cfg->channel_offset;
-       }
-
        return 0;
 }
 
-static void sh_tmu_unmap_memory(struct sh_tmu_device *tmu)
+static int sh_tmu_parse_dt(struct sh_tmu_device *tmu)
 {
-       if (tmu->model == SH_TMU_LEGACY) {
-               struct sh_timer_config *cfg = tmu->pdev->dev.platform_data;
-               tmu->mapbase += cfg->channel_offset;
+       struct device_node *np = tmu->pdev->dev.of_node;
+
+       tmu->model = SH_TMU;
+       tmu->num_channels = 3;
+
+       of_property_read_u32(np, "#renesas,channels", &tmu->num_channels);
+
+       if (tmu->num_channels != 2 && tmu->num_channels != 3) {
+               dev_err(&tmu->pdev->dev, "invalid number of channels %u\n",
+                       tmu->num_channels);
+               return -EINVAL;
        }
 
-       iounmap(tmu->mapbase);
+       return 0;
 }
 
 static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
 {
-       struct sh_timer_config *cfg = pdev->dev.platform_data;
-       const struct platform_device_id *id = pdev->id_entry;
        unsigned int i;
        int ret;
 
-       if (!cfg) {
+       tmu->pdev = pdev;
+
+       raw_spin_lock_init(&tmu->lock);
+
+       if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
+               ret = sh_tmu_parse_dt(tmu);
+               if (ret < 0)
+                       return ret;
+       } else if (pdev->dev.platform_data) {
+               const struct platform_device_id *id = pdev->id_entry;
+               struct sh_timer_config *cfg = pdev->dev.platform_data;
+
+               tmu->model = id->driver_data;
+               tmu->num_channels = hweight8(cfg->channels_mask);
+       } else {
                dev_err(&tmu->pdev->dev, "missing platform data\n");
                return -ENXIO;
        }
 
-       tmu->pdev = pdev;
-       tmu->model = id->driver_data;
-
        /* Get hold of clock. */
-       tmu->clk = clk_get(&tmu->pdev->dev,
-                          tmu->model == SH_TMU_LEGACY ? "tmu_fck" : "fck");
+       tmu->clk = clk_get(&tmu->pdev->dev, "fck");
        if (IS_ERR(tmu->clk)) {
                dev_err(&tmu->pdev->dev, "cannot get clock\n");
                return PTR_ERR(tmu->clk);
@@ -583,11 +571,6 @@ static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
        }
 
        /* Allocate and setup the channels. */
-       if (tmu->model == SH_TMU_LEGACY)
-               tmu->num_channels = 1;
-       else
-               tmu->num_channels = hweight8(cfg->channels_mask);
-
        tmu->channels = kzalloc(sizeof(*tmu->channels) * tmu->num_channels,
                                GFP_KERNEL);
        if (tmu->channels == NULL) {
@@ -595,23 +578,15 @@ static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
                goto err_unmap;
        }
 
-       if (tmu->model == SH_TMU_LEGACY) {
-               ret = sh_tmu_channel_setup(&tmu->channels[0], 0,
-                                          cfg->clockevent_rating != 0,
-                                          cfg->clocksource_rating != 0, tmu);
+       /*
+        * Use the first channel as a clock event device and the second channel
+        * as a clock source.
+        */
+       for (i = 0; i < tmu->num_channels; ++i) {
+               ret = sh_tmu_channel_setup(&tmu->channels[i], i,
+                                          i == 0, i == 1, tmu);
                if (ret < 0)
                        goto err_unmap;
-       } else {
-               /*
-                * Use the first channel as a clock event device and the second
-                * channel as a clock source.
-                */
-               for (i = 0; i < tmu->num_channels; ++i) {
-                       ret = sh_tmu_channel_setup(&tmu->channels[i], i,
-                                                  i == 0, i == 1, tmu);
-                       if (ret < 0)
-                               goto err_unmap;
-               }
        }
 
        platform_set_drvdata(pdev, tmu);
@@ -620,7 +595,7 @@ static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
 
 err_unmap:
        kfree(tmu->channels);
-       sh_tmu_unmap_memory(tmu);
+       iounmap(tmu->mapbase);
 err_clk_unprepare:
        clk_unprepare(tmu->clk);
 err_clk_put:
@@ -671,18 +646,24 @@ static int sh_tmu_remove(struct platform_device *pdev)
 }
 
 static const struct platform_device_id sh_tmu_id_table[] = {
-       { "sh_tmu", SH_TMU_LEGACY },
        { "sh-tmu", SH_TMU },
        { "sh-tmu-sh3", SH_TMU_SH3 },
        { }
 };
 MODULE_DEVICE_TABLE(platform, sh_tmu_id_table);
 
+static const struct of_device_id sh_tmu_of_table[] __maybe_unused = {
+       { .compatible = "renesas,tmu" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, sh_tmu_of_table);
+
 static struct platform_driver sh_tmu_device_driver = {
        .probe          = sh_tmu_probe,
        .remove         = sh_tmu_remove,
        .driver         = {
                .name   = "sh_tmu",
+               .of_match_table = of_match_ptr(sh_tmu_of_table),
        },
        .id_table       = sh_tmu_id_table,
 };
index dbd3039..330e930 100644 (file)
@@ -260,6 +260,9 @@ static void __init sirfsoc_marco_timer_init(struct device_node *np)
 
        clk = of_clk_get(np, 0);
        BUG_ON(IS_ERR(clk));
+
+       BUG_ON(clk_prepare_enable(clk));
+
        rate = clk_get_rate(clk);
 
        BUG_ON(rate < MARCO_CLOCK_FREQ);
index a722aac..ce18d57 100644 (file)
@@ -200,6 +200,9 @@ static void __init sirfsoc_prima2_timer_init(struct device_node *np)
 
        clk = of_clk_get(np, 0);
        BUG_ON(IS_ERR(clk));
+
+       BUG_ON(clk_prepare_enable(clk));
+
        rate = clk_get_rate(clk);
 
        BUG_ON(rate < PRIMA2_CLOCK_FREQ);
index ccdd4c7..15d06fc 100644 (file)
@@ -69,7 +69,6 @@ void proc_fork_connector(struct task_struct *task)
        struct cn_msg *msg;
        struct proc_event *ev;
        __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
-       struct timespec ts;
        struct task_struct *parent;
 
        if (atomic_read(&proc_event_num_listeners) < 1)
@@ -79,8 +78,7 @@ void proc_fork_connector(struct task_struct *task)
        ev = (struct proc_event *)msg->data;
        memset(&ev->event_data, 0, sizeof(ev->event_data));
        get_seq(&msg->seq, &ev->cpu);
-       ktime_get_ts(&ts); /* get high res monotonic timestamp */
-       ev->timestamp_ns = timespec_to_ns(&ts);
+       ev->timestamp_ns = ktime_get_ns();
        ev->what = PROC_EVENT_FORK;
        rcu_read_lock();
        parent = rcu_dereference(task->real_parent);
@@ -102,7 +100,6 @@ void proc_exec_connector(struct task_struct *task)
 {
        struct cn_msg *msg;
        struct proc_event *ev;
-       struct timespec ts;
        __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
 
        if (atomic_read(&proc_event_num_listeners) < 1)
@@ -112,8 +109,7 @@ void proc_exec_connector(struct task_struct *task)
        ev = (struct proc_event *)msg->data;
        memset(&ev->event_data, 0, sizeof(ev->event_data));
        get_seq(&msg->seq, &ev->cpu);
-       ktime_get_ts(&ts); /* get high res monotonic timestamp */
-       ev->timestamp_ns = timespec_to_ns(&ts);
+       ev->timestamp_ns = ktime_get_ns();
        ev->what = PROC_EVENT_EXEC;
        ev->event_data.exec.process_pid = task->pid;
        ev->event_data.exec.process_tgid = task->tgid;
@@ -130,7 +126,6 @@ void proc_id_connector(struct task_struct *task, int which_id)
        struct cn_msg *msg;
        struct proc_event *ev;
        __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
-       struct timespec ts;
        const struct cred *cred;
 
        if (atomic_read(&proc_event_num_listeners) < 1)
@@ -156,8 +151,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
        }
        rcu_read_unlock();
        get_seq(&msg->seq, &ev->cpu);
-       ktime_get_ts(&ts); /* get high res monotonic timestamp */
-       ev->timestamp_ns = timespec_to_ns(&ts);
+       ev->timestamp_ns = ktime_get_ns();
 
        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
@@ -170,7 +164,6 @@ void proc_sid_connector(struct task_struct *task)
 {
        struct cn_msg *msg;
        struct proc_event *ev;
-       struct timespec ts;
        __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
 
        if (atomic_read(&proc_event_num_listeners) < 1)
@@ -180,8 +173,7 @@ void proc_sid_connector(struct task_struct *task)
        ev = (struct proc_event *)msg->data;
        memset(&ev->event_data, 0, sizeof(ev->event_data));
        get_seq(&msg->seq, &ev->cpu);
-       ktime_get_ts(&ts); /* get high res monotonic timestamp */
-       ev->timestamp_ns = timespec_to_ns(&ts);
+       ev->timestamp_ns = ktime_get_ns();
        ev->what = PROC_EVENT_SID;
        ev->event_data.sid.process_pid = task->pid;
        ev->event_data.sid.process_tgid = task->tgid;
@@ -197,7 +189,6 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
 {
        struct cn_msg *msg;
        struct proc_event *ev;
-       struct timespec ts;
        __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
 
        if (atomic_read(&proc_event_num_listeners) < 1)
@@ -207,8 +198,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
        ev = (struct proc_event *)msg->data;
        memset(&ev->event_data, 0, sizeof(ev->event_data));
        get_seq(&msg->seq, &ev->cpu);
-       ktime_get_ts(&ts); /* get high res monotonic timestamp */
-       ev->timestamp_ns = timespec_to_ns(&ts);
+       ev->timestamp_ns = ktime_get_ns();
        ev->what = PROC_EVENT_PTRACE;
        ev->event_data.ptrace.process_pid  = task->pid;
        ev->event_data.ptrace.process_tgid = task->tgid;
@@ -232,7 +222,6 @@ void proc_comm_connector(struct task_struct *task)
 {
        struct cn_msg *msg;
        struct proc_event *ev;
-       struct timespec ts;
        __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
 
        if (atomic_read(&proc_event_num_listeners) < 1)
@@ -242,8 +231,7 @@ void proc_comm_connector(struct task_struct *task)
        ev = (struct proc_event *)msg->data;
        memset(&ev->event_data, 0, sizeof(ev->event_data));
        get_seq(&msg->seq, &ev->cpu);
-       ktime_get_ts(&ts); /* get high res monotonic timestamp */
-       ev->timestamp_ns = timespec_to_ns(&ts);
+       ev->timestamp_ns = ktime_get_ns();
        ev->what = PROC_EVENT_COMM;
        ev->event_data.comm.process_pid  = task->pid;
        ev->event_data.comm.process_tgid = task->tgid;
@@ -261,7 +249,6 @@ void proc_coredump_connector(struct task_struct *task)
        struct cn_msg *msg;
        struct proc_event *ev;
        __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
-       struct timespec ts;
 
        if (atomic_read(&proc_event_num_listeners) < 1)
                return;
@@ -270,8 +257,7 @@ void proc_coredump_connector(struct task_struct *task)
        ev = (struct proc_event *)msg->data;
        memset(&ev->event_data, 0, sizeof(ev->event_data));
        get_seq(&msg->seq, &ev->cpu);
-       ktime_get_ts(&ts); /* get high res monotonic timestamp */
-       ev->timestamp_ns = timespec_to_ns(&ts);
+       ev->timestamp_ns = ktime_get_ns();
        ev->what = PROC_EVENT_COREDUMP;
        ev->event_data.coredump.process_pid = task->pid;
        ev->event_data.coredump.process_tgid = task->tgid;
@@ -288,7 +274,6 @@ void proc_exit_connector(struct task_struct *task)
        struct cn_msg *msg;
        struct proc_event *ev;
        __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
-       struct timespec ts;
 
        if (atomic_read(&proc_event_num_listeners) < 1)
                return;
@@ -297,8 +282,7 @@ void proc_exit_connector(struct task_struct *task)
        ev = (struct proc_event *)msg->data;
        memset(&ev->event_data, 0, sizeof(ev->event_data));
        get_seq(&msg->seq, &ev->cpu);
-       ktime_get_ts(&ts); /* get high res monotonic timestamp */
-       ev->timestamp_ns = timespec_to_ns(&ts);
+       ev->timestamp_ns = ktime_get_ns();
        ev->what = PROC_EVENT_EXIT;
        ev->event_data.exit.process_pid = task->pid;
        ev->event_data.exit.process_tgid = task->tgid;
@@ -325,7 +309,6 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
        struct cn_msg *msg;
        struct proc_event *ev;
        __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
-       struct timespec ts;
 
        if (atomic_read(&proc_event_num_listeners) < 1)
                return;
@@ -334,8 +317,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
        ev = (struct proc_event *)msg->data;
        memset(&ev->event_data, 0, sizeof(ev->event_data));
        msg->seq = rcvd_seq;
-       ktime_get_ts(&ts); /* get high res monotonic timestamp */
-       ev->timestamp_ns = timespec_to_ns(&ts);
+       ev->timestamp_ns = ktime_get_ns();
        ev->cpu = -1;
        ev->what = PROC_EVENT_NONE;
        ev->event_data.ack.err = err;
index d7d5c8a..5d997a3 100644 (file)
@@ -1214,9 +1214,9 @@ static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
        cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME);
 
        switch (a->clk_id) {
-       case CLOCK_REALTIME:      getnstimeofday(&ts);                   break;
-       case CLOCK_MONOTONIC:     do_posix_clock_monotonic_gettime(&ts); break;
-       case CLOCK_MONOTONIC_RAW: getrawmonotonic(&ts);                  break;
+       case CLOCK_REALTIME:      getnstimeofday(&ts);  break;
+       case CLOCK_MONOTONIC:     ktime_get_ts(&ts);    break;
+       case CLOCK_MONOTONIC_RAW: getrawmonotonic(&ts); break;
        default:
                ret = -EINVAL;
        }
index 0de123a..08ba120 100644 (file)
@@ -542,8 +542,8 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
                                          const struct drm_crtc *refcrtc,
                                          const struct drm_display_mode *mode)
 {
-       ktime_t stime, etime, mono_time_offset;
        struct timeval tv_etime;
+       ktime_t stime, etime;
        int vbl_status;
        int vpos, hpos, i;
        int framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns;
@@ -588,13 +588,6 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
                vbl_status = dev->driver->get_scanout_position(dev, crtc, flags, &vpos,
                                                               &hpos, &stime, &etime);
 
-               /*
-                * Get correction for CLOCK_MONOTONIC -> CLOCK_REALTIME if
-                * CLOCK_REALTIME is requested.
-                */
-               if (!drm_timestamp_monotonic)
-                       mono_time_offset = ktime_get_monotonic_offset();
-
                /* Return as no-op if scanout query unsupported or failed. */
                if (!(vbl_status & DRM_SCANOUTPOS_VALID)) {
                        DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n",
@@ -633,7 +626,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
        delta_ns = vpos * linedur_ns + hpos * pixeldur_ns;
 
        if (!drm_timestamp_monotonic)
-               etime = ktime_sub(etime, mono_time_offset);
+               etime = ktime_mono_to_real(etime);
 
        /* save this only for debugging purposes */
        tv_etime = ktime_to_timeval(etime);
@@ -664,10 +657,7 @@ static struct timeval get_drm_timestamp(void)
 {
        ktime_t now;
 
-       now = ktime_get();
-       if (!drm_timestamp_monotonic)
-               now = ktime_sub(now, ktime_get_monotonic_offset());
-
+       now = drm_timestamp_monotonic ? ktime_get() : ktime_get_real();
        return ktime_to_timeval(now);
 }
 
index 374f964..1f77008 100644 (file)
@@ -931,7 +931,7 @@ struct intel_ilk_power_mgmt {
        unsigned long last_time1;
        unsigned long chipset_power;
        u64 last_count2;
-       struct timespec last_time2;
+       u64 last_time2;
        unsigned long gfx_power;
        u8 corr;
 
index d893e4d..f247d92 100644 (file)
@@ -1149,16 +1149,16 @@ static bool can_wait_boost(struct drm_i915_file_private *file_priv)
 static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
                        unsigned reset_counter,
                        bool interruptible,
-                       struct timespec *timeout,
+                       s64 *timeout,
                        struct drm_i915_file_private *file_priv)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        const bool irq_test_in_progress =
                ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
-       struct timespec before, now;
        DEFINE_WAIT(wait);
        unsigned long timeout_expire;
+       s64 before, now;
        int ret;
 
        WARN(dev_priv->pm.irqs_disabled, "IRQs disabled\n");
@@ -1166,7 +1166,7 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
        if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
                return 0;
 
-       timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0;
+       timeout_expire = timeout ? jiffies + nsecs_to_jiffies((u64)*timeout) : 0;
 
        if (INTEL_INFO(dev)->gen >= 6 && can_wait_boost(file_priv)) {
                gen6_rps_boost(dev_priv);
@@ -1181,7 +1181,7 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
 
        /* Record current time in case interrupted by signal, or wedged */
        trace_i915_gem_request_wait_begin(ring, seqno);
-       getrawmonotonic(&before);
+       before = ktime_get_raw_ns();
        for (;;) {
                struct timer_list timer;
 
@@ -1230,7 +1230,7 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
                        destroy_timer_on_stack(&timer);
                }
        }
-       getrawmonotonic(&now);
+       now = ktime_get_raw_ns();
        trace_i915_gem_request_wait_end(ring, seqno);
 
        if (!irq_test_in_progress)
@@ -1239,10 +1239,9 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
        finish_wait(&ring->irq_queue, &wait);
 
        if (timeout) {
-               struct timespec sleep_time = timespec_sub(now, before);
-               *timeout = timespec_sub(*timeout, sleep_time);
-               if (!timespec_valid(timeout)) /* i.e. negative time remains */
-                       set_normalized_timespec(timeout, 0, 0);
+               s64 tres = *timeout - (now - before);
+
+               *timeout = tres < 0 ? 0 : tres;
        }
 
        return ret;
@@ -2746,16 +2745,10 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        struct drm_i915_gem_wait *args = data;
        struct drm_i915_gem_object *obj;
        struct intel_engine_cs *ring = NULL;
-       struct timespec timeout_stack, *timeout = NULL;
        unsigned reset_counter;
        u32 seqno = 0;
        int ret = 0;
 
-       if (args->timeout_ns >= 0) {
-               timeout_stack = ns_to_timespec(args->timeout_ns);
-               timeout = &timeout_stack;
-       }
-
        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;
@@ -2780,9 +2773,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
                 goto out;
 
        /* Do this after OLR check to make sure we make forward progress polling
-        * on this IOCTL with a 0 timeout (like busy ioctl)
+        * on this IOCTL with a timeout <=0 (like busy ioctl)
         */
-       if (!args->timeout_ns) {
+       if (args->timeout_ns <= 0) {
                ret = -ETIME;
                goto out;
        }
@@ -2791,10 +2784,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
        mutex_unlock(&dev->struct_mutex);
 
-       ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
-       if (timeout)
-               args->timeout_ns = timespec_to_ns(timeout);
-       return ret;
+       return __wait_seqno(ring, seqno, reset_counter, true, &args->timeout_ns,
+                           file->driver_priv);
 
 out:
        drm_gem_object_unreference(&obj->base);
index ee72807..f1233f5 100644 (file)
@@ -2993,7 +2993,7 @@ static void ironlake_enable_drps(struct drm_device *dev)
                I915_READ(0x112e0);
        dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
        dev_priv->ips.last_count2 = I915_READ(0x112f4);
-       getrawmonotonic(&dev_priv->ips.last_time2);
+       dev_priv->ips.last_time2 = ktime_get_raw_ns();
 
        spin_unlock_irq(&mchdev_lock);
 }
@@ -4314,18 +4314,16 @@ static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
 
 static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
 {
-       struct timespec now, diff1;
-       u64 diff;
-       unsigned long diffms;
+       u64 now, diff, diffms;
        u32 count;
 
        assert_spin_locked(&mchdev_lock);
 
-       getrawmonotonic(&now);
-       diff1 = timespec_sub(now, dev_priv->ips.last_time2);
+       now = ktime_get_raw_ns();
+       diffms = now - dev_priv->ips.last_time2;
+       do_div(diffms, NSEC_PER_MSEC);
 
        /* Don't divide by 0 */
-       diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
        if (!diffms)
                return;
 
index 6b252a8..c886c02 100644 (file)
@@ -159,8 +159,8 @@ struct vmw_surface {
 
 struct vmw_marker_queue {
        struct list_head head;
-       struct timespec lag;
-       struct timespec lag_time;
+       u64 lag;
+       u64 lag_time;
        spinlock_t lock;
 };
 
index 8a8725c..efd1ffd 100644 (file)
 struct vmw_marker {
        struct list_head head;
        uint32_t seqno;
-       struct timespec submitted;
+       u64 submitted;
 };
 
 void vmw_marker_queue_init(struct vmw_marker_queue *queue)
 {
        INIT_LIST_HEAD(&queue->head);
-       queue->lag = ns_to_timespec(0);
-       getrawmonotonic(&queue->lag_time);
+       queue->lag = 0;
+       queue->lag_time = ktime_get_raw_ns();
        spin_lock_init(&queue->lock);
 }
 
@@ -62,7 +62,7 @@ int vmw_marker_push(struct vmw_marker_queue *queue,
                return -ENOMEM;
 
        marker->seqno = seqno;
-       getrawmonotonic(&marker->submitted);
+       marker->submitted = ktime_get_raw_ns();
        spin_lock(&queue->lock);
        list_add_tail(&marker->head, &queue->head);
        spin_unlock(&queue->lock);
@@ -74,14 +74,14 @@ int vmw_marker_pull(struct vmw_marker_queue *queue,
                   uint32_t signaled_seqno)
 {
        struct vmw_marker *marker, *next;
-       struct timespec now;
        bool updated = false;
+       u64 now;
 
        spin_lock(&queue->lock);
-       getrawmonotonic(&now);
+       now = ktime_get_raw_ns();
 
        if (list_empty(&queue->head)) {
-               queue->lag = ns_to_timespec(0);
+               queue->lag = 0;
                queue->lag_time = now;
                updated = true;
                goto out_unlock;
@@ -91,7 +91,7 @@ int vmw_marker_pull(struct vmw_marker_queue *queue,
                if (signaled_seqno - marker->seqno > (1 << 30))
                        continue;
 
-               queue->lag = timespec_sub(now, marker->submitted);
+               queue->lag = now - marker->submitted;
                queue->lag_time = now;
                updated = true;
                list_del(&marker->head);
@@ -104,27 +104,13 @@ out_unlock:
        return (updated) ? 0 : -EBUSY;
 }
 
-static struct timespec vmw_timespec_add(struct timespec t1,
-                                       struct timespec t2)
+static u64 vmw_fifo_lag(struct vmw_marker_queue *queue)
 {
-       t1.tv_sec += t2.tv_sec;
-       t1.tv_nsec += t2.tv_nsec;
-       if (t1.tv_nsec >= 1000000000L) {
-               t1.tv_sec += 1;
-               t1.tv_nsec -= 1000000000L;
-       }
-
-       return t1;
-}
-
-static struct timespec vmw_fifo_lag(struct vmw_marker_queue *queue)
-{
-       struct timespec now;
+       u64 now;
 
        spin_lock(&queue->lock);
-       getrawmonotonic(&now);
-       queue->lag = vmw_timespec_add(queue->lag,
-                                     timespec_sub(now, queue->lag_time));
+       now = ktime_get_raw_ns();
+       queue->lag += now - queue->lag_time;
        queue->lag_time = now;
        spin_unlock(&queue->lock);
        return queue->lag;
@@ -134,11 +120,9 @@ static struct timespec vmw_fifo_lag(struct vmw_marker_queue *queue)
 static bool vmw_lag_lt(struct vmw_marker_queue *queue,
                       uint32_t us)
 {
-       struct timespec lag, cond;
+       u64 cond = (u64) us * NSEC_PER_USEC;
 
-       cond = ns_to_timespec((s64) us * 1000);
-       lag = vmw_fifo_lag(queue);
-       return (timespec_compare(&lag, &cond) < 1);
+       return vmw_fifo_lag(queue) <= cond;
 }
 
 int vmw_wait_lag(struct vmw_private *dev_priv,
index 632f1dc..7a8a6fb 100644 (file)
@@ -842,11 +842,10 @@ static ssize_t aem_show_power(struct device *dev,
        struct aem_data *data = dev_get_drvdata(dev);
        u64 before, after, delta, time;
        signed long leftover;
-       struct timespec b, a;
 
        mutex_lock(&data->lock);
        update_aem_energy_one(data, attr->index);
-       getnstimeofday(&b);
+       time = ktime_get_ns();
        before = data->energy[attr->index];
 
        leftover = schedule_timeout_interruptible(
@@ -858,11 +857,10 @@ static ssize_t aem_show_power(struct device *dev,
        }
 
        update_aem_energy_one(data, attr->index);
-       getnstimeofday(&a);
+       time = ktime_get_ns() - time;
        after = data->energy[attr->index];
        mutex_unlock(&data->lock);
 
-       time = timespec_to_ns(&a) - timespec_to_ns(&b);
        delta = (after - before) * UJ_PER_MJ;
 
        return sprintf(buf, "%llu\n",
index fd325ec..de05545 100644 (file)
@@ -108,9 +108,8 @@ static void evdev_queue_syn_dropped(struct evdev_client *client)
        struct input_event ev;
        ktime_t time;
 
-       time = ktime_get();
-       if (client->clkid != CLOCK_MONOTONIC)
-               time = ktime_sub(time, ktime_get_monotonic_offset());
+       time = (client->clkid == CLOCK_MONOTONIC) ?
+               ktime_get() : ktime_get_real();
 
        ev.time = ktime_to_timeval(time);
        ev.type = EV_SYN;
@@ -202,7 +201,7 @@ static void evdev_events(struct input_handle *handle,
        ktime_t time_mono, time_real;
 
        time_mono = ktime_get();
-       time_real = ktime_sub(time_mono, ktime_get_monotonic_offset());
+       time_real = ktime_mono_to_real(time_mono);
 
        rcu_read_lock();
 
index 0b8d328..8c1c7cc 100644 (file)
@@ -225,7 +225,6 @@ static int cros_ec_command_spi_xfer(struct cros_ec_device *ec_dev,
        u8 *ptr;
        int sum;
        int ret = 0, final_ret;
-       struct timespec ts;
 
        /*
         * We have the shared ec_dev buffer plus we do lots of separate spi_sync
@@ -239,11 +238,9 @@ static int cros_ec_command_spi_xfer(struct cros_ec_device *ec_dev,
 
        /* If it's too soon to do another transaction, wait */
        if (ec_spi->last_transfer_ns) {
-               struct timespec ts;
                unsigned long delay;    /* The delay completed so far */
 
-               ktime_get_ts(&ts);
-               delay = timespec_to_ns(&ts) - ec_spi->last_transfer_ns;
+               delay = ktime_get_ns() - ec_spi->last_transfer_ns;
                if (delay < EC_SPI_RECOVERY_TIME_NS)
                        ndelay(EC_SPI_RECOVERY_TIME_NS - delay);
        }
@@ -280,8 +277,7 @@ static int cros_ec_command_spi_xfer(struct cros_ec_device *ec_dev,
        }
 
        final_ret = spi_sync(ec_spi->spi, &msg);
-       ktime_get_ts(&ts);
-       ec_spi->last_transfer_ns = timespec_to_ns(&ts);
+       ec_spi->last_transfer_ns = ktime_get_ns();
        if (!ret)
                ret = final_ret;
        if (ret < 0) {
index 06f6ad2..3336ddc 100644 (file)
@@ -145,7 +145,6 @@ ioc4_clock_calibrate(struct ioc4_driver_data *idd)
        union ioc4_int_out int_out;
        union ioc4_gpcr gpcr;
        unsigned int state, last_state = 1;
-       struct timespec start_ts, end_ts;
        uint64_t start, end, period;
        unsigned int count = 0;
 
@@ -174,10 +173,10 @@ ioc4_clock_calibrate(struct ioc4_driver_data *idd)
                if (!last_state && state) {
                        count++;
                        if (count == IOC4_CALIBRATE_END) {
-                               ktime_get_ts(&end_ts);
+                               end = ktime_get_ns();
                                break;
                        } else if (count == IOC4_CALIBRATE_DISCARD)
-                               ktime_get_ts(&start_ts);
+                               start = ktime_get_ns();
                }
                last_state = state;
        } while (1);
@@ -192,8 +191,6 @@ ioc4_clock_calibrate(struct ioc4_driver_data *idd)
         *    by which the IOC4 generates the square wave, to get the
         *    period of an IOC4 INT_OUT count.
         */
-       end = end_ts.tv_sec * NSEC_PER_SEC + end_ts.tv_nsec;
-       start = start_ts.tv_sec * NSEC_PER_SEC + start_ts.tv_nsec;
        period = (end - start) /
                (IOC4_CALIBRATE_CYCLES * 2 * (IOC4_CALIBRATE_COUNT + 1));
 
index 87d1b01..67f8f5a 100644 (file)
@@ -548,7 +548,7 @@ static void cmd_work_handler(struct work_struct *work)
        lay->status_own = CMD_OWNER_HW;
        set_signature(ent, !cmd->checksum_disabled);
        dump_command(dev, ent, 1);
-       ktime_get_ts(&ent->ts1);
+       ent->ts1 = ktime_get_ns();
 
        /* ring doorbell after the descriptor is valid */
        wmb();
@@ -637,7 +637,6 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
 {
        struct mlx5_cmd *cmd = &dev->cmd;
        struct mlx5_cmd_work_ent *ent;
-       ktime_t t1, t2, delta;
        struct mlx5_cmd_stats *stats;
        int err = 0;
        s64 ds;
@@ -668,10 +667,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
                if (err == -ETIMEDOUT)
                        goto out;
 
-               t1 = timespec_to_ktime(ent->ts1);
-               t2 = timespec_to_ktime(ent->ts2);
-               delta = ktime_sub(t2, t1);
-               ds = ktime_to_ns(delta);
+               ds = ent->ts2 - ent->ts1;
                op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
                if (op < ARRAY_SIZE(cmd->stats)) {
                        stats = &cmd->stats[op];
@@ -1135,7 +1131,6 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
        void *context;
        int err;
        int i;
-       ktime_t t1, t2, delta;
        s64 ds;
        struct mlx5_cmd_stats *stats;
        unsigned long flags;
@@ -1149,7 +1144,7 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
                                sem = &cmd->pages_sem;
                        else
                                sem = &cmd->sem;
-                       ktime_get_ts(&ent->ts2);
+                       ent->ts2 = ktime_get_ns();
                        memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
                        dump_command(dev, ent, 0);
                        if (!ent->ret) {
@@ -1163,10 +1158,7 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
                        }
                        free_ent(cmd, ent->idx);
                        if (ent->callback) {
-                               t1 = timespec_to_ktime(ent->ts1);
-                               t2 = timespec_to_ktime(ent->ts2);
-                               delta = ktime_sub(t2, t1);
-                               ds = ktime_to_ns(delta);
+                               ds = ent->ts2 - ent->ts1;
                                if (ent->op < ARRAY_SIZE(cmd->stats)) {
                                        stats = &cmd->stats[ent->op];
                                        spin_lock_irqsave(&stats->lock, flags);
index 2a8ed83..14b80b1 100644 (file)
@@ -1734,7 +1734,6 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
                   struct ath9k_hw_cal_data *caldata, bool fastcc)
 {
        struct ath_common *common = ath9k_hw_common(ah);
-       struct timespec ts;
        u32 saveLedState;
        u32 saveDefAntenna;
        u32 macStaId1;
@@ -1784,8 +1783,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
 
        /* Save TSF before chip reset, a cold reset clears it */
        tsf = ath9k_hw_gettsf64(ah);
-       getrawmonotonic(&ts);
-       usec = ts.tv_sec * 1000000ULL + ts.tv_nsec / 1000;
+       usec = ktime_to_us(ktime_get_raw());
 
        saveLedState = REG_READ(ah, AR_CFG_LED) &
                (AR_CFG_LED_ASSOC_CTL | AR_CFG_LED_MODE_SEL |
@@ -1818,8 +1816,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
        }
 
        /* Restore TSF */
-       getrawmonotonic(&ts);
-       usec = ts.tv_sec * 1000000ULL + ts.tv_nsec / 1000 - usec;
+       usec = ktime_to_us(ktime_get_raw()) - usec;
        ath9k_hw_settsf64(ah, tsf + usec);
 
        if (AR_SREV_9280_20_OR_LATER(ah))
index 5edfcb0..e371825 100644 (file)
@@ -702,6 +702,42 @@ void __iomem *of_iomap(struct device_node *np, int index)
 }
 EXPORT_SYMBOL(of_iomap);
 
+/*
+ * of_io_request_and_map - Requests a resource and maps the memory mapped IO
+ *                        for a given device_node
+ * @device:    the device whose io range will be mapped
+ * @index:     index of the io range
+ * @name:      name of the resource
+ *
+ * Returns a pointer to the requested and mapped memory or an ERR_PTR() encoded
+ * error code on failure. Usage example:
+ *
+ *     base = of_io_request_and_map(node, 0, "foo");
+ *     if (IS_ERR(base))
+ *             return PTR_ERR(base);
+ */
+void __iomem *of_io_request_and_map(struct device_node *np, int index,
+                                       char *name)
+{
+       struct resource res;
+       void __iomem *mem;
+
+       if (of_address_to_resource(np, index, &res))
+               return IOMEM_ERR_PTR(-EINVAL);
+
+       if (!request_mem_region(res.start, resource_size(&res), name))
+               return IOMEM_ERR_PTR(-EBUSY);
+
+       mem = ioremap(res.start, resource_size(&res));
+       if (!mem) {
+               release_mem_region(res.start, resource_size(&res));
+               return IOMEM_ERR_PTR(-ENOMEM);
+       }
+
+       return mem;
+}
+EXPORT_SYMBOL(of_io_request_and_map);
+
 /**
  * of_dma_get_range - Get DMA range info
  * @np:                device node to get DMA range info
index 1812f02..daa8e75 100644 (file)
@@ -306,11 +306,9 @@ static struct nsm_handle *nsm_lookup_priv(const struct nsm_private *priv)
 static void nsm_init_private(struct nsm_handle *nsm)
 {
        u64 *p = (u64 *)&nsm->sm_priv.data;
-       struct timespec ts;
        s64 ns;
 
-       ktime_get_ts(&ts);
-       ns = timespec_to_ns(&ts);
+       ns = ktime_get_ns();
        put_unaligned(ns, p);
        put_unaligned((unsigned long)nsm, p + 1);
 }
index 64db2bc..d7f9199 100644 (file)
@@ -473,13 +473,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
        priority = task_prio(task);
        nice = task_nice(task);
 
-       /* Temporary variable needed for gcc-2.96 */
-       /* convert timespec -> nsec*/
-       start_time =
-               (unsigned long long)task->real_start_time.tv_sec * NSEC_PER_SEC
-                               + task->real_start_time.tv_nsec;
        /* convert nsec -> ticks */
-       start_time = nsec_to_clock_t(start_time);
+       start_time = nsec_to_clock_t(task->real_start_time);
 
        seq_printf(m, "%d (%s) %c", pid_nr_ns(pid, ns), tcomm, state);
        seq_put_decimal_ll(m, ' ', ppid);
index 0013142..80c3502 100644 (file)
@@ -35,8 +35,9 @@ struct timerfd_ctx {
        ktime_t moffs;
        wait_queue_head_t wqh;
        u64 ticks;
-       int expired;
        int clockid;
+       short unsigned expired;
+       short unsigned settime_flags;   /* to show in fdinfo */
        struct rcu_head rcu;
        struct list_head clist;
        bool might_cancel;
@@ -92,7 +93,7 @@ static enum alarmtimer_restart timerfd_alarmproc(struct alarm *alarm,
  */
 void timerfd_clock_was_set(void)
 {
-       ktime_t moffs = ktime_get_monotonic_offset();
+       ktime_t moffs = ktime_mono_to_real((ktime_t){ .tv64 = 0 });
        struct timerfd_ctx *ctx;
        unsigned long flags;
 
@@ -125,7 +126,7 @@ static bool timerfd_canceled(struct timerfd_ctx *ctx)
 {
        if (!ctx->might_cancel || ctx->moffs.tv64 != KTIME_MAX)
                return false;
-       ctx->moffs = ktime_get_monotonic_offset();
+       ctx->moffs = ktime_mono_to_real((ktime_t){ .tv64 = 0 });
        return true;
 }
 
@@ -196,6 +197,8 @@ static int timerfd_setup(struct timerfd_ctx *ctx, int flags,
                if (timerfd_canceled(ctx))
                        return -ECANCELED;
        }
+
+       ctx->settime_flags = flags & TFD_SETTIME_FLAGS;
        return 0;
 }
 
@@ -284,11 +287,77 @@ static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count,
        return res;
 }
 
+#ifdef CONFIG_PROC_FS
+static int timerfd_show(struct seq_file *m, struct file *file)
+{
+       struct timerfd_ctx *ctx = file->private_data;
+       struct itimerspec t;
+
+       spin_lock_irq(&ctx->wqh.lock);
+       t.it_value = ktime_to_timespec(timerfd_get_remaining(ctx));
+       t.it_interval = ktime_to_timespec(ctx->tintv);
+       spin_unlock_irq(&ctx->wqh.lock);
+
+       return seq_printf(m,
+                         "clockid: %d\n"
+                         "ticks: %llu\n"
+                         "settime flags: 0%o\n"
+                         "it_value: (%llu, %llu)\n"
+                         "it_interval: (%llu, %llu)\n",
+                         ctx->clockid, (unsigned long long)ctx->ticks,
+                         ctx->settime_flags,
+                         (unsigned long long)t.it_value.tv_sec,
+                         (unsigned long long)t.it_value.tv_nsec,
+                         (unsigned long long)t.it_interval.tv_sec,
+                         (unsigned long long)t.it_interval.tv_nsec);
+}
+#else
+#define timerfd_show NULL
+#endif
+
+#ifdef CONFIG_CHECKPOINT_RESTORE
+static long timerfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+       struct timerfd_ctx *ctx = file->private_data;
+       int ret = 0;
+
+       switch (cmd) {
+       case TFD_IOC_SET_TICKS: {
+               u64 ticks;
+
+               if (copy_from_user(&ticks, (u64 __user *)arg, sizeof(ticks)))
+                       return -EFAULT;
+               if (!ticks)
+                       return -EINVAL;
+
+               spin_lock_irq(&ctx->wqh.lock);
+               if (!timerfd_canceled(ctx)) {
+                       ctx->ticks = ticks;
+                       if (ticks)
+                               wake_up_locked(&ctx->wqh);
+               } else
+                       ret = -ECANCELED;
+               spin_unlock_irq(&ctx->wqh.lock);
+               break;
+       }
+       default:
+               ret = -ENOTTY;
+               break;
+       }
+
+       return ret;
+}
+#else
+#define timerfd_ioctl NULL
+#endif
+
 static const struct file_operations timerfd_fops = {
        .release        = timerfd_release,
        .poll           = timerfd_poll,
        .read           = timerfd_read,
        .llseek         = noop_llseek,
+       .show_fdinfo    = timerfd_show,
+       .unlocked_ioctl = timerfd_ioctl,
 };
 
 static int timerfd_fget(int fd, struct fd *p)
@@ -336,7 +405,7 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
        else
                hrtimer_init(&ctx->t.tmr, clockid, HRTIMER_MODE_ABS);
 
-       ctx->moffs = ktime_get_monotonic_offset();
+       ctx->moffs = ktime_mono_to_real((ktime_t){ .tv64 = 0 });
 
        ufd = anon_inode_getfd("[timerfd]", &timerfd_fops, ctx,
                               O_RDWR | (flags & TFD_SHARED_FCNTL_FLAGS));
diff --git a/include/clocksource/pxa.h b/include/clocksource/pxa.h
new file mode 100644 (file)
index 0000000..1efbe5a
--- /dev/null
@@ -0,0 +1,18 @@
+/*
+ * PXA clocksource, clockevents, and OST interrupt handlers.
+ *
+ * Copyright (C) 2014 Robert Jarzmik
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ */
+
+#ifndef _CLOCKSOURCE_PXA_H
+#define _CLOCKSOURCE_PXA_H
+
+extern void pxa_timer_nodt_init(int irq, void __iomem *base,
+                          unsigned long clock_tick_rate);
+
+#endif
index a16b497..653f0e2 100644 (file)
@@ -162,7 +162,6 @@ extern u64 timecounter_cyc2time(struct timecounter *tc,
  * @archdata:          arch-specific data
  * @suspend:           suspend function for the clocksource, if necessary
  * @resume:            resume function for the clocksource, if necessary
- * @cycle_last:                most recent cycle counter value seen by ::read()
  * @owner:             module reference, must be set by clocksource in modules
  */
 struct clocksource {
@@ -171,7 +170,6 @@ struct clocksource {
         * clocksource itself is cacheline aligned.
         */
        cycle_t (*read)(struct clocksource *cs);
-       cycle_t cycle_last;
        cycle_t mask;
        u32 mult;
        u32 shift;
index e7a8d3f..a036d05 100644 (file)
@@ -165,6 +165,7 @@ enum  hrtimer_base_type {
  * struct hrtimer_cpu_base - the per cpu clock bases
  * @lock:              lock protecting the base and associated clock bases
  *                     and timers
+ * @cpu:               cpu number
  * @active_bases:      Bitfield to mark bases with active timers
  * @clock_was_set:     Indicates that clock was set from irq context.
  * @expires_next:      absolute time of the next event which was scheduled
@@ -179,6 +180,7 @@ enum  hrtimer_base_type {
  */
 struct hrtimer_cpu_base {
        raw_spinlock_t                  lock;
+       unsigned int                    cpu;
        unsigned int                    active_bases;
        unsigned int                    clock_was_set;
 #ifdef CONFIG_HIGH_RES_TIMERS
@@ -324,14 +326,6 @@ static inline void timerfd_clock_was_set(void) { }
 #endif
 extern void hrtimers_resume(void);
 
-extern ktime_t ktime_get(void);
-extern ktime_t ktime_get_real(void);
-extern ktime_t ktime_get_boottime(void);
-extern ktime_t ktime_get_monotonic_offset(void);
-extern ktime_t ktime_get_clocktai(void);
-extern ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot,
-                                        ktime_t *offs_tai);
-
 DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
 
 
@@ -452,12 +446,6 @@ extern void hrtimer_run_pending(void);
 /* Bootup initialization: */
 extern void __init hrtimers_init(void);
 
-#if BITS_PER_LONG < 64
-extern u64 ktime_divns(const ktime_t kt, s64 div);
-#else /* BITS_PER_LONG < 64 */
-# define ktime_divns(kt, div)          (u64)((kt).tv64 / (div))
-#endif
-
 /* Show pending timers: */
 extern void sysrq_timer_list_show(void);
 
index ccde917..15dc6bc 100644 (file)
@@ -277,14 +277,7 @@ static inline bool iio_channel_has_info(const struct iio_chan_spec *chan,
  **/
 static inline s64 iio_get_time_ns(void)
 {
-       struct timespec ts;
-       /*
-        * calls getnstimeofday.
-        * If hrtimers then up to ns accurate, if not microsecond.
-        */
-       ktime_get_real_ts(&ts);
-
-       return timespec_to_ns(&ts);
+       return ktime_get_real_ns();
 }
 
 /* Device operating modes */
index b76e6e5..d5fc9b8 100644 (file)
@@ -58,6 +58,8 @@ static inline void devm_ioport_unmap(struct device *dev, void __iomem *addr)
 }
 #endif
 
+#define IOMEM_ERR_PTR(err) (__force void __iomem *)ERR_PTR(err)
+
 void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
                            unsigned long size);
 void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
index de9e46e..c9d645a 100644 (file)
 /*
  * ktime_t:
  *
- * On 64-bit CPUs a single 64-bit variable is used to store the hrtimers
+ * A single 64-bit variable is used to store the hrtimers
  * internal representation of time values in scalar nanoseconds. The
  * design plays out best on 64-bit CPUs, where most conversions are
  * NOPs and most arithmetic ktime_t operations are plain arithmetic
  * operations.
  *
- * On 32-bit CPUs an optimized representation of the timespec structure
- * is used to avoid expensive conversions from and to timespecs. The
- * endian-aware order of the tv struct members is chosen to allow
- * mathematical operations on the tv64 member of the union too, which
- * for certain operations produces better code.
- *
- * For architectures with efficient support for 64/32-bit conversions the
- * plain scalar nanosecond based representation can be selected by the
- * config switch CONFIG_KTIME_SCALAR.
  */
 union ktime {
        s64     tv64;
-#if BITS_PER_LONG != 64 && !defined(CONFIG_KTIME_SCALAR)
-       struct {
-# ifdef __BIG_ENDIAN
-       s32     sec, nsec;
-# else
-       s32     nsec, sec;
-# endif
-       } tv;
-#endif
 };
 
 typedef union ktime ktime_t;           /* Kill this */
 
-/*
- * ktime_t definitions when using the 64-bit scalar representation:
- */
-
-#if (BITS_PER_LONG == 64) || defined(CONFIG_KTIME_SCALAR)
-
 /**
  * ktime_set - Set a ktime_t variable from a seconds/nanoseconds value
  * @secs:      seconds to set
@@ -71,13 +47,12 @@ typedef union ktime ktime_t;                /* Kill this */
  *
  * Return: The ktime_t representation of the value.
  */
-static inline ktime_t ktime_set(const long secs, const unsigned long nsecs)
+static inline ktime_t ktime_set(const s64 secs, const unsigned long nsecs)
 {
-#if (BITS_PER_LONG == 64)
        if (unlikely(secs >= KTIME_SEC_MAX))
                return (ktime_t){ .tv64 = KTIME_MAX };
-#endif
-       return (ktime_t) { .tv64 = (s64)secs * NSEC_PER_SEC + (s64)nsecs };
+
+       return (ktime_t) { .tv64 = secs * NSEC_PER_SEC + (s64)nsecs };
 }
 
 /* Subtract two ktime_t variables. rem = lhs -rhs: */
@@ -108,6 +83,12 @@ static inline ktime_t timespec_to_ktime(struct timespec ts)
        return ktime_set(ts.tv_sec, ts.tv_nsec);
 }
 
+/* convert a timespec64 to ktime_t format: */
+static inline ktime_t timespec64_to_ktime(struct timespec64 ts)
+{
+       return ktime_set(ts.tv_sec, ts.tv_nsec);
+}
+
 /* convert a timeval to ktime_t format: */
 static inline ktime_t timeval_to_ktime(struct timeval tv)
 {
@@ -117,159 +98,15 @@ static inline ktime_t timeval_to_ktime(struct timeval tv)
 /* Map the ktime_t to timespec conversion to ns_to_timespec function */
 #define ktime_to_timespec(kt)          ns_to_timespec((kt).tv64)
 
+/* Map the ktime_t to timespec64 conversion to ns_to_timespec64 function */
+#define ktime_to_timespec64(kt)                ns_to_timespec64((kt).tv64)
+
 /* Map the ktime_t to timeval conversion to ns_to_timeval function */
 #define ktime_to_timeval(kt)           ns_to_timeval((kt).tv64)
 
 /* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */
 #define ktime_to_ns(kt)                        ((kt).tv64)
 
-#else  /* !((BITS_PER_LONG == 64) || defined(CONFIG_KTIME_SCALAR)) */
-
-/*
- * Helper macros/inlines to get the ktime_t math right in the timespec
- * representation. The macros are sometimes ugly - their actual use is
- * pretty okay-ish, given the circumstances. We do all this for
- * performance reasons. The pure scalar nsec_t based code was nice and
- * simple, but created too many 64-bit / 32-bit conversions and divisions.
- *
- * Be especially aware that negative values are represented in a way
- * that the tv.sec field is negative and the tv.nsec field is greater
- * or equal to zero but less than nanoseconds per second. This is the
- * same representation which is used by timespecs.
- *
- *   tv.sec < 0 and 0 >= tv.nsec < NSEC_PER_SEC
- */
-
-/* Set a ktime_t variable to a value in sec/nsec representation: */
-static inline ktime_t ktime_set(const long secs, const unsigned long nsecs)
-{
-       return (ktime_t) { .tv = { .sec = secs, .nsec = nsecs } };
-}
-
-/**
- * ktime_sub - subtract two ktime_t variables
- * @lhs:       minuend
- * @rhs:       subtrahend
- *
- * Return: The remainder of the subtraction.
- */
-static inline ktime_t ktime_sub(const ktime_t lhs, const ktime_t rhs)
-{
-       ktime_t res;
-
-       res.tv64 = lhs.tv64 - rhs.tv64;
-       if (res.tv.nsec < 0)
-               res.tv.nsec += NSEC_PER_SEC;
-
-       return res;
-}
-
-/**
- * ktime_add - add two ktime_t variables
- * @add1:      addend1
- * @add2:      addend2
- *
- * Return: The sum of @add1 and @add2.
- */
-static inline ktime_t ktime_add(const ktime_t add1, const ktime_t add2)
-{
-       ktime_t res;
-
-       res.tv64 = add1.tv64 + add2.tv64;
-       /*
-        * performance trick: the (u32) -NSEC gives 0x00000000Fxxxxxxx
-        * so we subtract NSEC_PER_SEC and add 1 to the upper 32 bit.
-        *
-        * it's equivalent to:
-        *   tv.nsec -= NSEC_PER_SEC
-        *   tv.sec ++;
-        */
-       if (res.tv.nsec >= NSEC_PER_SEC)
-               res.tv64 += (u32)-NSEC_PER_SEC;
-
-       return res;
-}
-
-/**
- * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
- * @kt:                addend
- * @nsec:      the scalar nsec value to add
- *
- * Return: The sum of @kt and @nsec in ktime_t format.
- */
-extern ktime_t ktime_add_ns(const ktime_t kt, u64 nsec);
-
-/**
- * ktime_sub_ns - Subtract a scalar nanoseconds value from a ktime_t variable
- * @kt:                minuend
- * @nsec:      the scalar nsec value to subtract
- *
- * Return: The subtraction of @nsec from @kt in ktime_t format.
- */
-extern ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec);
-
-/**
- * timespec_to_ktime - convert a timespec to ktime_t format
- * @ts:                the timespec variable to convert
- *
- * Return: A ktime_t variable with the converted timespec value.
- */
-static inline ktime_t timespec_to_ktime(const struct timespec ts)
-{
-       return (ktime_t) { .tv = { .sec = (s32)ts.tv_sec,
-                                  .nsec = (s32)ts.tv_nsec } };
-}
-
-/**
- * timeval_to_ktime - convert a timeval to ktime_t format
- * @tv:                the timeval variable to convert
- *
- * Return: A ktime_t variable with the converted timeval value.
- */
-static inline ktime_t timeval_to_ktime(const struct timeval tv)
-{
-       return (ktime_t) { .tv = { .sec = (s32)tv.tv_sec,
-                                  .nsec = (s32)(tv.tv_usec *
-                                                NSEC_PER_USEC) } };
-}
-
-/**
- * ktime_to_timespec - convert a ktime_t variable to timespec format
- * @kt:                the ktime_t variable to convert
- *
- * Return: The timespec representation of the ktime value.
- */
-static inline struct timespec ktime_to_timespec(const ktime_t kt)
-{
-       return (struct timespec) { .tv_sec = (time_t) kt.tv.sec,
-                                  .tv_nsec = (long) kt.tv.nsec };
-}
-
-/**
- * ktime_to_timeval - convert a ktime_t variable to timeval format
- * @kt:                the ktime_t variable to convert
- *
- * Return: The timeval representation of the ktime value.
- */
-static inline struct timeval ktime_to_timeval(const ktime_t kt)
-{
-       return (struct timeval) {
-               .tv_sec = (time_t) kt.tv.sec,
-               .tv_usec = (suseconds_t) (kt.tv.nsec / NSEC_PER_USEC) };
-}
-
-/**
- * ktime_to_ns - convert a ktime_t variable to scalar nanoseconds
- * @kt:                the ktime_t variable to convert
- *
- * Return: The scalar nanoseconds representation of @kt.
- */
-static inline s64 ktime_to_ns(const ktime_t kt)
-{
-       return (s64) kt.tv.sec * NSEC_PER_SEC + kt.tv.nsec;
-}
-
-#endif /* !((BITS_PER_LONG == 64) || defined(CONFIG_KTIME_SCALAR)) */
 
 /**
  * ktime_equal - Compares two ktime_t variables to see if they are equal
@@ -328,16 +165,20 @@ static inline bool ktime_before(const ktime_t cmp1, const ktime_t cmp2)
        return ktime_compare(cmp1, cmp2) < 0;
 }
 
+#if BITS_PER_LONG < 64
+extern u64 ktime_divns(const ktime_t kt, s64 div);
+#else /* BITS_PER_LONG < 64 */
+# define ktime_divns(kt, div)          (u64)((kt).tv64 / (div))
+#endif
+
 static inline s64 ktime_to_us(const ktime_t kt)
 {
-       struct timeval tv = ktime_to_timeval(kt);
-       return (s64) tv.tv_sec * USEC_PER_SEC + tv.tv_usec;
+       return ktime_divns(kt, NSEC_PER_USEC);
 }
 
 static inline s64 ktime_to_ms(const ktime_t kt)
 {
-       struct timeval tv = ktime_to_timeval(kt);
-       return (s64) tv.tv_sec * MSEC_PER_SEC + tv.tv_usec / USEC_PER_MSEC;
+       return ktime_divns(kt, NSEC_PER_MSEC);
 }
 
 static inline s64 ktime_us_delta(const ktime_t later, const ktime_t earlier)
@@ -381,6 +222,25 @@ static inline __must_check bool ktime_to_timespec_cond(const ktime_t kt,
        }
 }
 
+/**
+ * ktime_to_timespec64_cond - convert a ktime_t variable to timespec64
+ *                         format only if the variable contains data
+ * @kt:                the ktime_t variable to convert
+ * @ts:                the timespec64 variable to store the result in
+ *
+ * Return: %true if there was a successful conversion, %false if kt was 0.
+ */
+static inline __must_check bool ktime_to_timespec64_cond(const ktime_t kt,
+                                                      struct timespec64 *ts)
+{
+       if (kt.tv64) {
+               *ts = ktime_to_timespec64(kt);
+               return true;
+       } else {
+               return false;
+       }
+}
+
 /*
  * The resolution of the clocks. The resolution value is returned in
  * the clock_getres() system call to give application programmers an
@@ -390,12 +250,6 @@ static inline __must_check bool ktime_to_timespec_cond(const ktime_t kt,
 #define LOW_RES_NSEC           TICK_NSEC
 #define KTIME_LOW_RES          (ktime_t){ .tv64 = LOW_RES_NSEC }
 
-/* Get the monotonic time in timespec format: */
-extern void ktime_get_ts(struct timespec *ts);
-
-/* Get the real (wall-) time in timespec format: */
-#define ktime_get_real_ts(ts)  getnstimeofday(ts)
-
 static inline ktime_t ns_to_ktime(u64 ns)
 {
        static const ktime_t ktime_zero = { .tv64 = 0 };
@@ -410,4 +264,6 @@ static inline ktime_t ms_to_ktime(u64 ms)
        return ktime_add_ms(ktime_zero, ms);
 }
 
+# include <linux/timekeeping.h>
+
 #endif
index 2bce4aa..52d631c 100644 (file)
@@ -604,8 +604,8 @@ struct mlx5_cmd_work_ent {
        int                     page_queue;
        u8                      status;
        u8                      token;
-       struct timespec         ts1;
-       struct timespec         ts2;
+       u64                     ts1;
+       u64                     ts2;
        u16                     op;
 };
 
index c13b878..fb7b722 100644 (file)
@@ -109,7 +109,12 @@ static inline bool of_dma_is_coherent(struct device_node *np)
 extern int of_address_to_resource(struct device_node *dev, int index,
                                  struct resource *r);
 void __iomem *of_iomap(struct device_node *node, int index);
+void __iomem *of_io_request_and_map(struct device_node *device,
+                                       int index, char *name);
 #else
+
+#include <linux/io.h>
+
 static inline int of_address_to_resource(struct device_node *dev, int index,
                                         struct resource *r)
 {
@@ -120,6 +125,12 @@ static inline void __iomem *of_iomap(struct device_node *device, int index)
 {
        return NULL;
 }
+
+static inline void __iomem *of_io_request_and_map(struct device_node *device,
+                                       int index, char *name)
+{
+       return IOMEM_ERR_PTR(-EINVAL);
+}
 #endif
 
 #if defined(CONFIG_OF_ADDRESS) && defined(CONFIG_PCI)
index 42cac4d..66124d6 100644 (file)
@@ -813,7 +813,7 @@ struct task_delay_info {
         * associated with the operation is added to XXX_delay.
         * XXX_delay contains the accumulated delay time in nanoseconds.
         */
-       struct timespec blkio_start, blkio_end; /* Shared by blkio, swapin */
+       u64 blkio_start;        /* Shared by blkio, swapin */
        u64 blkio_delay;        /* wait for sync block io completion */
        u64 swapin_delay;       /* wait for swapin block io completion */
        u32 blkio_count;        /* total count of the number of sync block */
@@ -821,7 +821,7 @@ struct task_delay_info {
        u32 swapin_count;       /* total count of the number of swapin block */
                                /* io operations performed */
 
-       struct timespec freepages_start, freepages_end;
+       u64 freepages_start;
        u64 freepages_delay;    /* wait for memory reclaim */
        u32 freepages_count;    /* total count of memory reclaim */
 };
@@ -1364,8 +1364,8 @@ struct task_struct {
        } vtime_snap_whence;
 #endif
        unsigned long nvcsw, nivcsw; /* context switch counts */
-       struct timespec start_time;             /* monotonic time */
-       struct timespec real_start_time;        /* boot based time */
+       u64 start_time;         /* monotonic time in nsec */
+       u64 real_start_time;    /* boot based time in nsec */
 /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
        unsigned long min_flt, maj_flt;
 
index 8cf3503..cc35963 100644 (file)
@@ -116,6 +116,22 @@ repeat:
        return ret;
 }
 
+/**
+ * raw_read_seqcount - Read the raw seqcount
+ * @s: pointer to seqcount_t
+ * Returns: count to be passed to read_seqcount_retry
+ *
+ * raw_read_seqcount opens a read critical section of the given
+ * seqcount without any lockdep checking and without checking or
+ * masking the LSB. Calling code is responsible for handling that.
+ */
+static inline unsigned raw_read_seqcount(const seqcount_t *s)
+{
+       unsigned ret = ACCESS_ONCE(s->sequence);
+       smp_rmb();
+       return ret;
+}
+
 /**
  * raw_read_seqcount_begin - start seq-read critical section w/o lockdep
  * @s: pointer to seqcount_t
@@ -217,6 +233,17 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
        s->sequence++;
 }
 
+/*
+ * raw_write_seqcount_latch - redirect readers to even/odd copy
+ * @s: pointer to seqcount_t
+ */
+static inline void raw_write_seqcount_latch(seqcount_t *s)
+{
+       smp_wmb();      /* prior stores before incrementing "sequence" */
+       s->sequence++;
+       smp_wmb();      /* increment "sequence" before following stores */
+}
+
 /*
  * Sequence counter only version assumes that callers are using their
  * own mutexing.
index 8e1e036..64638b0 100644 (file)
@@ -2,11 +2,6 @@
 #define __SH_TIMER_H__
 
 struct sh_timer_config {
-       char *name;
-       long channel_offset;
-       int timer_bit;
-       unsigned long clockevent_rating;
-       unsigned long clocksource_rating;
        unsigned int channels_mask;
 };
 
index d5d229b..8c42cf8 100644 (file)
@@ -4,19 +4,10 @@
 # include <linux/cache.h>
 # include <linux/seqlock.h>
 # include <linux/math64.h>
-#include <uapi/linux/time.h>
+# include <linux/time64.h>
 
 extern struct timezone sys_tz;
 
-/* Parameters used to convert the timespec values: */
-#define MSEC_PER_SEC   1000L
-#define USEC_PER_MSEC  1000L
-#define NSEC_PER_USEC  1000L
-#define NSEC_PER_MSEC  1000000L
-#define USEC_PER_SEC   1000000L
-#define NSEC_PER_SEC   1000000000L
-#define FSEC_PER_SEC   1000000000000000LL
-
 #define TIME_T_MAX     (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1)
 
 static inline int timespec_equal(const struct timespec *a,
@@ -84,13 +75,6 @@ static inline struct timespec timespec_sub(struct timespec lhs,
        return ts_delta;
 }
 
-#define KTIME_MAX                      ((s64)~((u64)1 << 63))
-#if (BITS_PER_LONG == 64)
-# define KTIME_SEC_MAX                 (KTIME_MAX / NSEC_PER_SEC)
-#else
-# define KTIME_SEC_MAX                 LONG_MAX
-#endif
-
 /*
  * Returns true if the timespec is norm, false if denorm:
  */
@@ -115,27 +99,7 @@ static inline bool timespec_valid_strict(const struct timespec *ts)
        return true;
 }
 
-extern bool persistent_clock_exist;
-
-static inline bool has_persistent_clock(void)
-{
-       return persistent_clock_exist;
-}
-
-extern void read_persistent_clock(struct timespec *ts);
-extern void read_boot_clock(struct timespec *ts);
-extern int persistent_clock_is_local;
-extern int update_persistent_clock(struct timespec now);
-void timekeeping_init(void);
-extern int timekeeping_suspended;
-
-unsigned long get_seconds(void);
-struct timespec current_kernel_time(void);
-struct timespec __current_kernel_time(void); /* does not take xtime_lock */
-struct timespec get_monotonic_coarse(void);
-void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
-                               struct timespec *wtom, struct timespec *sleep);
-void timekeeping_inject_sleeptime(struct timespec *delta);
+extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
 
 #define CURRENT_TIME           (current_kernel_time())
 #define CURRENT_TIME_SEC       ((struct timespec) { get_seconds(), 0 })
@@ -153,33 +117,14 @@ void timekeeping_inject_sleeptime(struct timespec *delta);
 extern u32 (*arch_gettimeoffset)(void);
 #endif
 
-extern void do_gettimeofday(struct timeval *tv);
-extern int do_settimeofday(const struct timespec *tv);
-extern int do_sys_settimeofday(const struct timespec *tv,
-                              const struct timezone *tz);
-#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts)
-extern long do_utimes(int dfd, const char __user *filename, struct timespec *times, int flags);
 struct itimerval;
 extern int do_setitimer(int which, struct itimerval *value,
                        struct itimerval *ovalue);
-extern unsigned int alarm_setitimer(unsigned int seconds);
 extern int do_getitimer(int which, struct itimerval *value);
-extern int __getnstimeofday(struct timespec *tv);
-extern void getnstimeofday(struct timespec *tv);
-extern void getrawmonotonic(struct timespec *ts);
-extern void getnstime_raw_and_real(struct timespec *ts_raw,
-               struct timespec *ts_real);
-extern void getboottime(struct timespec *ts);
-extern void monotonic_to_bootbased(struct timespec *ts);
-extern void get_monotonic_boottime(struct timespec *ts);
 
-extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
-extern int timekeeping_valid_for_hres(void);
-extern u64 timekeeping_max_deferment(void);
-extern int timekeeping_inject_offset(struct timespec *ts);
-extern s32 timekeeping_get_tai_offset(void);
-extern void timekeeping_set_tai_offset(s32 tai_offset);
-extern void timekeeping_clocktai(struct timespec *ts);
+extern unsigned int alarm_setitimer(unsigned int seconds);
+
+extern long do_utimes(int dfd, const char __user *filename, struct timespec *times, int flags);
 
 struct tms;
 extern void do_sys_times(struct tms *);
diff --git a/include/linux/time64.h b/include/linux/time64.h
new file mode 100644 (file)
index 0000000..a383147
--- /dev/null
@@ -0,0 +1,190 @@
+#ifndef _LINUX_TIME64_H
+#define _LINUX_TIME64_H
+
+#include <uapi/linux/time.h>
+
+typedef __s64 time64_t;
+
+/*
+ * This wants to go into uapi/linux/time.h once we agreed about the
+ * userspace interfaces.
+ */
+#if __BITS_PER_LONG == 64
+# define timespec64 timespec
+#else
+struct timespec64 {
+       time64_t        tv_sec;                 /* seconds */
+       long            tv_nsec;                /* nanoseconds */
+};
+#endif
+
+/* Parameters used to convert the timespec values: */
+#define MSEC_PER_SEC   1000L
+#define USEC_PER_MSEC  1000L
+#define NSEC_PER_USEC  1000L
+#define NSEC_PER_MSEC  1000000L
+#define USEC_PER_SEC   1000000L
+#define NSEC_PER_SEC   1000000000L
+#define FSEC_PER_SEC   1000000000000000LL
+
+/* Located here for timespec[64]_valid_strict */
+#define KTIME_MAX                      ((s64)~((u64)1 << 63))
+#define KTIME_SEC_MAX                  (KTIME_MAX / NSEC_PER_SEC)
+
+#if __BITS_PER_LONG == 64
+
+static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
+{
+       return ts64;
+}
+
+static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
+{
+       return ts;
+}
+
+# define timespec64_equal              timespec_equal
+# define timespec64_compare            timespec_compare
+# define set_normalized_timespec64     set_normalized_timespec
+# define timespec64_add_safe           timespec_add_safe
+# define timespec64_add                        timespec_add
+# define timespec64_sub                        timespec_sub
+# define timespec64_valid              timespec_valid
+# define timespec64_valid_strict       timespec_valid_strict
+# define timespec64_to_ns              timespec_to_ns
+# define ns_to_timespec64              ns_to_timespec
+# define timespec64_add_ns             timespec_add_ns
+
+#else
+
+static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
+{
+       struct timespec ret;
+
+       ret.tv_sec = (time_t)ts64.tv_sec;
+       ret.tv_nsec = ts64.tv_nsec;
+       return ret;
+}
+
+static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
+{
+       struct timespec64 ret;
+
+       ret.tv_sec = ts.tv_sec;
+       ret.tv_nsec = ts.tv_nsec;
+       return ret;
+}
+
+static inline int timespec64_equal(const struct timespec64 *a,
+                                  const struct timespec64 *b)
+{
+       return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec);
+}
+
+/*
+ * lhs < rhs:  return <0
+ * lhs == rhs: return 0
+ * lhs > rhs:  return >0
+ */
+static inline int timespec64_compare(const struct timespec64 *lhs, const struct timespec64 *rhs)
+{
+       if (lhs->tv_sec < rhs->tv_sec)
+               return -1;
+       if (lhs->tv_sec > rhs->tv_sec)
+               return 1;
+       return lhs->tv_nsec - rhs->tv_nsec;
+}
+
+extern void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec);
+
+/*
+ * timespec64_add_safe assumes both values are positive and checks for
+ * overflow. It will return TIME_T_MAX if the returned value would be
+ * smaller than either of the arguments.
+ */
+extern struct timespec64 timespec64_add_safe(const struct timespec64 lhs,
+                                        const struct timespec64 rhs);
+
+
+static inline struct timespec64 timespec64_add(struct timespec64 lhs,
+                                               struct timespec64 rhs)
+{
+       struct timespec64 ts_delta;
+       set_normalized_timespec64(&ts_delta, lhs.tv_sec + rhs.tv_sec,
+                               lhs.tv_nsec + rhs.tv_nsec);
+       return ts_delta;
+}
+
+/*
+ * sub = lhs - rhs, in normalized form
+ */
+static inline struct timespec64 timespec64_sub(struct timespec64 lhs,
+                                               struct timespec64 rhs)
+{
+       struct timespec64 ts_delta;
+       set_normalized_timespec64(&ts_delta, lhs.tv_sec - rhs.tv_sec,
+                               lhs.tv_nsec - rhs.tv_nsec);
+       return ts_delta;
+}
+
+/*
+ * Returns true if the timespec64 is norm, false if denorm:
+ */
+static inline bool timespec64_valid(const struct timespec64 *ts)
+{
+       /* Dates before 1970 are bogus */
+       if (ts->tv_sec < 0)
+               return false;
+       /* Can't have more nanoseconds than a second */
+       if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
+               return false;
+       return true;
+}
+
+static inline bool timespec64_valid_strict(const struct timespec64 *ts)
+{
+       if (!timespec64_valid(ts))
+               return false;
+       /* Disallow values that could overflow ktime_t */
+       if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX)
+               return false;
+       return true;
+}
+
+/**
+ * timespec64_to_ns - Convert timespec64 to nanoseconds
+ * @ts:                pointer to the timespec64 variable to be converted
+ *
+ * Returns the scalar nanosecond representation of the timespec64
+ * parameter.
+ */
+static inline s64 timespec64_to_ns(const struct timespec64 *ts)
+{
+       return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
+}
+
+/**
+ * ns_to_timespec64 - Convert nanoseconds to timespec64
+ * @nsec:      the nanoseconds value to be converted
+ *
+ * Returns the timespec64 representation of the nsec parameter.
+ */
+extern struct timespec64 ns_to_timespec64(const s64 nsec);
+
+/**
+ * timespec64_add_ns - Adds nanoseconds to a timespec64
+ * @a:         pointer to timespec64 to be incremented
+ * @ns:                unsigned nanoseconds value to be added
+ *
+ * This must always be inlined because its used from the x86-64 vdso,
+ * which cannot call other kernel functions.
+ */
+static __always_inline void timespec64_add_ns(struct timespec64 *a, u64 ns)
+{
+       a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns);
+       a->tv_nsec = ns;
+}
+
+#endif
+
+#endif /* _LINUX_TIME64_H */
index c1825eb..95640dc 100644 (file)
 #include <linux/jiffies.h>
 #include <linux/time.h>
 
-/* Structure holding internal timekeeping values. */
-struct timekeeper {
-       /* Current clocksource used for timekeeping. */
+/**
+ * struct tk_read_base - base structure for timekeeping readout
+ * @clock:     Current clocksource used for timekeeping.
+ * @read:      Read function of @clock
+ * @mask:      Bitmask for two's complement subtraction of non 64bit clocks
+ * @cycle_last: @clock cycle value at last update
+ * @mult:      NTP adjusted multiplier for scaled math conversion
+ * @shift:     Shift value for scaled math conversion
+ * @xtime_nsec: Shifted (fractional) nano seconds offset for readout
+ * @base_mono:  ktime_t (nanoseconds) base time for readout
+ *
+ * This struct has size 56 byte on 64 bit. Together with a seqcount it
+ * occupies a single 64byte cache line.
+ *
+ * The struct is separate from struct timekeeper as it is also used
+ * for a fast NMI safe accessor to clock monotonic.
+ */
+struct tk_read_base {
        struct clocksource      *clock;
-       /* NTP adjusted clock multiplier */
+       cycle_t                 (*read)(struct clocksource *cs);
+       cycle_t                 mask;
+       cycle_t                 cycle_last;
        u32                     mult;
-       /* The shift value of the current clocksource. */
        u32                     shift;
-       /* Number of clock cycles in one NTP interval. */
+       u64                     xtime_nsec;
+       ktime_t                 base_mono;
+};
+
+/**
+ * struct timekeeper - Structure holding internal timekeeping values.
+ * @tkr:               The readout base structure
+ * @xtime_sec:         Current CLOCK_REALTIME time in seconds
+ * @wall_to_monotonic: CLOCK_REALTIME to CLOCK_MONOTONIC offset
+ * @offs_real:         Offset clock monotonic -> clock realtime
+ * @offs_boot:         Offset clock monotonic -> clock boottime
+ * @offs_tai:          Offset clock monotonic -> clock tai
+ * @tai_offset:                The current UTC to TAI offset in seconds
+ * @base_raw:          Monotonic raw base time in ktime_t format
+ * @raw_time:          Monotonic raw base time in timespec64 format
+ * @cycle_interval:    Number of clock cycles in one NTP interval
+ * @xtime_interval:    Number of clock shifted nano seconds in one NTP
+ *                     interval.
+ * @xtime_remainder:   Shifted nano seconds left over when rounding
+ *                     @cycle_interval
+ * @raw_interval:      Raw nano seconds accumulated per NTP interval.
+ * @ntp_error:         Difference between accumulated time and NTP time in ntp
+ *                     shifted nano seconds.
+ * @ntp_error_shift:   Shift conversion between clock shifted nano seconds and
+ *                     ntp shifted nano seconds.
+ *
+ * Note: For timespec(64) based interfaces wall_to_monotonic is what
+ * we need to add to xtime (or xtime corrected for sub jiffie times)
+ * to get to monotonic time.  Monotonic is pegged at zero at system
+ * boot time, so wall_to_monotonic will be negative, however, we will
+ * ALWAYS keep the tv_nsec part positive so we can use the usual
+ * normalization.
+ *
+ * wall_to_monotonic is moved after resume from suspend for the
+ * monotonic time not to jump. We need to add total_sleep_time to
+ * wall_to_monotonic to get the real boot based time offset.
+ *
+ * wall_to_monotonic is no longer the boot time, getboottime must be
+ * used instead.
+ */
+struct timekeeper {
+       struct tk_read_base     tkr;
+       u64                     xtime_sec;
+       struct timespec64       wall_to_monotonic;
+       ktime_t                 offs_real;
+       ktime_t                 offs_boot;
+       ktime_t                 offs_tai;
+       s32                     tai_offset;
+       ktime_t                 base_raw;
+       struct timespec64       raw_time;
+
+       /* The following members are for timekeeping internal use */
        cycle_t                 cycle_interval;
-       /* Last cycle value (also stored in clock->cycle_last) */
-       cycle_t                 cycle_last;
-       /* Number of clock shifted nano seconds in one NTP interval. */
        u64                     xtime_interval;
-       /* shifted nano seconds left over when rounding cycle_interval */
        s64                     xtime_remainder;
-       /* Raw nano seconds accumulated per NTP interval. */
        u32                     raw_interval;
-
-       /* Current CLOCK_REALTIME time in seconds */
-       u64                     xtime_sec;
-       /* Clock shifted nano seconds */
-       u64                     xtime_nsec;
-
+       /* The ntp_tick_length() value currently being used.
+        * This cached copy ensures we consistently apply the tick
+        * length for an entire tick, as ntp_tick_length may change
+        * mid-tick, and we don't want to apply that new value to
+        * the tick in progress.
+        */
+       u64                     ntp_tick;
        /* Difference between accumulated time and NTP time in ntp
         * shifted nano seconds. */
        s64                     ntp_error;
-       /* Shift conversion between clock shifted nano seconds and
-        * ntp shifted nano seconds. */
        u32                     ntp_error_shift;
-
-       /*
-        * wall_to_monotonic is what we need to add to xtime (or xtime corrected
-        * for sub jiffie times) to get to monotonic time.  Monotonic is pegged
-        * at zero at system boot time, so wall_to_monotonic will be negative,
-        * however, we will ALWAYS keep the tv_nsec part positive so we can use
-        * the usual normalization.
-        *
-        * wall_to_monotonic is moved after resume from suspend for the
-        * monotonic time not to jump. We need to add total_sleep_time to
-        * wall_to_monotonic to get the real boot based time offset.
-        *
-        * - wall_to_monotonic is no longer the boot time, getboottime must be
-        * used instead.
-        */
-       struct timespec         wall_to_monotonic;
-       /* Offset clock monotonic -> clock realtime */
-       ktime_t                 offs_real;
-       /* time spent in suspend */
-       struct timespec         total_sleep_time;
-       /* Offset clock monotonic -> clock boottime */
-       ktime_t                 offs_boot;
-       /* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */
-       struct timespec         raw_time;
-       /* The current UTC to TAI offset in seconds */
-       s32                     tai_offset;
-       /* Offset clock monotonic -> clock tai */
-       ktime_t                 offs_tai;
-
+       u32                     ntp_err_mult;
 };
 
-static inline struct timespec tk_xtime(struct timekeeper *tk)
-{
-       struct timespec ts;
-
-       ts.tv_sec = tk->xtime_sec;
-       ts.tv_nsec = (long)(tk->xtime_nsec >> tk->shift);
-       return ts;
-}
-
-
 #ifdef CONFIG_GENERIC_TIME_VSYSCALL
 
 extern void update_vsyscall(struct timekeeper *tk);
@@ -89,17 +112,10 @@ extern void update_vsyscall_tz(void);
 #elif defined(CONFIG_GENERIC_TIME_VSYSCALL_OLD)
 
 extern void update_vsyscall_old(struct timespec *ts, struct timespec *wtm,
-                               struct clocksource *c, u32 mult);
+                               struct clocksource *c, u32 mult,
+                               cycle_t cycle_last);
 extern void update_vsyscall_tz(void);
 
-static inline void update_vsyscall(struct timekeeper *tk)
-{
-       struct timespec xt;
-
-       xt = tk_xtime(tk);
-       update_vsyscall_old(&xt, &tk->wall_to_monotonic, tk->clock, tk->mult);
-}
-
 #else
 
 static inline void update_vsyscall(struct timekeeper *tk)
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
new file mode 100644 (file)
index 0000000..1caa6b0
--- /dev/null
@@ -0,0 +1,209 @@
+#ifndef _LINUX_TIMEKEEPING_H
+#define _LINUX_TIMEKEEPING_H
+
+/* Included from linux/ktime.h */
+
+void timekeeping_init(void);
+extern int timekeeping_suspended;
+
+/*
+ * Get and set timeofday
+ */
+extern void do_gettimeofday(struct timeval *tv);
+extern int do_settimeofday(const struct timespec *tv);
+extern int do_sys_settimeofday(const struct timespec *tv,
+                              const struct timezone *tz);
+
+/*
+ * Kernel time accessors
+ */
+unsigned long get_seconds(void);
+struct timespec current_kernel_time(void);
+/* does not take xtime_lock */
+struct timespec __current_kernel_time(void);
+
+/*
+ * timespec based interfaces
+ */
+struct timespec get_monotonic_coarse(void);
+extern void getrawmonotonic(struct timespec *ts);
+extern void ktime_get_ts64(struct timespec64 *ts);
+
+extern int __getnstimeofday64(struct timespec64 *tv);
+extern void getnstimeofday64(struct timespec64 *tv);
+
+#if BITS_PER_LONG == 64
+static inline int __getnstimeofday(struct timespec *ts)
+{
+       return __getnstimeofday64(ts);
+}
+
+static inline void getnstimeofday(struct timespec *ts)
+{
+       getnstimeofday64(ts);
+}
+
+static inline void ktime_get_ts(struct timespec *ts)
+{
+       ktime_get_ts64(ts);
+}
+
+static inline void ktime_get_real_ts(struct timespec *ts)
+{
+       getnstimeofday64(ts);
+}
+
+#else
+static inline int __getnstimeofday(struct timespec *ts)
+{
+       struct timespec64 ts64;
+       int ret = __getnstimeofday64(&ts64);
+
+       *ts = timespec64_to_timespec(ts64);
+       return ret;
+}
+
+static inline void getnstimeofday(struct timespec *ts)
+{
+       struct timespec64 ts64;
+
+       getnstimeofday64(&ts64);
+       *ts = timespec64_to_timespec(ts64);
+}
+
+static inline void ktime_get_ts(struct timespec *ts)
+{
+       struct timespec64 ts64;
+
+       ktime_get_ts64(&ts64);
+       *ts = timespec64_to_timespec(ts64);
+}
+
+static inline void ktime_get_real_ts(struct timespec *ts)
+{
+       struct timespec64 ts64;
+
+       getnstimeofday64(&ts64);
+       *ts = timespec64_to_timespec(ts64);
+}
+#endif
+
+extern void getboottime(struct timespec *ts);
+
+#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts)
+#define ktime_get_real_ts64(ts)        getnstimeofday64(ts)
+
+/*
+ * ktime_t based interfaces
+ */
+
+enum tk_offsets {
+       TK_OFFS_REAL,
+       TK_OFFS_BOOT,
+       TK_OFFS_TAI,
+       TK_OFFS_MAX,
+};
+
+extern ktime_t ktime_get(void);
+extern ktime_t ktime_get_with_offset(enum tk_offsets offs);
+extern ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs);
+extern ktime_t ktime_get_raw(void);
+
+/**
+ * ktime_get_real - get the real (wall-) time in ktime_t format
+ */
+static inline ktime_t ktime_get_real(void)
+{
+       return ktime_get_with_offset(TK_OFFS_REAL);
+}
+
+/**
+ * ktime_get_boottime - Returns monotonic time since boot in ktime_t format
+ *
+ * This is similar to CLOCK_MONOTONIC/ktime_get, but also includes the
+ * time spent in suspend.
+ */
+static inline ktime_t ktime_get_boottime(void)
+{
+       return ktime_get_with_offset(TK_OFFS_BOOT);
+}
+
+/**
+ * ktime_get_clocktai - Returns the TAI time of day in ktime_t format
+ */
+static inline ktime_t ktime_get_clocktai(void)
+{
+       return ktime_get_with_offset(TK_OFFS_TAI);
+}
+
+/**
+ * ktime_mono_to_real - Convert monotonic time to clock realtime
+ */
+static inline ktime_t ktime_mono_to_real(ktime_t mono)
+{
+       return ktime_mono_to_any(mono, TK_OFFS_REAL);
+}
+
+static inline u64 ktime_get_ns(void)
+{
+       return ktime_to_ns(ktime_get());
+}
+
+static inline u64 ktime_get_real_ns(void)
+{
+       return ktime_to_ns(ktime_get_real());
+}
+
+static inline u64 ktime_get_boot_ns(void)
+{
+       return ktime_to_ns(ktime_get_boottime());
+}
+
+static inline u64 ktime_get_raw_ns(void)
+{
+       return ktime_to_ns(ktime_get_raw());
+}
+
+extern u64 ktime_get_mono_fast_ns(void);
+
+/*
+ * Timespec interfaces utilizing the ktime based ones
+ */
+static inline void get_monotonic_boottime(struct timespec *ts)
+{
+       *ts = ktime_to_timespec(ktime_get_boottime());
+}
+
+static inline void timekeeping_clocktai(struct timespec *ts)
+{
+       *ts = ktime_to_timespec(ktime_get_clocktai());
+}
+
+/*
+ * RTC specific
+ */
+extern void timekeeping_inject_sleeptime(struct timespec *delta);
+
+/*
+ * PPS accessor
+ */
+extern void getnstime_raw_and_real(struct timespec *ts_raw,
+                                  struct timespec *ts_real);
+
+/*
+ * Persistent clock related interfaces
+ */
+extern bool persistent_clock_exist;
+extern int persistent_clock_is_local;
+
+static inline bool has_persistent_clock(void)
+{
+       return persistent_clock_exist;
+}
+
+extern void read_persistent_clock(struct timespec *ts);
+extern void read_boot_clock(struct timespec *ts);
+extern int update_persistent_clock(struct timespec now);
+
+
+#endif
index d3b57fa..bd36ce4 100644 (file)
@@ -11,6 +11,9 @@
 /* For O_CLOEXEC and O_NONBLOCK */
 #include <linux/fcntl.h>
 
+/* For _IO helpers */
+#include <linux/ioctl.h>
+
 /*
  * CAREFUL: Check include/asm-generic/fcntl.h when defining
  * new flags, since they might collide with O_* ones. We want
@@ -29,4 +32,6 @@
 /* Flags for timerfd_settime.  */
 #define TFD_SETTIME_FLAGS (TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET)
 
+#define TFD_IOC_SET_TICKS      _IOW('T', 0, u64)
+
 #endif /* _LINUX_TIMERFD_H */
index f2a8b62..973a40c 100644 (file)
@@ -3,12 +3,11 @@
 #
 
 obj-y     = fork.o exec_domain.o panic.o \
-           cpu.o exit.o itimer.o time.o softirq.o resource.o \
-           sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \
+           cpu.o exit.o softirq.o resource.o \
+           sysctl.o sysctl_binary.o capability.o ptrace.o user.o \
            signal.o sys.o kmod.o workqueue.o pid.o task_work.o \
-           extable.o params.o posix-timers.o \
-           kthread.o sys_ni.o posix-cpu-timers.o \
-           hrtimer.o nsproxy.o \
+           extable.o params.o \
+           kthread.o sys_ni.o nsproxy.o \
            notifier.o ksysfs.o cred.o reboot.o \
            async.o range.o groups.o smpboot.o
 
@@ -110,22 +109,6 @@ targets += config_data.h
 $(obj)/config_data.h: $(obj)/config_data.gz FORCE
        $(call filechk,ikconfiggz)
 
-$(obj)/time.o: $(obj)/timeconst.h
-
-quiet_cmd_hzfile = HZFILE  $@
-      cmd_hzfile = echo "hz=$(CONFIG_HZ)" > $@
-
-targets += hz.bc
-$(obj)/hz.bc: $(objtree)/include/config/hz.h FORCE
-       $(call if_changed,hzfile)
-
-quiet_cmd_bc  = BC      $@
-      cmd_bc  = bc -q $(filter-out FORCE,$^) > $@
-
-targets += timeconst.h
-$(obj)/timeconst.h: $(obj)/hz.bc $(src)/timeconst.bc FORCE
-       $(call if_changed,bc)
-
 ###############################################################################
 #
 # Roll all the X.509 certificates that we can find together and pull them into
index 808a86f..a1844f1 100644 (file)
@@ -458,9 +458,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
        acct_t ac;
        mm_segment_t fs;
        unsigned long flim;
-       u64 elapsed;
-       u64 run_time;
-       struct timespec uptime;
+       u64 elapsed, run_time;
        struct tty_struct *tty;
        const struct cred *orig_cred;
 
@@ -484,10 +482,8 @@ static void do_acct_process(struct bsd_acct_struct *acct,
        strlcpy(ac.ac_comm, current->comm, sizeof(ac.ac_comm));
 
        /* calculate run_time in nsec*/
-       do_posix_clock_monotonic_gettime(&uptime);
-       run_time = (u64)uptime.tv_sec*NSEC_PER_SEC + uptime.tv_nsec;
-       run_time -= (u64)current->group_leader->start_time.tv_sec * NSEC_PER_SEC
-                      + current->group_leader->start_time.tv_nsec;
+       run_time = ktime_get_ns();
+       run_time -= current->group_leader->start_time;
        /* convert nsec -> AHZ */
        elapsed = nsec_to_AHZ(run_time);
 #if ACCT_VERSION==3
index 2f7c760..379650b 100644 (file)
@@ -2472,7 +2472,7 @@ static void kdb_gmtime(struct timespec *tv, struct kdb_tm *tm)
 static void kdb_sysinfo(struct sysinfo *val)
 {
        struct timespec uptime;
-       do_posix_clock_monotonic_gettime(&uptime);
+       ktime_get_ts(&uptime);
        memset(val, 0, sizeof(*val));
        val->uptime = uptime.tv_sec;
        val->loads[0] = avenrun[0];
index 54996b7..ef90b04 100644 (file)
@@ -46,42 +46,25 @@ void __delayacct_tsk_init(struct task_struct *tsk)
 }
 
 /*
- * Start accounting for a delay statistic using
- * its starting timestamp (@start)
+ * Finish delay accounting for a statistic using its timestamps (@start),
+ * accumulator (@total) and @count
  */
-
-static inline void delayacct_start(struct timespec *start)
+static void delayacct_end(u64 *start, u64 *total, u32 *count)
 {
-       do_posix_clock_monotonic_gettime(start);
-}
-
-/*
- * Finish delay accounting for a statistic using
- * its timestamps (@start, @end), accumalator (@total) and @count
- */
-
-static void delayacct_end(struct timespec *start, struct timespec *end,
-                               u64 *total, u32 *count)
-{
-       struct timespec ts;
-       s64 ns;
+       s64 ns = ktime_get_ns() - *start;
        unsigned long flags;
 
-       do_posix_clock_monotonic_gettime(end);
-       ts = timespec_sub(*end, *start);
-       ns = timespec_to_ns(&ts);
-       if (ns < 0)
-               return;
-
-       spin_lock_irqsave(&current->delays->lock, flags);
-       *total += ns;
-       (*count)++;
-       spin_unlock_irqrestore(&current->delays->lock, flags);
+       if (ns > 0) {
+               spin_lock_irqsave(&current->delays->lock, flags);
+               *total += ns;
+               (*count)++;
+               spin_unlock_irqrestore(&current->delays->lock, flags);
+       }
 }
 
 void __delayacct_blkio_start(void)
 {
-       delayacct_start(&current->delays->blkio_start);
+       current->delays->blkio_start = ktime_get_ns();
 }
 
 void __delayacct_blkio_end(void)
@@ -89,35 +72,29 @@ void __delayacct_blkio_end(void)
        if (current->delays->flags & DELAYACCT_PF_SWAPIN)
                /* Swapin block I/O */
                delayacct_end(&current->delays->blkio_start,
-                       &current->delays->blkio_end,
                        &current->delays->swapin_delay,
                        &current->delays->swapin_count);
        else    /* Other block I/O */
                delayacct_end(&current->delays->blkio_start,
-                       &current->delays->blkio_end,
                        &current->delays->blkio_delay,
                        &current->delays->blkio_count);
 }
 
 int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
 {
-       s64 tmp;
-       unsigned long t1;
-       unsigned long long t2, t3;
-       unsigned long flags;
-       struct timespec ts;
        cputime_t utime, stime, stimescaled, utimescaled;
+       unsigned long long t2, t3;
+       unsigned long flags, t1;
+       s64 tmp;
 
-       tmp = (s64)d->cpu_run_real_total;
        task_cputime(tsk, &utime, &stime);
-       cputime_to_timespec(utime + stime, &ts);
-       tmp += timespec_to_ns(&ts);
+       tmp = (s64)d->cpu_run_real_total;
+       tmp += cputime_to_nsecs(utime + stime);
        d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp;
 
-       tmp = (s64)d->cpu_scaled_run_real_total;
        task_cputime_scaled(tsk, &utimescaled, &stimescaled);
-       cputime_to_timespec(utimescaled + stimescaled, &ts);
-       tmp += timespec_to_ns(&ts);
+       tmp = (s64)d->cpu_scaled_run_real_total;
+       tmp += cputime_to_nsecs(utimescaled + stimescaled);
        d->cpu_scaled_run_real_total =
                (tmp < (s64)d->cpu_scaled_run_real_total) ? 0 : tmp;
 
@@ -169,13 +146,12 @@ __u64 __delayacct_blkio_ticks(struct task_struct *tsk)
 
 void __delayacct_freepages_start(void)
 {
-       delayacct_start(&current->delays->freepages_start);
+       current->delays->freepages_start = ktime_get_ns();
 }
 
 void __delayacct_freepages_end(void)
 {
        delayacct_end(&current->delays->freepages_start,
-                       &current->delays->freepages_end,
                        &current->delays->freepages_delay,
                        &current->delays->freepages_count);
 }
index 962885e..5f1bf3b 100644 (file)
@@ -1261,9 +1261,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 
        posix_cpu_timers_init(p);
 
-       do_posix_clock_monotonic_gettime(&p->start_time);
-       p->real_start_time = p->start_time;
-       monotonic_to_bootbased(&p->real_start_time);
+       p->start_time = ktime_get_ns();
+       p->real_start_time = ktime_get_boot_ns();
        p->io_context = NULL;
        p->audit_context = NULL;
        if (clone_flags & CLONE_THREAD)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
deleted file mode 100644 (file)
index 3ab2899..0000000
+++ /dev/null
@@ -1,1915 +0,0 @@
-/*
- *  linux/kernel/hrtimer.c
- *
- *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
- *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
- *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
- *
- *  High-resolution kernel timers
- *
- *  In contrast to the low-resolution timeout API implemented in
- *  kernel/timer.c, hrtimers provide finer resolution and accuracy
- *  depending on system configuration and capabilities.
- *
- *  These timers are currently used for:
- *   - itimers
- *   - POSIX timers
- *   - nanosleep
- *   - precise in-kernel timing
- *
- *  Started by: Thomas Gleixner and Ingo Molnar
- *
- *  Credits:
- *     based on kernel/timer.c
- *
- *     Help, testing, suggestions, bugfixes, improvements were
- *     provided by:
- *
- *     George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
- *     et. al.
- *
- *  For licencing details see kernel-base/COPYING
- */
-
-#include <linux/cpu.h>
-#include <linux/export.h>
-#include <linux/percpu.h>
-#include <linux/hrtimer.h>
-#include <linux/notifier.h>
-#include <linux/syscalls.h>
-#include <linux/kallsyms.h>
-#include <linux/interrupt.h>
-#include <linux/tick.h>
-#include <linux/seq_file.h>
-#include <linux/err.h>
-#include <linux/debugobjects.h>
-#include <linux/sched.h>
-#include <linux/sched/sysctl.h>
-#include <linux/sched/rt.h>
-#include <linux/sched/deadline.h>
-#include <linux/timer.h>
-#include <linux/freezer.h>
-
-#include <asm/uaccess.h>
-
-#include <trace/events/timer.h>
-
-/*
- * The timer bases:
- *
- * There are more clockids then hrtimer bases. Thus, we index
- * into the timer bases by the hrtimer_base_type enum. When trying
- * to reach a base using a clockid, hrtimer_clockid_to_base()
- * is used to convert from clockid to the proper hrtimer_base_type.
- */
-DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
-{
-
-       .lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
-       .clock_base =
-       {
-               {
-                       .index = HRTIMER_BASE_MONOTONIC,
-                       .clockid = CLOCK_MONOTONIC,
-                       .get_time = &ktime_get,
-                       .resolution = KTIME_LOW_RES,
-               },
-               {
-                       .index = HRTIMER_BASE_REALTIME,
-                       .clockid = CLOCK_REALTIME,
-                       .get_time = &ktime_get_real,
-                       .resolution = KTIME_LOW_RES,
-               },
-               {
-                       .index = HRTIMER_BASE_BOOTTIME,
-                       .clockid = CLOCK_BOOTTIME,
-                       .get_time = &ktime_get_boottime,
-                       .resolution = KTIME_LOW_RES,
-               },
-               {
-                       .index = HRTIMER_BASE_TAI,
-                       .clockid = CLOCK_TAI,
-                       .get_time = &ktime_get_clocktai,
-                       .resolution = KTIME_LOW_RES,
-               },
-       }
-};
-
-static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
-       [CLOCK_REALTIME]        = HRTIMER_BASE_REALTIME,
-       [CLOCK_MONOTONIC]       = HRTIMER_BASE_MONOTONIC,
-       [CLOCK_BOOTTIME]        = HRTIMER_BASE_BOOTTIME,
-       [CLOCK_TAI]             = HRTIMER_BASE_TAI,
-};
-
-static inline int hrtimer_clockid_to_base(clockid_t clock_id)
-{
-       return hrtimer_clock_to_base_table[clock_id];
-}
-
-
-/*
- * Get the coarse grained time at the softirq based on xtime and
- * wall_to_monotonic.
- */
-static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
-{
-       ktime_t xtim, mono, boot;
-       struct timespec xts, tom, slp;
-       s32 tai_offset;
-
-       get_xtime_and_monotonic_and_sleep_offset(&xts, &tom, &slp);
-       tai_offset = timekeeping_get_tai_offset();
-
-       xtim = timespec_to_ktime(xts);
-       mono = ktime_add(xtim, timespec_to_ktime(tom));
-       boot = ktime_add(mono, timespec_to_ktime(slp));
-       base->clock_base[HRTIMER_BASE_REALTIME].softirq_time = xtim;
-       base->clock_base[HRTIMER_BASE_MONOTONIC].softirq_time = mono;
-       base->clock_base[HRTIMER_BASE_BOOTTIME].softirq_time = boot;
-       base->clock_base[HRTIMER_BASE_TAI].softirq_time =
-                               ktime_add(xtim, ktime_set(tai_offset, 0));
-}
-
-/*
- * Functions and macros which are different for UP/SMP systems are kept in a
- * single place
- */
-#ifdef CONFIG_SMP
-
-/*
- * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
- * means that all timers which are tied to this base via timer->base are
- * locked, and the base itself is locked too.
- *
- * So __run_timers/migrate_timers can safely modify all timers which could
- * be found on the lists/queues.
- *
- * When the timer's base is locked, and the timer removed from list, it is
- * possible to set timer->base = NULL and drop the lock: the timer remains
- * locked.
- */
-static
-struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
-                                            unsigned long *flags)
-{
-       struct hrtimer_clock_base *base;
-
-       for (;;) {
-               base = timer->base;
-               if (likely(base != NULL)) {
-                       raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
-                       if (likely(base == timer->base))
-                               return base;
-                       /* The timer has migrated to another CPU: */
-                       raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
-               }
-               cpu_relax();
-       }
-}
-
-/*
- * With HIGHRES=y we do not migrate the timer when it is expiring
- * before the next event on the target cpu because we cannot reprogram
- * the target cpu hardware and we would cause it to fire late.
- *
- * Called with cpu_base->lock of target cpu held.
- */
-static int
-hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
-{
-#ifdef CONFIG_HIGH_RES_TIMERS
-       ktime_t expires;
-
-       if (!new_base->cpu_base->hres_active)
-               return 0;
-
-       expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
-       return expires.tv64 <= new_base->cpu_base->expires_next.tv64;
-#else
-       return 0;
-#endif
-}
-
-/*
- * Switch the timer base to the current CPU when possible.
- */
-static inline struct hrtimer_clock_base *
-switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
-                   int pinned)
-{
-       struct hrtimer_clock_base *new_base;
-       struct hrtimer_cpu_base *new_cpu_base;
-       int this_cpu = smp_processor_id();
-       int cpu = get_nohz_timer_target(pinned);
-       int basenum = base->index;
-
-again:
-       new_cpu_base = &per_cpu(hrtimer_bases, cpu);
-       new_base = &new_cpu_base->clock_base[basenum];
-
-       if (base != new_base) {
-               /*
-                * We are trying to move timer to new_base.
-                * However we can't change timer's base while it is running,
-                * so we keep it on the same CPU. No hassle vs. reprogramming
-                * the event source in the high resolution case. The softirq
-                * code will take care of this when the timer function has
-                * completed. There is no conflict as we hold the lock until
-                * the timer is enqueued.
-                */
-               if (unlikely(hrtimer_callback_running(timer)))
-                       return base;
-
-               /* See the comment in lock_timer_base() */
-               timer->base = NULL;
-               raw_spin_unlock(&base->cpu_base->lock);
-               raw_spin_lock(&new_base->cpu_base->lock);
-
-               if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
-                       cpu = this_cpu;
-                       raw_spin_unlock(&new_base->cpu_base->lock);
-                       raw_spin_lock(&base->cpu_base->lock);
-                       timer->base = base;
-                       goto again;
-               }
-               timer->base = new_base;
-       } else {
-               if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
-                       cpu = this_cpu;
-                       goto again;
-               }
-       }
-       return new_base;
-}
-
-#else /* CONFIG_SMP */
-
-static inline struct hrtimer_clock_base *
-lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
-{
-       struct hrtimer_clock_base *base = timer->base;
-
-       raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
-
-       return base;
-}
-
-# define switch_hrtimer_base(t, b, p)  (b)
-
-#endif /* !CONFIG_SMP */
-
-/*
- * Functions for the union type storage format of ktime_t which are
- * too large for inlining:
- */
-#if BITS_PER_LONG < 64
-# ifndef CONFIG_KTIME_SCALAR
-/**
- * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
- * @kt:                addend
- * @nsec:      the scalar nsec value to add
- *
- * Returns the sum of kt and nsec in ktime_t format
- */
-ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
-{
-       ktime_t tmp;
-
-       if (likely(nsec < NSEC_PER_SEC)) {
-               tmp.tv64 = nsec;
-       } else {
-               unsigned long rem = do_div(nsec, NSEC_PER_SEC);
-
-               /* Make sure nsec fits into long */
-               if (unlikely(nsec > KTIME_SEC_MAX))
-                       return (ktime_t){ .tv64 = KTIME_MAX };
-
-               tmp = ktime_set((long)nsec, rem);
-       }
-
-       return ktime_add(kt, tmp);
-}
-
-EXPORT_SYMBOL_GPL(ktime_add_ns);
-
-/**
- * ktime_sub_ns - Subtract a scalar nanoseconds value from a ktime_t variable
- * @kt:                minuend
- * @nsec:      the scalar nsec value to subtract
- *
- * Returns the subtraction of @nsec from @kt in ktime_t format
- */
-ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec)
-{
-       ktime_t tmp;
-
-       if (likely(nsec < NSEC_PER_SEC)) {
-               tmp.tv64 = nsec;
-       } else {
-               unsigned long rem = do_div(nsec, NSEC_PER_SEC);
-
-               tmp = ktime_set((long)nsec, rem);
-       }
-
-       return ktime_sub(kt, tmp);
-}
-
-EXPORT_SYMBOL_GPL(ktime_sub_ns);
-# endif /* !CONFIG_KTIME_SCALAR */
-
-/*
- * Divide a ktime value by a nanosecond value
- */
-u64 ktime_divns(const ktime_t kt, s64 div)
-{
-       u64 dclc;
-       int sft = 0;
-
-       dclc = ktime_to_ns(kt);
-       /* Make sure the divisor is less than 2^32: */
-       while (div >> 32) {
-               sft++;
-               div >>= 1;
-       }
-       dclc >>= sft;
-       do_div(dclc, (unsigned long) div);
-
-       return dclc;
-}
-#endif /* BITS_PER_LONG >= 64 */
-
-/*
- * Add two ktime values and do a safety check for overflow:
- */
-ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
-{
-       ktime_t res = ktime_add(lhs, rhs);
-
-       /*
-        * We use KTIME_SEC_MAX here, the maximum timeout which we can
-        * return to user space in a timespec:
-        */
-       if (res.tv64 < 0 || res.tv64 < lhs.tv64 || res.tv64 < rhs.tv64)
-               res = ktime_set(KTIME_SEC_MAX, 0);
-
-       return res;
-}
-
-EXPORT_SYMBOL_GPL(ktime_add_safe);
-
-#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
-
-static struct debug_obj_descr hrtimer_debug_descr;
-
-static void *hrtimer_debug_hint(void *addr)
-{
-       return ((struct hrtimer *) addr)->function;
-}
-
-/*
- * fixup_init is called when:
- * - an active object is initialized
- */
-static int hrtimer_fixup_init(void *addr, enum debug_obj_state state)
-{
-       struct hrtimer *timer = addr;
-
-       switch (state) {
-       case ODEBUG_STATE_ACTIVE:
-               hrtimer_cancel(timer);
-               debug_object_init(timer, &hrtimer_debug_descr);
-               return 1;
-       default:
-               return 0;
-       }
-}
-
-/*
- * fixup_activate is called when:
- * - an active object is activated
- * - an unknown object is activated (might be a statically initialized object)
- */
-static int hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
-{
-       switch (state) {
-
-       case ODEBUG_STATE_NOTAVAILABLE:
-               WARN_ON_ONCE(1);
-               return 0;
-
-       case ODEBUG_STATE_ACTIVE:
-               WARN_ON(1);
-
-       default:
-               return 0;
-       }
-}
-
-/*
- * fixup_free is called when:
- * - an active object is freed
- */
-static int hrtimer_fixup_free(void *addr, enum debug_obj_state state)
-{
-       struct hrtimer *timer = addr;
-
-       switch (state) {
-       case ODEBUG_STATE_ACTIVE:
-               hrtimer_cancel(timer);
-               debug_object_free(timer, &hrtimer_debug_descr);
-               return 1;
-       default:
-               return 0;
-       }
-}
-
-static struct debug_obj_descr hrtimer_debug_descr = {
-       .name           = "hrtimer",
-       .debug_hint     = hrtimer_debug_hint,
-       .fixup_init     = hrtimer_fixup_init,
-       .fixup_activate = hrtimer_fixup_activate,
-       .fixup_free     = hrtimer_fixup_free,
-};
-
-static inline void debug_hrtimer_init(struct hrtimer *timer)
-{
-       debug_object_init(timer, &hrtimer_debug_descr);
-}
-
-static inline void debug_hrtimer_activate(struct hrtimer *timer)
-{
-       debug_object_activate(timer, &hrtimer_debug_descr);
-}
-
-static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
-{
-       debug_object_deactivate(timer, &hrtimer_debug_descr);
-}
-
-static inline void debug_hrtimer_free(struct hrtimer *timer)
-{
-       debug_object_free(timer, &hrtimer_debug_descr);
-}
-
-static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
-                          enum hrtimer_mode mode);
-
-void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id,
-                          enum hrtimer_mode mode)
-{
-       debug_object_init_on_stack(timer, &hrtimer_debug_descr);
-       __hrtimer_init(timer, clock_id, mode);
-}
-EXPORT_SYMBOL_GPL(hrtimer_init_on_stack);
-
-void destroy_hrtimer_on_stack(struct hrtimer *timer)
-{
-       debug_object_free(timer, &hrtimer_debug_descr);
-}
-
-#else
-static inline void debug_hrtimer_init(struct hrtimer *timer) { }
-static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
-static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
-#endif
-
-static inline void
-debug_init(struct hrtimer *timer, clockid_t clockid,
-          enum hrtimer_mode mode)
-{
-       debug_hrtimer_init(timer);
-       trace_hrtimer_init(timer, clockid, mode);
-}
-
-static inline void debug_activate(struct hrtimer *timer)
-{
-       debug_hrtimer_activate(timer);
-       trace_hrtimer_start(timer);
-}
-
-static inline void debug_deactivate(struct hrtimer *timer)
-{
-       debug_hrtimer_deactivate(timer);
-       trace_hrtimer_cancel(timer);
-}
-
-/* High resolution timer related functions */
-#ifdef CONFIG_HIGH_RES_TIMERS
-
-/*
- * High resolution timer enabled ?
- */
-static int hrtimer_hres_enabled __read_mostly  = 1;
-
-/*
- * Enable / Disable high resolution mode
- */
-static int __init setup_hrtimer_hres(char *str)
-{
-       if (!strcmp(str, "off"))
-               hrtimer_hres_enabled = 0;
-       else if (!strcmp(str, "on"))
-               hrtimer_hres_enabled = 1;
-       else
-               return 0;
-       return 1;
-}
-
-__setup("highres=", setup_hrtimer_hres);
-
-/*
- * hrtimer_high_res_enabled - query, if the highres mode is enabled
- */
-static inline int hrtimer_is_hres_enabled(void)
-{
-       return hrtimer_hres_enabled;
-}
-
-/*
- * Is the high resolution mode active ?
- */
-static inline int hrtimer_hres_active(void)
-{
-       return __this_cpu_read(hrtimer_bases.hres_active);
-}
-
-/*
- * Reprogram the event source with checking both queues for the
- * next event
- * Called with interrupts disabled and base->lock held
- */
-static void
-hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
-{
-       int i;
-       struct hrtimer_clock_base *base = cpu_base->clock_base;
-       ktime_t expires, expires_next;
-
-       expires_next.tv64 = KTIME_MAX;
-
-       for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
-               struct hrtimer *timer;
-               struct timerqueue_node *next;
-
-               next = timerqueue_getnext(&base->active);
-               if (!next)
-                       continue;
-               timer = container_of(next, struct hrtimer, node);
-
-               expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
-               /*
-                * clock_was_set() has changed base->offset so the
-                * result might be negative. Fix it up to prevent a
-                * false positive in clockevents_program_event()
-                */
-               if (expires.tv64 < 0)
-                       expires.tv64 = 0;
-               if (expires.tv64 < expires_next.tv64)
-                       expires_next = expires;
-       }
-
-       if (skip_equal && expires_next.tv64 == cpu_base->expires_next.tv64)
-               return;
-
-       cpu_base->expires_next.tv64 = expires_next.tv64;
-
-       /*
-        * If a hang was detected in the last timer interrupt then we
-        * leave the hang delay active in the hardware. We want the
-        * system to make progress. That also prevents the following
-        * scenario:
-        * T1 expires 50ms from now
-        * T2 expires 5s from now
-        *
-        * T1 is removed, so this code is called and would reprogram
-        * the hardware to 5s from now. Any hrtimer_start after that
-        * will not reprogram the hardware due to hang_detected being
-        * set. So we'd effectivly block all timers until the T2 event
-        * fires.
-        */
-       if (cpu_base->hang_detected)
-               return;
-
-       if (cpu_base->expires_next.tv64 != KTIME_MAX)
-               tick_program_event(cpu_base->expires_next, 1);
-}
-
-/*
- * Shared reprogramming for clock_realtime and clock_monotonic
- *
- * When a timer is enqueued and expires earlier than the already enqueued
- * timers, we have to check, whether it expires earlier than the timer for
- * which the clock event device was armed.
- *
- * Called with interrupts disabled and base->cpu_base.lock held
- */
-static int hrtimer_reprogram(struct hrtimer *timer,
-                            struct hrtimer_clock_base *base)
-{
-       struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
-       ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
-       int res;
-
-       WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);
-
-       /*
-        * When the callback is running, we do not reprogram the clock event
-        * device. The timer callback is either running on a different CPU or
-        * the callback is executed in the hrtimer_interrupt context. The
-        * reprogramming is handled either by the softirq, which called the
-        * callback or at the end of the hrtimer_interrupt.
-        */
-       if (hrtimer_callback_running(timer))
-               return 0;
-
-       /*
-        * CLOCK_REALTIME timer might be requested with an absolute
-        * expiry time which is less than base->offset. Nothing wrong
-        * about that; just avoid calling into the tick code, which
-        * has no objections against negative expiry values.
-        */
-       if (expires.tv64 < 0)
-               return -ETIME;
-
-       if (expires.tv64 >= cpu_base->expires_next.tv64)
-               return 0;
-
-       /*
-        * If a hang was detected in the last timer interrupt then we
-        * do not schedule a timer which is earlier than the expiry
-        * which we enforced in the hang detection. We want the system
-        * to make progress.
-        */
-       if (cpu_base->hang_detected)
-               return 0;
-
-       /*
-        * Clockevents returns -ETIME, when the event was in the past.
-        */
-       res = tick_program_event(expires, 0);
-       if (!IS_ERR_VALUE(res))
-               cpu_base->expires_next = expires;
-       return res;
-}
-
-/*
- * Initialize the high resolution related parts of cpu_base
- */
-static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
-{
-       base->expires_next.tv64 = KTIME_MAX;
-       base->hres_active = 0;
-}
-
-/*
- * When High resolution timers are active, try to reprogram. Note, that in case
- * the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no expiry
- * check happens. The timer gets enqueued into the rbtree. The reprogramming
- * and expiry check is done in the hrtimer_interrupt or in the softirq.
- */
-static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
-                                           struct hrtimer_clock_base *base)
-{
-       return base->cpu_base->hres_active && hrtimer_reprogram(timer, base);
-}
-
-static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
-{
-       ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
-       ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
-       ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset;
-
-       return ktime_get_update_offsets(offs_real, offs_boot, offs_tai);
-}
-
-/*
- * Retrigger next event is called after clock was set
- *
- * Called with interrupts disabled via on_each_cpu()
- */
-static void retrigger_next_event(void *arg)
-{
-       struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);
-
-       if (!hrtimer_hres_active())
-               return;
-
-       raw_spin_lock(&base->lock);
-       hrtimer_update_base(base);
-       hrtimer_force_reprogram(base, 0);
-       raw_spin_unlock(&base->lock);
-}
-
-/*
- * Switch to high resolution mode
- */
-static int hrtimer_switch_to_hres(void)
-{
-       int i, cpu = smp_processor_id();
-       struct hrtimer_cpu_base *base = &per_cpu(hrtimer_bases, cpu);
-       unsigned long flags;
-
-       if (base->hres_active)
-               return 1;
-
-       local_irq_save(flags);
-
-       if (tick_init_highres()) {
-               local_irq_restore(flags);
-               printk(KERN_WARNING "Could not switch to high resolution "
-                                   "mode on CPU %d\n", cpu);
-               return 0;
-       }
-       base->hres_active = 1;
-       for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
-               base->clock_base[i].resolution = KTIME_HIGH_RES;
-
-       tick_setup_sched_timer();
-       /* "Retrigger" the interrupt to get things going */
-       retrigger_next_event(NULL);
-       local_irq_restore(flags);
-       return 1;
-}
-
-static void clock_was_set_work(struct work_struct *work)
-{
-       clock_was_set();
-}
-
-static DECLARE_WORK(hrtimer_work, clock_was_set_work);
-
-/*
- * Called from timekeeping and resume code to reprogram the hrtimer
- * interrupt device on all cpus.
- */
-void clock_was_set_delayed(void)
-{
-       schedule_work(&hrtimer_work);
-}
-
-#else
-
-static inline int hrtimer_hres_active(void) { return 0; }
-static inline int hrtimer_is_hres_enabled(void) { return 0; }
-static inline int hrtimer_switch_to_hres(void) { return 0; }
-static inline void
-hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
-static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
-                                           struct hrtimer_clock_base *base)
-{
-       return 0;
-}
-static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
-static inline void retrigger_next_event(void *arg) { }
-
-#endif /* CONFIG_HIGH_RES_TIMERS */
-
-/*
- * Clock realtime was set
- *
- * Change the offset of the realtime clock vs. the monotonic
- * clock.
- *
- * We might have to reprogram the high resolution timer interrupt. On
- * SMP we call the architecture specific code to retrigger _all_ high
- * resolution timer interrupts. On UP we just disable interrupts and
- * call the high resolution interrupt code.
- */
-void clock_was_set(void)
-{
-#ifdef CONFIG_HIGH_RES_TIMERS
-       /* Retrigger the CPU local events everywhere */
-       on_each_cpu(retrigger_next_event, NULL, 1);
-#endif
-       timerfd_clock_was_set();
-}
-
-/*
- * During resume we might have to reprogram the high resolution timer
- * interrupt on all online CPUs.  However, all other CPUs will be
- * stopped with interrupts disabled so the clock_was_set() call
- * must be deferred.
- */
-void hrtimers_resume(void)
-{
-       WARN_ONCE(!irqs_disabled(),
-                 KERN_INFO "hrtimers_resume() called with IRQs enabled!");
-
-       /* Retrigger on the local CPU */
-       retrigger_next_event(NULL);
-       /* And schedule a retrigger for all others */
-       clock_was_set_delayed();
-}
-
-static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
-{
-#ifdef CONFIG_TIMER_STATS
-       if (timer->start_site)
-               return;
-       timer->start_site = __builtin_return_address(0);
-       memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
-       timer->start_pid = current->pid;
-#endif
-}
-
-static inline void timer_stats_hrtimer_clear_start_info(struct hrtimer *timer)
-{
-#ifdef CONFIG_TIMER_STATS
-       timer->start_site = NULL;
-#endif
-}
-
-static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
-{
-#ifdef CONFIG_TIMER_STATS
-       if (likely(!timer_stats_active))
-               return;
-       timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
-                                timer->function, timer->start_comm, 0);
-#endif
-}
-
-/*
- * Counterpart to lock_hrtimer_base above:
- */
-static inline
-void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
-{
-       raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
-}
-
-/**
- * hrtimer_forward - forward the timer expiry
- * @timer:     hrtimer to forward
- * @now:       forward past this time
- * @interval:  the interval to forward
- *
- * Forward the timer expiry so it will expire in the future.
- * Returns the number of overruns.
- */
-u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
-{
-       u64 orun = 1;
-       ktime_t delta;
-
-       delta = ktime_sub(now, hrtimer_get_expires(timer));
-
-       if (delta.tv64 < 0)
-               return 0;
-
-       if (interval.tv64 < timer->base->resolution.tv64)
-               interval.tv64 = timer->base->resolution.tv64;
-
-       if (unlikely(delta.tv64 >= interval.tv64)) {
-               s64 incr = ktime_to_ns(interval);
-
-               orun = ktime_divns(delta, incr);
-               hrtimer_add_expires_ns(timer, incr * orun);
-               if (hrtimer_get_expires_tv64(timer) > now.tv64)
-                       return orun;
-               /*
-                * This (and the ktime_add() below) is the
-                * correction for exact:
-                */
-               orun++;
-       }
-       hrtimer_add_expires(timer, interval);
-
-       return orun;
-}
-EXPORT_SYMBOL_GPL(hrtimer_forward);
-
-/*
- * enqueue_hrtimer - internal function to (re)start a timer
- *
- * The timer is inserted in expiry order. Insertion into the
- * red black tree is O(log(n)). Must hold the base lock.
- *
- * Returns 1 when the new timer is the leftmost timer in the tree.
- */
-static int enqueue_hrtimer(struct hrtimer *timer,
-                          struct hrtimer_clock_base *base)
-{
-       debug_activate(timer);
-
-       timerqueue_add(&base->active, &timer->node);
-       base->cpu_base->active_bases |= 1 << base->index;
-
-       /*
-        * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the
-        * state of a possibly running callback.
-        */
-       timer->state |= HRTIMER_STATE_ENQUEUED;
-
-       return (&timer->node == base->active.next);
-}
-
-/*
- * __remove_hrtimer - internal function to remove a timer
- *
- * Caller must hold the base lock.
- *
- * High resolution timer mode reprograms the clock event device when the
- * timer is the one which expires next. The caller can disable this by setting
- * reprogram to zero. This is useful, when the context does a reprogramming
- * anyway (e.g. timer interrupt)
- */
-static void __remove_hrtimer(struct hrtimer *timer,
-                            struct hrtimer_clock_base *base,
-                            unsigned long newstate, int reprogram)
-{
-       struct timerqueue_node *next_timer;
-       if (!(timer->state & HRTIMER_STATE_ENQUEUED))
-               goto out;
-
-       next_timer = timerqueue_getnext(&base->active);
-       timerqueue_del(&base->active, &timer->node);
-       if (&timer->node == next_timer) {
-#ifdef CONFIG_HIGH_RES_TIMERS
-               /* Reprogram the clock event device, if enabled */
-               if (reprogram && hrtimer_hres_active()) {
-                       ktime_t expires;
-
-                       expires = ktime_sub(hrtimer_get_expires(timer),
-                                           base->offset);
-                       if (base->cpu_base->expires_next.tv64 == expires.tv64)
-                               hrtimer_force_reprogram(base->cpu_base, 1);
-               }
-#endif
-       }
-       if (!timerqueue_getnext(&base->active))
-               base->cpu_base->active_bases &= ~(1 << base->index);
-out:
-       timer->state = newstate;
-}
-
-/*
- * remove hrtimer, called with base lock held
- */
-static inline int
-remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
-{
-       if (hrtimer_is_queued(timer)) {
-               unsigned long state;
-               int reprogram;
-
-               /*
-                * Remove the timer and force reprogramming when high
-                * resolution mode is active and the timer is on the current
-                * CPU. If we remove a timer on another CPU, reprogramming is
-                * skipped. The interrupt event on this CPU is fired and
-                * reprogramming happens in the interrupt handler. This is a
-                * rare case and less expensive than a smp call.
-                */
-               debug_deactivate(timer);
-               timer_stats_hrtimer_clear_start_info(timer);
-               reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
-               /*
-                * We must preserve the CALLBACK state flag here,
-                * otherwise we could move the timer base in
-                * switch_hrtimer_base.
-                */
-               state = timer->state & HRTIMER_STATE_CALLBACK;
-               __remove_hrtimer(timer, base, state, reprogram);
-               return 1;
-       }
-       return 0;
-}
-
-int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
-               unsigned long delta_ns, const enum hrtimer_mode mode,
-               int wakeup)
-{
-       struct hrtimer_clock_base *base, *new_base;
-       unsigned long flags;
-       int ret, leftmost;
-
-       base = lock_hrtimer_base(timer, &flags);
-
-       /* Remove an active timer from the queue: */
-       ret = remove_hrtimer(timer, base);
-
-       if (mode & HRTIMER_MODE_REL) {
-               tim = ktime_add_safe(tim, base->get_time());
-               /*
-                * CONFIG_TIME_LOW_RES is a temporary way for architectures
-                * to signal that they simply return xtime in
-                * do_gettimeoffset(). In this case we want to round up by
-                * resolution when starting a relative timer, to avoid short
-                * timeouts. This will go away with the GTOD framework.
-                */
-#ifdef CONFIG_TIME_LOW_RES
-               tim = ktime_add_safe(tim, base->resolution);
-#endif
-       }
-
-       hrtimer_set_expires_range_ns(timer, tim, delta_ns);
-
-       /* Switch the timer base, if necessary: */
-       new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
-
-       timer_stats_hrtimer_set_start_info(timer);
-
-       leftmost = enqueue_hrtimer(timer, new_base);
-
-       /*
-        * Only allow reprogramming if the new base is on this CPU.
-        * (it might still be on another CPU if the timer was pending)
-        *
-        * XXX send_remote_softirq() ?
-        */
-       if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)
-               && hrtimer_enqueue_reprogram(timer, new_base)) {
-               if (wakeup) {
-                       /*
-                        * We need to drop cpu_base->lock to avoid a
-                        * lock ordering issue vs. rq->lock.
-                        */
-                       raw_spin_unlock(&new_base->cpu_base->lock);
-                       raise_softirq_irqoff(HRTIMER_SOFTIRQ);
-                       local_irq_restore(flags);
-                       return ret;
-               } else {
-                       __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
-               }
-       }
-
-       unlock_hrtimer_base(timer, &flags);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(__hrtimer_start_range_ns);
-
-/**
- * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
- * @timer:     the timer to be added
- * @tim:       expiry time
- * @delta_ns:  "slack" range for the timer
- * @mode:      expiry mode: absolute (HRTIMER_MODE_ABS) or
- *             relative (HRTIMER_MODE_REL)
- *
- * Returns:
- *  0 on success
- *  1 when the timer was active
- */
-int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
-               unsigned long delta_ns, const enum hrtimer_mode mode)
-{
-       return __hrtimer_start_range_ns(timer, tim, delta_ns, mode, 1);
-}
-EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
-
-/**
- * hrtimer_start - (re)start an hrtimer on the current CPU
- * @timer:     the timer to be added
- * @tim:       expiry time
- * @mode:      expiry mode: absolute (HRTIMER_MODE_ABS) or
- *             relative (HRTIMER_MODE_REL)
- *
- * Returns:
- *  0 on success
- *  1 when the timer was active
- */
-int
-hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
-{
-       return __hrtimer_start_range_ns(timer, tim, 0, mode, 1);
-}
-EXPORT_SYMBOL_GPL(hrtimer_start);
-
-
-/**
- * hrtimer_try_to_cancel - try to deactivate a timer
- * @timer:     hrtimer to stop
- *
- * Returns:
- *  0 when the timer was not active
- *  1 when the timer was active
- * -1 when the timer is currently executing the callback function and
- *    cannot be stopped
- */
-int hrtimer_try_to_cancel(struct hrtimer *timer)
-{
-       struct hrtimer_clock_base *base;
-       unsigned long flags;
-       int ret = -1;
-
-       base = lock_hrtimer_base(timer, &flags);
-
-       if (!hrtimer_callback_running(timer))
-               ret = remove_hrtimer(timer, base);
-
-       unlock_hrtimer_base(timer, &flags);
-
-       return ret;
-
-}
-EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);
-
-/**
- * hrtimer_cancel - cancel a timer and wait for the handler to finish.
- * @timer:     the timer to be cancelled
- *
- * Returns:
- *  0 when the timer was not active
- *  1 when the timer was active
- */
-int hrtimer_cancel(struct hrtimer *timer)
-{
-       for (;;) {
-               int ret = hrtimer_try_to_cancel(timer);
-
-               if (ret >= 0)
-                       return ret;
-               cpu_relax();
-       }
-}
-EXPORT_SYMBOL_GPL(hrtimer_cancel);
-
-/**
- * hrtimer_get_remaining - get remaining time for the timer
- * @timer:     the timer to read
- */
-ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
-{
-       unsigned long flags;
-       ktime_t rem;
-
-       lock_hrtimer_base(timer, &flags);
-       rem = hrtimer_expires_remaining(timer);
-       unlock_hrtimer_base(timer, &flags);
-
-       return rem;
-}
-EXPORT_SYMBOL_GPL(hrtimer_get_remaining);
-
-#ifdef CONFIG_NO_HZ_COMMON
-/**
- * hrtimer_get_next_event - get the time until next expiry event
- *
- * Returns the delta to the next expiry event or KTIME_MAX if no timer
- * is pending.
- */
-ktime_t hrtimer_get_next_event(void)
-{
-       struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
-       struct hrtimer_clock_base *base = cpu_base->clock_base;
-       ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
-       unsigned long flags;
-       int i;
-
-       raw_spin_lock_irqsave(&cpu_base->lock, flags);
-
-       if (!hrtimer_hres_active()) {
-               for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
-                       struct hrtimer *timer;
-                       struct timerqueue_node *next;
-
-                       next = timerqueue_getnext(&base->active);
-                       if (!next)
-                               continue;
-
-                       timer = container_of(next, struct hrtimer, node);
-                       delta.tv64 = hrtimer_get_expires_tv64(timer);
-                       delta = ktime_sub(delta, base->get_time());
-                       if (delta.tv64 < mindelta.tv64)
-                               mindelta.tv64 = delta.tv64;
-               }
-       }
-
-       raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
-
-       if (mindelta.tv64 < 0)
-               mindelta.tv64 = 0;
-       return mindelta;
-}
-#endif
-
-static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
-                          enum hrtimer_mode mode)
-{
-       struct hrtimer_cpu_base *cpu_base;
-       int base;
-
-       memset(timer, 0, sizeof(struct hrtimer));
-
-       cpu_base = &__raw_get_cpu_var(hrtimer_bases);
-
-       if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
-               clock_id = CLOCK_MONOTONIC;
-
-       base = hrtimer_clockid_to_base(clock_id);
-       timer->base = &cpu_base->clock_base[base];
-       timerqueue_init(&timer->node);
-
-#ifdef CONFIG_TIMER_STATS
-       timer->start_site = NULL;
-       timer->start_pid = -1;
-       memset(timer->start_comm, 0, TASK_COMM_LEN);
-#endif
-}
-
-/**
- * hrtimer_init - initialize a timer to the given clock
- * @timer:     the timer to be initialized
- * @clock_id:  the clock to be used
- * @mode:      timer mode abs/rel
- */
-void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
-                 enum hrtimer_mode mode)
-{
-       debug_init(timer, clock_id, mode);
-       __hrtimer_init(timer, clock_id, mode);
-}
-EXPORT_SYMBOL_GPL(hrtimer_init);
-
-/**
- * hrtimer_get_res - get the timer resolution for a clock
- * @which_clock: which clock to query
- * @tp:                 pointer to timespec variable to store the resolution
- *
- * Store the resolution of the clock selected by @which_clock in the
- * variable pointed to by @tp.
- */
-int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
-{
-       struct hrtimer_cpu_base *cpu_base;
-       int base = hrtimer_clockid_to_base(which_clock);
-
-       cpu_base = &__raw_get_cpu_var(hrtimer_bases);
-       *tp = ktime_to_timespec(cpu_base->clock_base[base].resolution);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(hrtimer_get_res);
-
-static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
-{
-       struct hrtimer_clock_base *base = timer->base;
-       struct hrtimer_cpu_base *cpu_base = base->cpu_base;
-       enum hrtimer_restart (*fn)(struct hrtimer *);
-       int restart;
-
-       WARN_ON(!irqs_disabled());
-
-       debug_deactivate(timer);
-       __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
-       timer_stats_account_hrtimer(timer);
-       fn = timer->function;
-
-       /*
-        * Because we run timers from hardirq context, there is no chance
-        * they get migrated to another cpu, therefore it's safe to unlock
-        * the timer base.
-        */
-       raw_spin_unlock(&cpu_base->lock);
-       trace_hrtimer_expire_entry(timer, now);
-       restart = fn(timer);
-       trace_hrtimer_expire_exit(timer);
-       raw_spin_lock(&cpu_base->lock);
-
-       /*
-        * Note: We clear the CALLBACK bit after enqueue_hrtimer and
-        * we do not reprogram the event hardware. Happens either in
-        * hrtimer_start_range_ns() or in hrtimer_interrupt()
-        */
-       if (restart != HRTIMER_NORESTART) {
-               BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
-               enqueue_hrtimer(timer, base);
-       }
-
-       WARN_ON_ONCE(!(timer->state & HRTIMER_STATE_CALLBACK));
-
-       timer->state &= ~HRTIMER_STATE_CALLBACK;
-}
-
-#ifdef CONFIG_HIGH_RES_TIMERS
-
-/*
- * High resolution timer interrupt
- * Called with interrupts disabled
- */
-void hrtimer_interrupt(struct clock_event_device *dev)
-{
-       struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
-       ktime_t expires_next, now, entry_time, delta;
-       int i, retries = 0;
-
-       BUG_ON(!cpu_base->hres_active);
-       cpu_base->nr_events++;
-       dev->next_event.tv64 = KTIME_MAX;
-
-       raw_spin_lock(&cpu_base->lock);
-       entry_time = now = hrtimer_update_base(cpu_base);
-retry:
-       expires_next.tv64 = KTIME_MAX;
-       /*
-        * We set expires_next to KTIME_MAX here with cpu_base->lock
-        * held to prevent that a timer is enqueued in our queue via
-        * the migration code. This does not affect enqueueing of
-        * timers which run their callback and need to be requeued on
-        * this CPU.
-        */
-       cpu_base->expires_next.tv64 = KTIME_MAX;
-
-       for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
-               struct hrtimer_clock_base *base;
-               struct timerqueue_node *node;
-               ktime_t basenow;
-
-               if (!(cpu_base->active_bases & (1 << i)))
-                       continue;
-
-               base = cpu_base->clock_base + i;
-               basenow = ktime_add(now, base->offset);
-
-               while ((node = timerqueue_getnext(&base->active))) {
-                       struct hrtimer *timer;
-
-                       timer = container_of(node, struct hrtimer, node);
-
-                       /*
-                        * The immediate goal for using the softexpires is
-                        * minimizing wakeups, not running timers at the
-                        * earliest interrupt after their soft expiration.
-                        * This allows us to avoid using a Priority Search
-        * Tree, which can answer a stabbing query for
-                        * overlapping intervals and instead use the simple
-                        * BST we already have.
-                        * We don't add extra wakeups by delaying timers that
-                        * are right-of a not yet expired timer, because that
-                        * timer will have to trigger a wakeup anyway.
-                        */
-
-                       if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) {
-                               ktime_t expires;
-
-                               expires = ktime_sub(hrtimer_get_expires(timer),
-                                                   base->offset);
-                               if (expires.tv64 < 0)
-                                       expires.tv64 = KTIME_MAX;
-                               if (expires.tv64 < expires_next.tv64)
-                                       expires_next = expires;
-                               break;
-                       }
-
-                       __run_hrtimer(timer, &basenow);
-               }
-       }
-
-       /*
-        * Store the new expiry value so the migration code can verify
-        * against it.
-        */
-       cpu_base->expires_next = expires_next;
-       raw_spin_unlock(&cpu_base->lock);
-
-       /* Reprogramming necessary ? */
-       if (expires_next.tv64 == KTIME_MAX ||
-           !tick_program_event(expires_next, 0)) {
-               cpu_base->hang_detected = 0;
-               return;
-       }
-
-       /*
-        * The next timer was already expired due to:
-        * - tracing
-        * - long lasting callbacks
-        * - being scheduled away when running in a VM
-        *
-        * We need to prevent that we loop forever in the hrtimer
-        * interrupt routine. We give it 3 attempts to avoid
-        * overreacting on some spurious event.
-        *
-        * Acquire base lock for updating the offsets and retrieving
-        * the current time.
-        */
-       raw_spin_lock(&cpu_base->lock);
-       now = hrtimer_update_base(cpu_base);
-       cpu_base->nr_retries++;
-       if (++retries < 3)
-               goto retry;
-       /*
-        * Give the system a chance to do something else than looping
-        * here. We stored the entry time, so we know exactly how long
-        * we spent here. We schedule the next event this amount of
-        * time away.
-        */
-       cpu_base->nr_hangs++;
-       cpu_base->hang_detected = 1;
-       raw_spin_unlock(&cpu_base->lock);
-       delta = ktime_sub(now, entry_time);
-       if (delta.tv64 > cpu_base->max_hang_time.tv64)
-               cpu_base->max_hang_time = delta;
-       /*
-        * Limit it to a sensible value as we enforce a longer
-        * delay. Give the CPU at least 100ms to catch up.
-        */
-       if (delta.tv64 > 100 * NSEC_PER_MSEC)
-               expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
-       else
-               expires_next = ktime_add(now, delta);
-       tick_program_event(expires_next, 1);
-       printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
-                   ktime_to_ns(delta));
-}
-
-/*
- * local version of hrtimer_peek_ahead_timers() called with interrupts
- * disabled.
- */
-static void __hrtimer_peek_ahead_timers(void)
-{
-       struct tick_device *td;
-
-       if (!hrtimer_hres_active())
-               return;
-
-       td = &__get_cpu_var(tick_cpu_device);
-       if (td && td->evtdev)
-               hrtimer_interrupt(td->evtdev);
-}
-
-/**
- * hrtimer_peek_ahead_timers -- run soft-expired timers now
- *
- * hrtimer_peek_ahead_timers will peek at the timer queue of
- * the current cpu and check if there are any timers for which
- * the soft expires time has passed. If any such timers exist,
- * they are run immediately and then removed from the timer queue.
- *
- */
-void hrtimer_peek_ahead_timers(void)
-{
-       unsigned long flags;
-
-       local_irq_save(flags);
-       __hrtimer_peek_ahead_timers();
-       local_irq_restore(flags);
-}
-
-static void run_hrtimer_softirq(struct softirq_action *h)
-{
-       hrtimer_peek_ahead_timers();
-}
-
-#else /* CONFIG_HIGH_RES_TIMERS */
-
-static inline void __hrtimer_peek_ahead_timers(void) { }
-
-#endif /* !CONFIG_HIGH_RES_TIMERS */
-
-/*
- * Called from timer softirq every jiffy, expire hrtimers:
- *
- * For HRT its the fall back code to run the softirq in the timer
- * softirq context in case the hrtimer initialization failed or has
- * not been done yet.
- */
-void hrtimer_run_pending(void)
-{
-       if (hrtimer_hres_active())
-               return;
-
-       /*
-        * This _is_ ugly: We have to check in the softirq context,
-        * whether we can switch to highres and / or nohz mode. The
-        * clocksource switch happens in the timer interrupt with
-        * xtime_lock held. Notification from there only sets the
-        * check bit in the tick_oneshot code, otherwise we might
-        * deadlock vs. xtime_lock.
-        */
-       if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
-               hrtimer_switch_to_hres();
-}
-
-/*
- * Called from hardirq context every jiffy
- */
-void hrtimer_run_queues(void)
-{
-       struct timerqueue_node *node;
-       struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
-       struct hrtimer_clock_base *base;
-       int index, gettime = 1;
-
-       if (hrtimer_hres_active())
-               return;
-
-       for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
-               base = &cpu_base->clock_base[index];
-               if (!timerqueue_getnext(&base->active))
-                       continue;
-
-               if (gettime) {
-                       hrtimer_get_softirq_time(cpu_base);
-                       gettime = 0;
-               }
-
-               raw_spin_lock(&cpu_base->lock);
-
-               while ((node = timerqueue_getnext(&base->active))) {
-                       struct hrtimer *timer;
-
-                       timer = container_of(node, struct hrtimer, node);
-                       if (base->softirq_time.tv64 <=
-                                       hrtimer_get_expires_tv64(timer))
-                               break;
-
-                       __run_hrtimer(timer, &base->softirq_time);
-               }
-               raw_spin_unlock(&cpu_base->lock);
-       }
-}
-
-/*
- * Sleep related functions:
- */
-static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
-{
-       struct hrtimer_sleeper *t =
-               container_of(timer, struct hrtimer_sleeper, timer);
-       struct task_struct *task = t->task;
-
-       t->task = NULL;
-       if (task)
-               wake_up_process(task);
-
-       return HRTIMER_NORESTART;
-}
-
-void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
-{
-       sl->timer.function = hrtimer_wakeup;
-       sl->task = task;
-}
-EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
-
-static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
-{
-       hrtimer_init_sleeper(t, current);
-
-       do {
-               set_current_state(TASK_INTERRUPTIBLE);
-               hrtimer_start_expires(&t->timer, mode);
-               if (!hrtimer_active(&t->timer))
-                       t->task = NULL;
-
-               if (likely(t->task))
-                       freezable_schedule();
-
-               hrtimer_cancel(&t->timer);
-               mode = HRTIMER_MODE_ABS;
-
-       } while (t->task && !signal_pending(current));
-
-       __set_current_state(TASK_RUNNING);
-
-       return t->task == NULL;
-}
-
-static int update_rmtp(struct hrtimer *timer, struct timespec __user *rmtp)
-{
-       struct timespec rmt;
-       ktime_t rem;
-
-       rem = hrtimer_expires_remaining(timer);
-       if (rem.tv64 <= 0)
-               return 0;
-       rmt = ktime_to_timespec(rem);
-
-       if (copy_to_user(rmtp, &rmt, sizeof(*rmtp)))
-               return -EFAULT;
-
-       return 1;
-}
-
-long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
-{
-       struct hrtimer_sleeper t;
-       struct timespec __user  *rmtp;
-       int ret = 0;
-
-       hrtimer_init_on_stack(&t.timer, restart->nanosleep.clockid,
-                               HRTIMER_MODE_ABS);
-       hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
-
-       if (do_nanosleep(&t, HRTIMER_MODE_ABS))
-               goto out;
-
-       rmtp = restart->nanosleep.rmtp;
-       if (rmtp) {
-               ret = update_rmtp(&t.timer, rmtp);
-               if (ret <= 0)
-                       goto out;
-       }
-
-       /* The other values in restart are already filled in */
-       ret = -ERESTART_RESTARTBLOCK;
-out:
-       destroy_hrtimer_on_stack(&t.timer);
-       return ret;
-}
-
-long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
-                      const enum hrtimer_mode mode, const clockid_t clockid)
-{
-       struct restart_block *restart;
-       struct hrtimer_sleeper t;
-       int ret = 0;
-       unsigned long slack;
-
-       slack = current->timer_slack_ns;
-       if (dl_task(current) || rt_task(current))
-               slack = 0;
-
-       hrtimer_init_on_stack(&t.timer, clockid, mode);
-       hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
-       if (do_nanosleep(&t, mode))
-               goto out;
-
-       /* Absolute timers do not update the rmtp value and restart: */
-       if (mode == HRTIMER_MODE_ABS) {
-               ret = -ERESTARTNOHAND;
-               goto out;
-       }
-
-       if (rmtp) {
-               ret = update_rmtp(&t.timer, rmtp);
-               if (ret <= 0)
-                       goto out;
-       }
-
-       restart = &current_thread_info()->restart_block;
-       restart->fn = hrtimer_nanosleep_restart;
-       restart->nanosleep.clockid = t.timer.base->clockid;
-       restart->nanosleep.rmtp = rmtp;
-       restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);
-
-       ret = -ERESTART_RESTARTBLOCK;
-out:
-       destroy_hrtimer_on_stack(&t.timer);
-       return ret;
-}
-
-SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
-               struct timespec __user *, rmtp)
-{
-       struct timespec tu;
-
-       if (copy_from_user(&tu, rqtp, sizeof(tu)))
-               return -EFAULT;
-
-       if (!timespec_valid(&tu))
-               return -EINVAL;
-
-       return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
-}
-
-/*
- * Functions related to boot-time initialization:
- */
-static void init_hrtimers_cpu(int cpu)
-{
-       struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
-       int i;
-
-       for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
-               cpu_base->clock_base[i].cpu_base = cpu_base;
-               timerqueue_init_head(&cpu_base->clock_base[i].active);
-       }
-
-       hrtimer_init_hres(cpu_base);
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-
-static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
-                               struct hrtimer_clock_base *new_base)
-{
-       struct hrtimer *timer;
-       struct timerqueue_node *node;
-
-       while ((node = timerqueue_getnext(&old_base->active))) {
-               timer = container_of(node, struct hrtimer, node);
-               BUG_ON(hrtimer_callback_running(timer));
-               debug_deactivate(timer);
-
-               /*
-                * Mark it as STATE_MIGRATE not INACTIVE otherwise the
-                * timer could be seen as !active and just vanish away
-                * under us on another CPU
-                */
-               __remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0);
-               timer->base = new_base;
-               /*
-                * Enqueue the timers on the new cpu. This does not
-                * reprogram the event device in case the timer
-                * expires before the earliest on this CPU, but we run
-                * hrtimer_interrupt after we migrated everything to
-                * sort out already expired timers and reprogram the
-                * event device.
-                */
-               enqueue_hrtimer(timer, new_base);
-
-               /* Clear the migration state bit */
-               timer->state &= ~HRTIMER_STATE_MIGRATE;
-       }
-}
-
-static void migrate_hrtimers(int scpu)
-{
-       struct hrtimer_cpu_base *old_base, *new_base;
-       int i;
-
-       BUG_ON(cpu_online(scpu));
-       tick_cancel_sched_timer(scpu);
-
-       local_irq_disable();
-       old_base = &per_cpu(hrtimer_bases, scpu);
-       new_base = &__get_cpu_var(hrtimer_bases);
-       /*
-        * The caller is globally serialized and nobody else
-        * takes two locks at once, deadlock is not possible.
-        */
-       raw_spin_lock(&new_base->lock);
-       raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
-
-       for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
-               migrate_hrtimer_list(&old_base->clock_base[i],
-                                    &new_base->clock_base[i]);
-       }
-
-       raw_spin_unlock(&old_base->lock);
-       raw_spin_unlock(&new_base->lock);
-
-       /* Check, if we got expired work to do */
-       __hrtimer_peek_ahead_timers();
-       local_irq_enable();
-}
-
-#endif /* CONFIG_HOTPLUG_CPU */
-
-static int hrtimer_cpu_notify(struct notifier_block *self,
-                                       unsigned long action, void *hcpu)
-{
-       int scpu = (long)hcpu;
-
-       switch (action) {
-
-       case CPU_UP_PREPARE:
-       case CPU_UP_PREPARE_FROZEN:
-               init_hrtimers_cpu(scpu);
-               break;
-
-#ifdef CONFIG_HOTPLUG_CPU
-       case CPU_DYING:
-       case CPU_DYING_FROZEN:
-               clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DYING, &scpu);
-               break;
-       case CPU_DEAD:
-       case CPU_DEAD_FROZEN:
-       {
-               clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu);
-               migrate_hrtimers(scpu);
-               break;
-       }
-#endif
-
-       default:
-               break;
-       }
-
-       return NOTIFY_OK;
-}
-
-static struct notifier_block hrtimers_nb = {
-       .notifier_call = hrtimer_cpu_notify,
-};
-
-void __init hrtimers_init(void)
-{
-       hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
-                         (void *)(long)smp_processor_id());
-       register_cpu_notifier(&hrtimers_nb);
-#ifdef CONFIG_HIGH_RES_TIMERS
-       open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
-#endif
-}
-
-/**
- * schedule_hrtimeout_range_clock - sleep until timeout
- * @expires:   timeout value (ktime_t)
- * @delta:     slack in expires timeout (ktime_t)
- * @mode:      timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
- * @clock:     timer clock, CLOCK_MONOTONIC or CLOCK_REALTIME
- */
-int __sched
-schedule_hrtimeout_range_clock(ktime_t *expires, unsigned long delta,
-                              const enum hrtimer_mode mode, int clock)
-{
-       struct hrtimer_sleeper t;
-
-       /*
-        * Optimize when a zero timeout value is given. It does not
-        * matter whether this is an absolute or a relative time.
-        */
-       if (expires && !expires->tv64) {
-               __set_current_state(TASK_RUNNING);
-               return 0;
-       }
-
-       /*
-        * A NULL parameter means "infinite"
-        */
-       if (!expires) {
-               schedule();
-               __set_current_state(TASK_RUNNING);
-               return -EINTR;
-       }
-
-       hrtimer_init_on_stack(&t.timer, clock, mode);
-       hrtimer_set_expires_range_ns(&t.timer, *expires, delta);
-
-       hrtimer_init_sleeper(&t, current);
-
-       hrtimer_start_expires(&t.timer, mode);
-       if (!hrtimer_active(&t.timer))
-               t.task = NULL;
-
-       if (likely(t.task))
-               schedule();
-
-       hrtimer_cancel(&t.timer);
-       destroy_hrtimer_on_stack(&t.timer);
-
-       __set_current_state(TASK_RUNNING);
-
-       return !t.task ? 0 : -EINTR;
-}
-
-/**
- * schedule_hrtimeout_range - sleep until timeout
- * @expires:   timeout value (ktime_t)
- * @delta:     slack in expires timeout (ktime_t)
- * @mode:      timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
- *
- * Make the current task sleep until the given expiry time has
- * elapsed. The routine will return immediately unless
- * the current task state has been set (see set_current_state()).
- *
- * The @delta argument gives the kernel the freedom to schedule the
- * actual wakeup to a time that is both power and performance friendly.
- * The kernel give the normal best effort behavior for "@expires+@delta",
- * but may decide to fire the timer earlier, but no earlier than @expires.
- *
- * You can set the task state as follows -
- *
- * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to
- * pass before the routine returns.
- *
- * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
- * delivered to the current task.
- *
- * The current task state is guaranteed to be TASK_RUNNING when this
- * routine returns.
- *
- * Returns 0 when the timer has expired otherwise -EINTR
- */
-int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
-                                    const enum hrtimer_mode mode)
-{
-       return schedule_hrtimeout_range_clock(expires, delta, mode,
-                                             CLOCK_MONOTONIC);
-}
-EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);
-
-/**
- * schedule_hrtimeout - sleep until timeout
- * @expires:   timeout value (ktime_t)
- * @mode:      timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
- *
- * Make the current task sleep until the given expiry time has
- * elapsed. The routine will return immediately unless
- * the current task state has been set (see set_current_state()).
- *
- * You can set the task state as follows -
- *
- * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to
- * pass before the routine returns.
- *
- * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
- * delivered to the current task.
- *
- * The current task state is guaranteed to be TASK_RUNNING when this
- * routine returns.
- *
- * Returns 0 when the timer has expired otherwise -EINTR
- */
-int __sched schedule_hrtimeout(ktime_t *expires,
-                              const enum hrtimer_mode mode)
-{
-       return schedule_hrtimeout_range(expires, 0, mode);
-}
-EXPORT_SYMBOL_GPL(schedule_hrtimeout);
diff --git a/kernel/itimer.c b/kernel/itimer.c
deleted file mode 100644 (file)
index 8d262b4..0000000
+++ /dev/null
@@ -1,301 +0,0 @@
-/*
- * linux/kernel/itimer.c
- *
- * Copyright (C) 1992 Darren Senn
- */
-
-/* These are all the functions necessary to implement itimers */
-
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/syscalls.h>
-#include <linux/time.h>
-#include <linux/posix-timers.h>
-#include <linux/hrtimer.h>
-#include <trace/events/timer.h>
-
-#include <asm/uaccess.h>
-
-/**
- * itimer_get_remtime - get remaining time for the timer
- *
- * @timer: the timer to read
- *
- * Returns the delta between the expiry time and now, which can be
- * less than zero or 1usec for an pending expired timer
- */
-static struct timeval itimer_get_remtime(struct hrtimer *timer)
-{
-       ktime_t rem = hrtimer_get_remaining(timer);
-
-       /*
-        * Racy but safe: if the itimer expires after the above
-        * hrtimer_get_remtime() call but before this condition
-        * then we return 0 - which is correct.
-        */
-       if (hrtimer_active(timer)) {
-               if (rem.tv64 <= 0)
-                       rem.tv64 = NSEC_PER_USEC;
-       } else
-               rem.tv64 = 0;
-
-       return ktime_to_timeval(rem);
-}
-
-static void get_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
-                          struct itimerval *const value)
-{
-       cputime_t cval, cinterval;
-       struct cpu_itimer *it = &tsk->signal->it[clock_id];
-
-       spin_lock_irq(&tsk->sighand->siglock);
-
-       cval = it->expires;
-       cinterval = it->incr;
-       if (cval) {
-               struct task_cputime cputime;
-               cputime_t t;
-
-               thread_group_cputimer(tsk, &cputime);
-               if (clock_id == CPUCLOCK_PROF)
-                       t = cputime.utime + cputime.stime;
-               else
-                       /* CPUCLOCK_VIRT */
-                       t = cputime.utime;
-
-               if (cval < t)
-                       /* about to fire */
-                       cval = cputime_one_jiffy;
-               else
-                       cval = cval - t;
-       }
-
-       spin_unlock_irq(&tsk->sighand->siglock);
-
-       cputime_to_timeval(cval, &value->it_value);
-       cputime_to_timeval(cinterval, &value->it_interval);
-}
-
-int do_getitimer(int which, struct itimerval *value)
-{
-       struct task_struct *tsk = current;
-
-       switch (which) {
-       case ITIMER_REAL:
-               spin_lock_irq(&tsk->sighand->siglock);
-               value->it_value = itimer_get_remtime(&tsk->signal->real_timer);
-               value->it_interval =
-                       ktime_to_timeval(tsk->signal->it_real_incr);
-               spin_unlock_irq(&tsk->sighand->siglock);
-               break;
-       case ITIMER_VIRTUAL:
-               get_cpu_itimer(tsk, CPUCLOCK_VIRT, value);
-               break;
-       case ITIMER_PROF:
-               get_cpu_itimer(tsk, CPUCLOCK_PROF, value);
-               break;
-       default:
-               return(-EINVAL);
-       }
-       return 0;
-}
-
-SYSCALL_DEFINE2(getitimer, int, which, struct itimerval __user *, value)
-{
-       int error = -EFAULT;
-       struct itimerval get_buffer;
-
-       if (value) {
-               error = do_getitimer(which, &get_buffer);
-               if (!error &&
-                   copy_to_user(value, &get_buffer, sizeof(get_buffer)))
-                       error = -EFAULT;
-       }
-       return error;
-}
-
-
-/*
- * The timer is automagically restarted, when interval != 0
- */
-enum hrtimer_restart it_real_fn(struct hrtimer *timer)
-{
-       struct signal_struct *sig =
-               container_of(timer, struct signal_struct, real_timer);
-
-       trace_itimer_expire(ITIMER_REAL, sig->leader_pid, 0);
-       kill_pid_info(SIGALRM, SEND_SIG_PRIV, sig->leader_pid);
-
-       return HRTIMER_NORESTART;
-}
-
-static inline u32 cputime_sub_ns(cputime_t ct, s64 real_ns)
-{
-       struct timespec ts;
-       s64 cpu_ns;
-
-       cputime_to_timespec(ct, &ts);
-       cpu_ns = timespec_to_ns(&ts);
-
-       return (cpu_ns <= real_ns) ? 0 : cpu_ns - real_ns;
-}
-
-static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
-                          const struct itimerval *const value,
-                          struct itimerval *const ovalue)
-{
-       cputime_t cval, nval, cinterval, ninterval;
-       s64 ns_ninterval, ns_nval;
-       u32 error, incr_error;
-       struct cpu_itimer *it = &tsk->signal->it[clock_id];
-
-       nval = timeval_to_cputime(&value->it_value);
-       ns_nval = timeval_to_ns(&value->it_value);
-       ninterval = timeval_to_cputime(&value->it_interval);
-       ns_ninterval = timeval_to_ns(&value->it_interval);
-
-       error = cputime_sub_ns(nval, ns_nval);
-       incr_error = cputime_sub_ns(ninterval, ns_ninterval);
-
-       spin_lock_irq(&tsk->sighand->siglock);
-
-       cval = it->expires;
-       cinterval = it->incr;
-       if (cval || nval) {
-               if (nval > 0)
-                       nval += cputime_one_jiffy;
-               set_process_cpu_timer(tsk, clock_id, &nval, &cval);
-       }
-       it->expires = nval;
-       it->incr = ninterval;
-       it->error = error;
-       it->incr_error = incr_error;
-       trace_itimer_state(clock_id == CPUCLOCK_VIRT ?
-                          ITIMER_VIRTUAL : ITIMER_PROF, value, nval);
-
-       spin_unlock_irq(&tsk->sighand->siglock);
-
-       if (ovalue) {
-               cputime_to_timeval(cval, &ovalue->it_value);
-               cputime_to_timeval(cinterval, &ovalue->it_interval);
-       }
-}
-
-/*
- * Returns true if the timeval is in canonical form
- */
-#define timeval_valid(t) \
-       (((t)->tv_sec >= 0) && (((unsigned long) (t)->tv_usec) < USEC_PER_SEC))
-
-int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue)
-{
-       struct task_struct *tsk = current;
-       struct hrtimer *timer;
-       ktime_t expires;
-
-       /*
-        * Validate the timevals in value.
-        */
-       if (!timeval_valid(&value->it_value) ||
-           !timeval_valid(&value->it_interval))
-               return -EINVAL;
-
-       switch (which) {
-       case ITIMER_REAL:
-again:
-               spin_lock_irq(&tsk->sighand->siglock);
-               timer = &tsk->signal->real_timer;
-               if (ovalue) {
-                       ovalue->it_value = itimer_get_remtime(timer);
-                       ovalue->it_interval
-                               = ktime_to_timeval(tsk->signal->it_real_incr);
-               }
-               /* We are sharing ->siglock with it_real_fn() */
-               if (hrtimer_try_to_cancel(timer) < 0) {
-                       spin_unlock_irq(&tsk->sighand->siglock);
-                       goto again;
-               }
-               expires = timeval_to_ktime(value->it_value);
-               if (expires.tv64 != 0) {
-                       tsk->signal->it_real_incr =
-                               timeval_to_ktime(value->it_interval);
-                       hrtimer_start(timer, expires, HRTIMER_MODE_REL);
-               } else
-                       tsk->signal->it_real_incr.tv64 = 0;
-
-               trace_itimer_state(ITIMER_REAL, value, 0);
-               spin_unlock_irq(&tsk->sighand->siglock);
-               break;
-       case ITIMER_VIRTUAL:
-               set_cpu_itimer(tsk, CPUCLOCK_VIRT, value, ovalue);
-               break;
-       case ITIMER_PROF:
-               set_cpu_itimer(tsk, CPUCLOCK_PROF, value, ovalue);
-               break;
-       default:
-               return -EINVAL;
-       }
-       return 0;
-}
-
-/**
- * alarm_setitimer - set alarm in seconds
- *
- * @seconds:   number of seconds until alarm
- *             0 disables the alarm
- *
- * Returns the remaining time in seconds of a pending timer or 0 when
- * the timer is not active.
- *
- * On 32 bit machines the seconds value is limited to (INT_MAX/2) to avoid
- * negative timeval settings which would cause immediate expiry.
- */
-unsigned int alarm_setitimer(unsigned int seconds)
-{
-       struct itimerval it_new, it_old;
-
-#if BITS_PER_LONG < 64
-       if (seconds > INT_MAX)
-               seconds = INT_MAX;
-#endif
-       it_new.it_value.tv_sec = seconds;
-       it_new.it_value.tv_usec = 0;
-       it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
-
-       do_setitimer(ITIMER_REAL, &it_new, &it_old);
-
-       /*
-        * We can't return 0 if we have an alarm pending ...  And we'd
-        * better return too much than too little anyway
-        */
-       if ((!it_old.it_value.tv_sec && it_old.it_value.tv_usec) ||
-             it_old.it_value.tv_usec >= 500000)
-               it_old.it_value.tv_sec++;
-
-       return it_old.it_value.tv_sec;
-}
-
-SYSCALL_DEFINE3(setitimer, int, which, struct itimerval __user *, value,
-               struct itimerval __user *, ovalue)
-{
-       struct itimerval set_buffer, get_buffer;
-       int error;
-
-       if (value) {
-               if(copy_from_user(&set_buffer, value, sizeof(set_buffer)))
-                       return -EFAULT;
-       } else {
-               memset(&set_buffer, 0, sizeof(set_buffer));
-               printk_once(KERN_WARNING "%s calls setitimer() with new_value NULL pointer."
-                           " Misfeature support will be removed\n",
-                           current->comm);
-       }
-
-       error = do_setitimer(which, &set_buffer, ovalue ? &get_buffer : NULL);
-       if (error || !ovalue)
-               return error;
-
-       if (copy_to_user(ovalue, &get_buffer, sizeof(get_buffer)))
-               return -EFAULT;
-       return 0;
-}
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
deleted file mode 100644 (file)
index 3b89464..0000000
+++ /dev/null
@@ -1,1490 +0,0 @@
-/*
- * Implement CPU time clocks for the POSIX clock interface.
- */
-
-#include <linux/sched.h>
-#include <linux/posix-timers.h>
-#include <linux/errno.h>
-#include <linux/math64.h>
-#include <asm/uaccess.h>
-#include <linux/kernel_stat.h>
-#include <trace/events/timer.h>
-#include <linux/random.h>
-#include <linux/tick.h>
-#include <linux/workqueue.h>
-
-/*
- * Called after updating RLIMIT_CPU to run cpu timer and update
- * tsk->signal->cputime_expires expiration cache if necessary. Needs
- * siglock protection since other code may update expiration cache as
- * well.
- */
-void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
-{
-       cputime_t cputime = secs_to_cputime(rlim_new);
-
-       spin_lock_irq(&task->sighand->siglock);
-       set_process_cpu_timer(task, CPUCLOCK_PROF, &cputime, NULL);
-       spin_unlock_irq(&task->sighand->siglock);
-}
-
-static int check_clock(const clockid_t which_clock)
-{
-       int error = 0;
-       struct task_struct *p;
-       const pid_t pid = CPUCLOCK_PID(which_clock);
-
-       if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
-               return -EINVAL;
-
-       if (pid == 0)
-               return 0;
-
-       rcu_read_lock();
-       p = find_task_by_vpid(pid);
-       if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
-                  same_thread_group(p, current) : has_group_leader_pid(p))) {
-               error = -EINVAL;
-       }
-       rcu_read_unlock();
-
-       return error;
-}
-
-static inline unsigned long long
-timespec_to_sample(const clockid_t which_clock, const struct timespec *tp)
-{
-       unsigned long long ret;
-
-       ret = 0;                /* high half always zero when .cpu used */
-       if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
-               ret = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
-       } else {
-               ret = cputime_to_expires(timespec_to_cputime(tp));
-       }
-       return ret;
-}
-
-static void sample_to_timespec(const clockid_t which_clock,
-                              unsigned long long expires,
-                              struct timespec *tp)
-{
-       if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
-               *tp = ns_to_timespec(expires);
-       else
-               cputime_to_timespec((__force cputime_t)expires, tp);
-}
-
-/*
- * Update expiry time from increment, and increase overrun count,
- * given the current clock sample.
- */
-static void bump_cpu_timer(struct k_itimer *timer,
-                          unsigned long long now)
-{
-       int i;
-       unsigned long long delta, incr;
-
-       if (timer->it.cpu.incr == 0)
-               return;
-
-       if (now < timer->it.cpu.expires)
-               return;
-
-       incr = timer->it.cpu.incr;
-       delta = now + incr - timer->it.cpu.expires;
-
-       /* Don't use (incr*2 < delta), incr*2 might overflow. */
-       for (i = 0; incr < delta - incr; i++)
-               incr = incr << 1;
-
-       for (; i >= 0; incr >>= 1, i--) {
-               if (delta < incr)
-                       continue;
-
-               timer->it.cpu.expires += incr;
-               timer->it_overrun += 1 << i;
-               delta -= incr;
-       }
-}
-
-/**
- * task_cputime_zero - Check a task_cputime struct for all zero fields.
- *
- * @cputime:   The struct to compare.
- *
- * Checks @cputime to see if all fields are zero.  Returns true if all fields
- * are zero, false if any field is nonzero.
- */
-static inline int task_cputime_zero(const struct task_cputime *cputime)
-{
-       if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime)
-               return 1;
-       return 0;
-}
-
-static inline unsigned long long prof_ticks(struct task_struct *p)
-{
-       cputime_t utime, stime;
-
-       task_cputime(p, &utime, &stime);
-
-       return cputime_to_expires(utime + stime);
-}
-static inline unsigned long long virt_ticks(struct task_struct *p)
-{
-       cputime_t utime;
-
-       task_cputime(p, &utime, NULL);
-
-       return cputime_to_expires(utime);
-}
-
-static int
-posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
-{
-       int error = check_clock(which_clock);
-       if (!error) {
-               tp->tv_sec = 0;
-               tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
-               if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
-                       /*
-                        * If sched_clock is using a cycle counter, we
-                        * don't have any idea of its true resolution
-                        * exported, but it is much more than 1s/HZ.
-                        */
-                       tp->tv_nsec = 1;
-               }
-       }
-       return error;
-}
-
-static int
-posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
-{
-       /*
-        * You can never reset a CPU clock, but we check for other errors
-        * in the call before failing with EPERM.
-        */
-       int error = check_clock(which_clock);
-       if (error == 0) {
-               error = -EPERM;
-       }
-       return error;
-}
-
-
-/*
- * Sample a per-thread clock for the given task.
- */
-static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
-                           unsigned long long *sample)
-{
-       switch (CPUCLOCK_WHICH(which_clock)) {
-       default:
-               return -EINVAL;
-       case CPUCLOCK_PROF:
-               *sample = prof_ticks(p);
-               break;
-       case CPUCLOCK_VIRT:
-               *sample = virt_ticks(p);
-               break;
-       case CPUCLOCK_SCHED:
-               *sample = task_sched_runtime(p);
-               break;
-       }
-       return 0;
-}
-
-static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b)
-{
-       if (b->utime > a->utime)
-               a->utime = b->utime;
-
-       if (b->stime > a->stime)
-               a->stime = b->stime;
-
-       if (b->sum_exec_runtime > a->sum_exec_runtime)
-               a->sum_exec_runtime = b->sum_exec_runtime;
-}
-
-void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
-{
-       struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
-       struct task_cputime sum;
-       unsigned long flags;
-
-       if (!cputimer->running) {
-               /*
-                * The POSIX timer interface allows for absolute time expiry
-                * values through the TIMER_ABSTIME flag, therefore we have
-                * to synchronize the timer to the clock every time we start
-                * it.
-                */
-               thread_group_cputime(tsk, &sum);
-               raw_spin_lock_irqsave(&cputimer->lock, flags);
-               cputimer->running = 1;
-               update_gt_cputime(&cputimer->cputime, &sum);
-       } else
-               raw_spin_lock_irqsave(&cputimer->lock, flags);
-       *times = cputimer->cputime;
-       raw_spin_unlock_irqrestore(&cputimer->lock, flags);
-}
-
-/*
- * Sample a process (thread group) clock for the given group_leader task.
- * Must be called with task sighand lock held for safe while_each_thread()
- * traversal.
- */
-static int cpu_clock_sample_group(const clockid_t which_clock,
-                                 struct task_struct *p,
-                                 unsigned long long *sample)
-{
-       struct task_cputime cputime;
-
-       switch (CPUCLOCK_WHICH(which_clock)) {
-       default:
-               return -EINVAL;
-       case CPUCLOCK_PROF:
-               thread_group_cputime(p, &cputime);
-               *sample = cputime_to_expires(cputime.utime + cputime.stime);
-               break;
-       case CPUCLOCK_VIRT:
-               thread_group_cputime(p, &cputime);
-               *sample = cputime_to_expires(cputime.utime);
-               break;
-       case CPUCLOCK_SCHED:
-               thread_group_cputime(p, &cputime);
-               *sample = cputime.sum_exec_runtime;
-               break;
-       }
-       return 0;
-}
-
-static int posix_cpu_clock_get_task(struct task_struct *tsk,
-                                   const clockid_t which_clock,
-                                   struct timespec *tp)
-{
-       int err = -EINVAL;
-       unsigned long long rtn;
-
-       if (CPUCLOCK_PERTHREAD(which_clock)) {
-               if (same_thread_group(tsk, current))
-                       err = cpu_clock_sample(which_clock, tsk, &rtn);
-       } else {
-               unsigned long flags;
-               struct sighand_struct *sighand;
-
-               /*
-                * while_each_thread() is not yet entirely RCU safe,
-                * keep locking the group while sampling process
-                * clock for now.
-                */
-               sighand = lock_task_sighand(tsk, &flags);
-               if (!sighand)
-                       return err;
-
-               if (tsk == current || thread_group_leader(tsk))
-                       err = cpu_clock_sample_group(which_clock, tsk, &rtn);
-
-               unlock_task_sighand(tsk, &flags);
-       }
-
-       if (!err)
-               sample_to_timespec(which_clock, rtn, tp);
-
-       return err;
-}
-
-
-static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
-{
-       const pid_t pid = CPUCLOCK_PID(which_clock);
-       int err = -EINVAL;
-
-       if (pid == 0) {
-               /*
-                * Special case constant value for our own clocks.
-                * We don't have to do any lookup to find ourselves.
-                */
-               err = posix_cpu_clock_get_task(current, which_clock, tp);
-       } else {
-               /*
-                * Find the given PID, and validate that the caller
-                * should be able to see it.
-                */
-               struct task_struct *p;
-               rcu_read_lock();
-               p = find_task_by_vpid(pid);
-               if (p)
-                       err = posix_cpu_clock_get_task(p, which_clock, tp);
-               rcu_read_unlock();
-       }
-
-       return err;
-}
-
-
-/*
- * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
- * This is called from sys_timer_create() and do_cpu_nanosleep() with the
- * new timer already all-zeros initialized.
- */
-static int posix_cpu_timer_create(struct k_itimer *new_timer)
-{
-       int ret = 0;
-       const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
-       struct task_struct *p;
-
-       if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
-               return -EINVAL;
-
-       INIT_LIST_HEAD(&new_timer->it.cpu.entry);
-
-       rcu_read_lock();
-       if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
-               if (pid == 0) {
-                       p = current;
-               } else {
-                       p = find_task_by_vpid(pid);
-                       if (p && !same_thread_group(p, current))
-                               p = NULL;
-               }
-       } else {
-               if (pid == 0) {
-                       p = current->group_leader;
-               } else {
-                       p = find_task_by_vpid(pid);
-                       if (p && !has_group_leader_pid(p))
-                               p = NULL;
-               }
-       }
-       new_timer->it.cpu.task = p;
-       if (p) {
-               get_task_struct(p);
-       } else {
-               ret = -EINVAL;
-       }
-       rcu_read_unlock();
-
-       return ret;
-}
-
-/*
- * Clean up a CPU-clock timer that is about to be destroyed.
- * This is called from timer deletion with the timer already locked.
- * If we return TIMER_RETRY, it's necessary to release the timer's lock
- * and try again.  (This happens when the timer is in the middle of firing.)
- */
-static int posix_cpu_timer_del(struct k_itimer *timer)
-{
-       int ret = 0;
-       unsigned long flags;
-       struct sighand_struct *sighand;
-       struct task_struct *p = timer->it.cpu.task;
-
-       WARN_ON_ONCE(p == NULL);
-
-       /*
-        * Protect against sighand release/switch in exit/exec and process/
-        * thread timer list entry concurrent read/writes.
-        */
-       sighand = lock_task_sighand(p, &flags);
-       if (unlikely(sighand == NULL)) {
-               /*
-                * We raced with the reaping of the task.
-                * The deletion should have cleared us off the list.
-                */
-               WARN_ON_ONCE(!list_empty(&timer->it.cpu.entry));
-       } else {
-               if (timer->it.cpu.firing)
-                       ret = TIMER_RETRY;
-               else
-                       list_del(&timer->it.cpu.entry);
-
-               unlock_task_sighand(p, &flags);
-       }
-
-       if (!ret)
-               put_task_struct(p);
-
-       return ret;
-}
-
-static void cleanup_timers_list(struct list_head *head)
-{
-       struct cpu_timer_list *timer, *next;
-
-       list_for_each_entry_safe(timer, next, head, entry)
-               list_del_init(&timer->entry);
-}
-
-/*
- * Clean out CPU timers still ticking when a thread exited.  The task
- * pointer is cleared, and the expiry time is replaced with the residual
- * time for later timer_gettime calls to return.
- * This must be called with the siglock held.
- */
-static void cleanup_timers(struct list_head *head)
-{
-       cleanup_timers_list(head);
-       cleanup_timers_list(++head);
-       cleanup_timers_list(++head);
-}
-
-/*
- * These are both called with the siglock held, when the current thread
- * is being reaped.  When the final (leader) thread in the group is reaped,
- * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
- */
-void posix_cpu_timers_exit(struct task_struct *tsk)
-{
-       add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
-                                               sizeof(unsigned long long));
-       cleanup_timers(tsk->cpu_timers);
-
-}
-void posix_cpu_timers_exit_group(struct task_struct *tsk)
-{
-       cleanup_timers(tsk->signal->cpu_timers);
-}
-
-static inline int expires_gt(cputime_t expires, cputime_t new_exp)
-{
-       return expires == 0 || expires > new_exp;
-}
-
-/*
- * Insert the timer on the appropriate list before any timers that
- * expire later.  This must be called with the sighand lock held.
- */
-static void arm_timer(struct k_itimer *timer)
-{
-       struct task_struct *p = timer->it.cpu.task;
-       struct list_head *head, *listpos;
-       struct task_cputime *cputime_expires;
-       struct cpu_timer_list *const nt = &timer->it.cpu;
-       struct cpu_timer_list *next;
-
-       if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
-               head = p->cpu_timers;
-               cputime_expires = &p->cputime_expires;
-       } else {
-               head = p->signal->cpu_timers;
-               cputime_expires = &p->signal->cputime_expires;
-       }
-       head += CPUCLOCK_WHICH(timer->it_clock);
-
-       listpos = head;
-       list_for_each_entry(next, head, entry) {
-               if (nt->expires < next->expires)
-                       break;
-               listpos = &next->entry;
-       }
-       list_add(&nt->entry, listpos);
-
-       if (listpos == head) {
-               unsigned long long exp = nt->expires;
-
-               /*
-                * We are the new earliest-expiring POSIX 1.b timer, hence
-                * need to update expiration cache. Take into account that
-                * for process timers we share expiration cache with itimers
-                * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
-                */
-
-               switch (CPUCLOCK_WHICH(timer->it_clock)) {
-               case CPUCLOCK_PROF:
-                       if (expires_gt(cputime_expires->prof_exp, expires_to_cputime(exp)))
-                               cputime_expires->prof_exp = expires_to_cputime(exp);
-                       break;
-               case CPUCLOCK_VIRT:
-                       if (expires_gt(cputime_expires->virt_exp, expires_to_cputime(exp)))
-                               cputime_expires->virt_exp = expires_to_cputime(exp);
-                       break;
-               case CPUCLOCK_SCHED:
-                       if (cputime_expires->sched_exp == 0 ||
-                           cputime_expires->sched_exp > exp)
-                               cputime_expires->sched_exp = exp;
-                       break;
-               }
-       }
-}
-
-/*
- * The timer is locked, fire it and arrange for its reload.
- */
-static void cpu_timer_fire(struct k_itimer *timer)
-{
-       if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
-               /*
-                * User don't want any signal.
-                */
-               timer->it.cpu.expires = 0;
-       } else if (unlikely(timer->sigq == NULL)) {
-               /*
-                * This a special case for clock_nanosleep,
-                * not a normal timer from sys_timer_create.
-                */
-               wake_up_process(timer->it_process);
-               timer->it.cpu.expires = 0;
-       } else if (timer->it.cpu.incr == 0) {
-               /*
-                * One-shot timer.  Clear it as soon as it's fired.
-                */
-               posix_timer_event(timer, 0);
-               timer->it.cpu.expires = 0;
-       } else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
-               /*
-                * The signal did not get queued because the signal
-                * was ignored, so we won't get any callback to
-                * reload the timer.  But we need to keep it
-                * ticking in case the signal is deliverable next time.
-                */
-               posix_cpu_timer_schedule(timer);
-       }
-}
-
-/*
- * Sample a process (thread group) timer for the given group_leader task.
- * Must be called with task sighand lock held for safe while_each_thread()
- * traversal.
- */
-static int cpu_timer_sample_group(const clockid_t which_clock,
-                                 struct task_struct *p,
-                                 unsigned long long *sample)
-{
-       struct task_cputime cputime;
-
-       thread_group_cputimer(p, &cputime);
-       switch (CPUCLOCK_WHICH(which_clock)) {
-       default:
-               return -EINVAL;
-       case CPUCLOCK_PROF:
-               *sample = cputime_to_expires(cputime.utime + cputime.stime);
-               break;
-       case CPUCLOCK_VIRT:
-               *sample = cputime_to_expires(cputime.utime);
-               break;
-       case CPUCLOCK_SCHED:
-               *sample = cputime.sum_exec_runtime + task_delta_exec(p);
-               break;
-       }
-       return 0;
-}
-
-#ifdef CONFIG_NO_HZ_FULL
-static void nohz_kick_work_fn(struct work_struct *work)
-{
-       tick_nohz_full_kick_all();
-}
-
-static DECLARE_WORK(nohz_kick_work, nohz_kick_work_fn);
-
-/*
- * We need the IPIs to be sent from sane process context.
- * The posix cpu timers are always set with irqs disabled.
- */
-static void posix_cpu_timer_kick_nohz(void)
-{
-       if (context_tracking_is_enabled())
-               schedule_work(&nohz_kick_work);
-}
-
-bool posix_cpu_timers_can_stop_tick(struct task_struct *tsk)
-{
-       if (!task_cputime_zero(&tsk->cputime_expires))
-               return false;
-
-       if (tsk->signal->cputimer.running)
-               return false;
-
-       return true;
-}
-#else
-static inline void posix_cpu_timer_kick_nohz(void) { }
-#endif
-
-/*
- * Guts of sys_timer_settime for CPU timers.
- * This is called with the timer locked and interrupts disabled.
- * If we return TIMER_RETRY, it's necessary to release the timer's lock
- * and try again.  (This happens when the timer is in the middle of firing.)
- */
-static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
-                              struct itimerspec *new, struct itimerspec *old)
-{
-       unsigned long flags;
-       struct sighand_struct *sighand;
-       struct task_struct *p = timer->it.cpu.task;
-       unsigned long long old_expires, new_expires, old_incr, val;
-       int ret;
-
-       WARN_ON_ONCE(p == NULL);
-
-       new_expires = timespec_to_sample(timer->it_clock, &new->it_value);
-
-       /*
-        * Protect against sighand release/switch in exit/exec and p->cpu_timers
-        * and p->signal->cpu_timers read/write in arm_timer()
-        */
-       sighand = lock_task_sighand(p, &flags);
-       /*
-        * If p has just been reaped, we can no
-        * longer get any information about it at all.
-        */
-       if (unlikely(sighand == NULL)) {
-               return -ESRCH;
-       }
-
-       /*
-        * Disarm any old timer after extracting its expiry time.
-        */
-       WARN_ON_ONCE(!irqs_disabled());
-
-       ret = 0;
-       old_incr = timer->it.cpu.incr;
-       old_expires = timer->it.cpu.expires;
-       if (unlikely(timer->it.cpu.firing)) {
-               timer->it.cpu.firing = -1;
-               ret = TIMER_RETRY;
-       } else
-               list_del_init(&timer->it.cpu.entry);
-
-       /*
-        * We need to sample the current value to convert the new
-        * value from to relative and absolute, and to convert the
-        * old value from absolute to relative.  To set a process
-        * timer, we need a sample to balance the thread expiry
-        * times (in arm_timer).  With an absolute time, we must
-        * check if it's already passed.  In short, we need a sample.
-        */
-       if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
-               cpu_clock_sample(timer->it_clock, p, &val);
-       } else {
-               cpu_timer_sample_group(timer->it_clock, p, &val);
-       }
-
-       if (old) {
-               if (old_expires == 0) {
-                       old->it_value.tv_sec = 0;
-                       old->it_value.tv_nsec = 0;
-               } else {
-                       /*
-                        * Update the timer in case it has
-                        * overrun already.  If it has,
-                        * we'll report it as having overrun
-                        * and with the next reloaded timer
-                        * already ticking, though we are
-                        * swallowing that pending
-                        * notification here to install the
-                        * new setting.
-                        */
-                       bump_cpu_timer(timer, val);
-                       if (val < timer->it.cpu.expires) {
-                               old_expires = timer->it.cpu.expires - val;
-                               sample_to_timespec(timer->it_clock,
-                                                  old_expires,
-                                                  &old->it_value);
-                       } else {
-                               old->it_value.tv_nsec = 1;
-                               old->it_value.tv_sec = 0;
-                       }
-               }
-       }
-
-       if (unlikely(ret)) {
-               /*
-                * We are colliding with the timer actually firing.
-                * Punt after filling in the timer's old value, and
-                * disable this firing since we are already reporting
-                * it as an overrun (thanks to bump_cpu_timer above).
-                */
-               unlock_task_sighand(p, &flags);
-               goto out;
-       }
-
-       if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
-               new_expires += val;
-       }
-
-       /*
-        * Install the new expiry time (or zero).
-        * For a timer with no notification action, we don't actually
-        * arm the timer (we'll just fake it for timer_gettime).
-        */
-       timer->it.cpu.expires = new_expires;
-       if (new_expires != 0 && val < new_expires) {
-               arm_timer(timer);
-       }
-
-       unlock_task_sighand(p, &flags);
-       /*
-        * Install the new reload setting, and
-        * set up the signal and overrun bookkeeping.
-        */
-       timer->it.cpu.incr = timespec_to_sample(timer->it_clock,
-                                               &new->it_interval);
-
-       /*
-        * This acts as a modification timestamp for the timer,
-        * so any automatic reload attempt will punt on seeing
-        * that we have reset the timer manually.
-        */
-       timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
-               ~REQUEUE_PENDING;
-       timer->it_overrun_last = 0;
-       timer->it_overrun = -1;
-
-       if (new_expires != 0 && !(val < new_expires)) {
-               /*
-                * The designated time already passed, so we notify
-                * immediately, even if the thread never runs to
-                * accumulate more time on this clock.
-                */
-               cpu_timer_fire(timer);
-       }
-
-       ret = 0;
- out:
-       if (old) {
-               sample_to_timespec(timer->it_clock,
-                                  old_incr, &old->it_interval);
-       }
-       if (!ret)
-               posix_cpu_timer_kick_nohz();
-       return ret;
-}
-
-static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
-{
-       unsigned long long now;
-       struct task_struct *p = timer->it.cpu.task;
-
-       WARN_ON_ONCE(p == NULL);
-
-       /*
-        * Easy part: convert the reload time.
-        */
-       sample_to_timespec(timer->it_clock,
-                          timer->it.cpu.incr, &itp->it_interval);
-
-       if (timer->it.cpu.expires == 0) {       /* Timer not armed at all.  */
-               itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
-               return;
-       }
-
-       /*
-        * Sample the clock to take the difference with the expiry time.
-        */
-       if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
-               cpu_clock_sample(timer->it_clock, p, &now);
-       } else {
-               struct sighand_struct *sighand;
-               unsigned long flags;
-
-               /*
-                * Protect against sighand release/switch in exit/exec and
-                * also make timer sampling safe if it ends up calling
-                * thread_group_cputime().
-                */
-               sighand = lock_task_sighand(p, &flags);
-               if (unlikely(sighand == NULL)) {
-                       /*
-                        * The process has been reaped.
-                        * We can't even collect a sample any more.
-                        * Call the timer disarmed, nothing else to do.
-                        */
-                       timer->it.cpu.expires = 0;
-                       sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
-                                          &itp->it_value);
-               } else {
-                       cpu_timer_sample_group(timer->it_clock, p, &now);
-                       unlock_task_sighand(p, &flags);
-               }
-       }
-
-       if (now < timer->it.cpu.expires) {
-               sample_to_timespec(timer->it_clock,
-                                  timer->it.cpu.expires - now,
-                                  &itp->it_value);
-       } else {
-               /*
-                * The timer should have expired already, but the firing
-                * hasn't taken place yet.  Say it's just about to expire.
-                */
-               itp->it_value.tv_nsec = 1;
-               itp->it_value.tv_sec = 0;
-       }
-}
-
-static unsigned long long
-check_timers_list(struct list_head *timers,
-                 struct list_head *firing,
-                 unsigned long long curr)
-{
-       int maxfire = 20;
-
-       while (!list_empty(timers)) {
-               struct cpu_timer_list *t;
-
-               t = list_first_entry(timers, struct cpu_timer_list, entry);
-
-               if (!--maxfire || curr < t->expires)
-                       return t->expires;
-
-               t->firing = 1;
-               list_move_tail(&t->entry, firing);
-       }
-
-       return 0;
-}
-
-/*
- * Check for any per-thread CPU timers that have fired and move them off
- * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
- * tsk->it_*_expires values to reflect the remaining thread CPU timers.
- */
-static void check_thread_timers(struct task_struct *tsk,
-                               struct list_head *firing)
-{
-       struct list_head *timers = tsk->cpu_timers;
-       struct signal_struct *const sig = tsk->signal;
-       struct task_cputime *tsk_expires = &tsk->cputime_expires;
-       unsigned long long expires;
-       unsigned long soft;
-
-       expires = check_timers_list(timers, firing, prof_ticks(tsk));
-       tsk_expires->prof_exp = expires_to_cputime(expires);
-
-       expires = check_timers_list(++timers, firing, virt_ticks(tsk));
-       tsk_expires->virt_exp = expires_to_cputime(expires);
-
-       tsk_expires->sched_exp = check_timers_list(++timers, firing,
-                                                  tsk->se.sum_exec_runtime);
-
-       /*
-        * Check for the special case thread timers.
-        */
-       soft = ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_cur);
-       if (soft != RLIM_INFINITY) {
-               unsigned long hard =
-                       ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);
-
-               if (hard != RLIM_INFINITY &&
-                   tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
-                       /*
-                        * At the hard limit, we just die.
-                        * No need to calculate anything else now.
-                        */
-                       __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
-                       return;
-               }
-               if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
-                       /*
-                        * At the soft limit, send a SIGXCPU every second.
-                        */
-                       if (soft < hard) {
-                               soft += USEC_PER_SEC;
-                               sig->rlim[RLIMIT_RTTIME].rlim_cur = soft;
-                       }
-                       printk(KERN_INFO
-                               "RT Watchdog Timeout: %s[%d]\n",
-                               tsk->comm, task_pid_nr(tsk));
-                       __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
-               }
-       }
-}
-
-static void stop_process_timers(struct signal_struct *sig)
-{
-       struct thread_group_cputimer *cputimer = &sig->cputimer;
-       unsigned long flags;
-
-       raw_spin_lock_irqsave(&cputimer->lock, flags);
-       cputimer->running = 0;
-       raw_spin_unlock_irqrestore(&cputimer->lock, flags);
-}
-
-static u32 onecputick;
-
-static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
-                            unsigned long long *expires,
-                            unsigned long long cur_time, int signo)
-{
-       if (!it->expires)
-               return;
-
-       if (cur_time >= it->expires) {
-               if (it->incr) {
-                       it->expires += it->incr;
-                       it->error += it->incr_error;
-                       if (it->error >= onecputick) {
-                               it->expires -= cputime_one_jiffy;
-                               it->error -= onecputick;
-                       }
-               } else {
-                       it->expires = 0;
-               }
-
-               trace_itimer_expire(signo == SIGPROF ?
-                                   ITIMER_PROF : ITIMER_VIRTUAL,
-                                   tsk->signal->leader_pid, cur_time);
-               __group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
-       }
-
-       if (it->expires && (!*expires || it->expires < *expires)) {
-               *expires = it->expires;
-       }
-}
-
-/*
- * Check for any per-thread CPU timers that have fired and move them
- * off the tsk->*_timers list onto the firing list.  Per-thread timers
- * have already been taken off.
- */
-static void check_process_timers(struct task_struct *tsk,
-                                struct list_head *firing)
-{
-       struct signal_struct *const sig = tsk->signal;
-       unsigned long long utime, ptime, virt_expires, prof_expires;
-       unsigned long long sum_sched_runtime, sched_expires;
-       struct list_head *timers = sig->cpu_timers;
-       struct task_cputime cputime;
-       unsigned long soft;
-
-       /*
-        * Collect the current process totals.
-        */
-       thread_group_cputimer(tsk, &cputime);
-       utime = cputime_to_expires(cputime.utime);
-       ptime = utime + cputime_to_expires(cputime.stime);
-       sum_sched_runtime = cputime.sum_exec_runtime;
-
-       prof_expires = check_timers_list(timers, firing, ptime);
-       virt_expires = check_timers_list(++timers, firing, utime);
-       sched_expires = check_timers_list(++timers, firing, sum_sched_runtime);
-
-       /*
-        * Check for the special case process timers.
-        */
-       check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime,
-                        SIGPROF);
-       check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
-                        SIGVTALRM);
-       soft = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
-       if (soft != RLIM_INFINITY) {
-               unsigned long psecs = cputime_to_secs(ptime);
-               unsigned long hard =
-                       ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
-               cputime_t x;
-               if (psecs >= hard) {
-                       /*
-                        * At the hard limit, we just die.
-                        * No need to calculate anything else now.
-                        */
-                       __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
-                       return;
-               }
-               if (psecs >= soft) {
-                       /*
-                        * At the soft limit, send a SIGXCPU every second.
-                        */
-                       __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
-                       if (soft < hard) {
-                               soft++;
-                               sig->rlim[RLIMIT_CPU].rlim_cur = soft;
-                       }
-               }
-               x = secs_to_cputime(soft);
-               if (!prof_expires || x < prof_expires) {
-                       prof_expires = x;
-               }
-       }
-
-       sig->cputime_expires.prof_exp = expires_to_cputime(prof_expires);
-       sig->cputime_expires.virt_exp = expires_to_cputime(virt_expires);
-       sig->cputime_expires.sched_exp = sched_expires;
-       if (task_cputime_zero(&sig->cputime_expires))
-               stop_process_timers(sig);
-}
-
-/*
- * This is called from the signal code (via do_schedule_next_timer)
- * when the last timer signal was delivered and we have to reload the timer.
- */
-void posix_cpu_timer_schedule(struct k_itimer *timer)
-{
-       struct sighand_struct *sighand;
-       unsigned long flags;
-       struct task_struct *p = timer->it.cpu.task;
-       unsigned long long now;
-
-       WARN_ON_ONCE(p == NULL);
-
-       /*
-        * Fetch the current sample and update the timer's expiry time.
-        */
-       if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
-               cpu_clock_sample(timer->it_clock, p, &now);
-               bump_cpu_timer(timer, now);
-               if (unlikely(p->exit_state))
-                       goto out;
-
-               /* Protect timer list r/w in arm_timer() */
-               sighand = lock_task_sighand(p, &flags);
-               if (!sighand)
-                       goto out;
-       } else {
-               /*
-                * Protect arm_timer() and timer sampling in case of call to
-                * thread_group_cputime().
-                */
-               sighand = lock_task_sighand(p, &flags);
-               if (unlikely(sighand == NULL)) {
-                       /*
-                        * The process has been reaped.
-                        * We can't even collect a sample any more.
-                        */
-                       timer->it.cpu.expires = 0;
-                       goto out;
-               } else if (unlikely(p->exit_state) && thread_group_empty(p)) {
-                       unlock_task_sighand(p, &flags);
-                       /* Optimizations: if the process is dying, no need to rearm */
-                       goto out;
-               }
-               cpu_timer_sample_group(timer->it_clock, p, &now);
-               bump_cpu_timer(timer, now);
-               /* Leave the sighand locked for the call below.  */
-       }
-
-       /*
-        * Now re-arm for the new expiry time.
-        */
-       WARN_ON_ONCE(!irqs_disabled());
-       arm_timer(timer);
-       unlock_task_sighand(p, &flags);
-
-       /* Kick full dynticks CPUs in case they need to tick on the new timer */
-       posix_cpu_timer_kick_nohz();
-out:
-       timer->it_overrun_last = timer->it_overrun;
-       timer->it_overrun = -1;
-       ++timer->it_requeue_pending;
-}
-
-/**
- * task_cputime_expired - Compare two task_cputime entities.
- *
- * @sample:    The task_cputime structure to be checked for expiration.
- * @expires:   Expiration times, against which @sample will be checked.
- *
- * Checks @sample against @expires to see if any field of @sample has expired.
- * Returns true if any field of the former is greater than the corresponding
- * field of the latter if the latter field is set.  Otherwise returns false.
- */
-static inline int task_cputime_expired(const struct task_cputime *sample,
-                                       const struct task_cputime *expires)
-{
-       if (expires->utime && sample->utime >= expires->utime)
-               return 1;
-       if (expires->stime && sample->utime + sample->stime >= expires->stime)
-               return 1;
-       if (expires->sum_exec_runtime != 0 &&
-           sample->sum_exec_runtime >= expires->sum_exec_runtime)
-               return 1;
-       return 0;
-}
-
-/**
- * fastpath_timer_check - POSIX CPU timers fast path.
- *
- * @tsk:       The task (thread) being checked.
- *
- * Check the task and thread group timers.  If both are zero (there are no
- * timers set) return false.  Otherwise snapshot the task and thread group
- * timers and compare them with the corresponding expiration times.  Return
- * true if a timer has expired, else return false.
- */
-static inline int fastpath_timer_check(struct task_struct *tsk)
-{
-       struct signal_struct *sig;
-       cputime_t utime, stime;
-
-       task_cputime(tsk, &utime, &stime);
-
-       if (!task_cputime_zero(&tsk->cputime_expires)) {
-               struct task_cputime task_sample = {
-                       .utime = utime,
-                       .stime = stime,
-                       .sum_exec_runtime = tsk->se.sum_exec_runtime
-               };
-
-               if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
-                       return 1;
-       }
-
-       sig = tsk->signal;
-       if (sig->cputimer.running) {
-               struct task_cputime group_sample;
-
-               raw_spin_lock(&sig->cputimer.lock);
-               group_sample = sig->cputimer.cputime;
-               raw_spin_unlock(&sig->cputimer.lock);
-
-               if (task_cputime_expired(&group_sample, &sig->cputime_expires))
-                       return 1;
-       }
-
-       return 0;
-}
-
-/*
- * This is called from the timer interrupt handler.  The irq handler has
- * already updated our counts.  We need to check if any timers fire now.
- * Interrupts are disabled.
- */
-void run_posix_cpu_timers(struct task_struct *tsk)
-{
-       LIST_HEAD(firing);
-       struct k_itimer *timer, *next;
-       unsigned long flags;
-
-       WARN_ON_ONCE(!irqs_disabled());
-
-       /*
-        * The fast path checks that there are no expired thread or thread
-        * group timers.  If that's so, just return.
-        */
-       if (!fastpath_timer_check(tsk))
-               return;
-
-       if (!lock_task_sighand(tsk, &flags))
-               return;
-       /*
-        * Here we take off tsk->signal->cpu_timers[N] and
-        * tsk->cpu_timers[N] all the timers that are firing, and
-        * put them on the firing list.
-        */
-       check_thread_timers(tsk, &firing);
-       /*
-        * If there are any active process wide timers (POSIX 1.b, itimers,
-        * RLIMIT_CPU) cputimer must be running.
-        */
-       if (tsk->signal->cputimer.running)
-               check_process_timers(tsk, &firing);
-
-       /*
-        * We must release these locks before taking any timer's lock.
-        * There is a potential race with timer deletion here, as the
-        * siglock now protects our private firing list.  We have set
-        * the firing flag in each timer, so that a deletion attempt
-        * that gets the timer lock before we do will give it up and
-        * spin until we've taken care of that timer below.
-        */
-       unlock_task_sighand(tsk, &flags);
-
-       /*
-        * Now that all the timers on our list have the firing flag,
-        * no one will touch their list entries but us.  We'll take
-        * each timer's lock before clearing its firing flag, so no
-        * timer call will interfere.
-        */
-       list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
-               int cpu_firing;
-
-               spin_lock(&timer->it_lock);
-               list_del_init(&timer->it.cpu.entry);
-               cpu_firing = timer->it.cpu.firing;
-               timer->it.cpu.firing = 0;
-               /*
-                * The firing flag is -1 if we collided with a reset
-                * of the timer, which already reported this
-                * almost-firing as an overrun.  So don't generate an event.
-                */
-               if (likely(cpu_firing >= 0))
-                       cpu_timer_fire(timer);
-               spin_unlock(&timer->it_lock);
-       }
-}
-
-/*
- * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
- * The tsk->sighand->siglock must be held by the caller.
- */
-void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
-                          cputime_t *newval, cputime_t *oldval)
-{
-       unsigned long long now;
-
-       WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
-       cpu_timer_sample_group(clock_idx, tsk, &now);
-
-       if (oldval) {
-               /*
-                * We are setting itimer. The *oldval is absolute and we update
-                * it to be relative, *newval argument is relative and we update
-                * it to be absolute.
-                */
-               if (*oldval) {
-                       if (*oldval <= now) {
-                               /* Just about to fire. */
-                               *oldval = cputime_one_jiffy;
-                       } else {
-                               *oldval -= now;
-                       }
-               }
-
-               if (!*newval)
-                       goto out;
-               *newval += now;
-       }
-
-       /*
-        * Update expiration cache if we are the earliest timer, or eventually
-        * RLIMIT_CPU limit is earlier than prof_exp cpu timer expire.
-        */
-       switch (clock_idx) {
-       case CPUCLOCK_PROF:
-               if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval))
-                       tsk->signal->cputime_expires.prof_exp = *newval;
-               break;
-       case CPUCLOCK_VIRT:
-               if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval))
-                       tsk->signal->cputime_expires.virt_exp = *newval;
-               break;
-       }
-out:
-       posix_cpu_timer_kick_nohz();
-}
-
-static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
-                           struct timespec *rqtp, struct itimerspec *it)
-{
-       struct k_itimer timer;
-       int error;
-
-       /*
-        * Set up a temporary timer and then wait for it to go off.
-        */
-       memset(&timer, 0, sizeof timer);
-       spin_lock_init(&timer.it_lock);
-       timer.it_clock = which_clock;
-       timer.it_overrun = -1;
-       error = posix_cpu_timer_create(&timer);
-       timer.it_process = current;
-       if (!error) {
-               static struct itimerspec zero_it;
-
-               memset(it, 0, sizeof *it);
-               it->it_value = *rqtp;
-
-               spin_lock_irq(&timer.it_lock);
-               error = posix_cpu_timer_set(&timer, flags, it, NULL);
-               if (error) {
-                       spin_unlock_irq(&timer.it_lock);
-                       return error;
-               }
-
-               while (!signal_pending(current)) {
-                       if (timer.it.cpu.expires == 0) {
-                               /*
-                                * Our timer fired and was reset, below
-                                * deletion can not fail.
-                                */
-                               posix_cpu_timer_del(&timer);
-                               spin_unlock_irq(&timer.it_lock);
-                               return 0;
-                       }
-
-                       /*
-                        * Block until cpu_timer_fire (or a signal) wakes us.
-                        */
-                       __set_current_state(TASK_INTERRUPTIBLE);
-                       spin_unlock_irq(&timer.it_lock);
-                       schedule();
-                       spin_lock_irq(&timer.it_lock);
-               }
-
-               /*
-                * We were interrupted by a signal.
-                */
-               sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
-               error = posix_cpu_timer_set(&timer, 0, &zero_it, it);
-               if (!error) {
-                       /*
-                        * Timer is now unarmed, deletion can not fail.
-                        */
-                       posix_cpu_timer_del(&timer);
-               }
-               spin_unlock_irq(&timer.it_lock);
-
-               while (error == TIMER_RETRY) {
-                       /*
-                        * We need to handle case when timer was or is in the
-                        * middle of firing. In other cases we already freed
-                        * resources.
-                        */
-                       spin_lock_irq(&timer.it_lock);
-                       error = posix_cpu_timer_del(&timer);
-                       spin_unlock_irq(&timer.it_lock);
-               }
-
-               if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
-                       /*
-                        * It actually did fire already.
-                        */
-                       return 0;
-               }
-
-               error = -ERESTART_RESTARTBLOCK;
-       }
-
-       return error;
-}
-
-static long posix_cpu_nsleep_restart(struct restart_block *restart_block);
-
-static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
-                           struct timespec *rqtp, struct timespec __user *rmtp)
-{
-       struct restart_block *restart_block =
-               &current_thread_info()->restart_block;
-       struct itimerspec it;
-       int error;
-
-       /*
-        * Diagnose required errors first.
-        */
-       if (CPUCLOCK_PERTHREAD(which_clock) &&
-           (CPUCLOCK_PID(which_clock) == 0 ||
-            CPUCLOCK_PID(which_clock) == current->pid))
-               return -EINVAL;
-
-       error = do_cpu_nanosleep(which_clock, flags, rqtp, &it);
-
-       if (error == -ERESTART_RESTARTBLOCK) {
-
-               if (flags & TIMER_ABSTIME)
-                       return -ERESTARTNOHAND;
-               /*
-                * Report back to the user the time still remaining.
-                */
-               if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
-                       return -EFAULT;
-
-               restart_block->fn = posix_cpu_nsleep_restart;
-               restart_block->nanosleep.clockid = which_clock;
-               restart_block->nanosleep.rmtp = rmtp;
-               restart_block->nanosleep.expires = timespec_to_ns(rqtp);
-       }
-       return error;
-}
-
-static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
-{
-       clockid_t which_clock = restart_block->nanosleep.clockid;
-       struct timespec t;
-       struct itimerspec it;
-       int error;
-
-       t = ns_to_timespec(restart_block->nanosleep.expires);
-
-       error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it);
-
-       if (error == -ERESTART_RESTARTBLOCK) {
-               struct timespec __user *rmtp = restart_block->nanosleep.rmtp;
-               /*
-                * Report back to the user the time still remaining.
-                */
-               if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
-                       return -EFAULT;
-
-               restart_block->nanosleep.expires = timespec_to_ns(&t);
-       }
-       return error;
-
-}
-
-#define PROCESS_CLOCK  MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
-#define THREAD_CLOCK   MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)
-
-static int process_cpu_clock_getres(const clockid_t which_clock,
-                                   struct timespec *tp)
-{
-       return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
-}
-static int process_cpu_clock_get(const clockid_t which_clock,
-                                struct timespec *tp)
-{
-       return posix_cpu_clock_get(PROCESS_CLOCK, tp);
-}
-static int process_cpu_timer_create(struct k_itimer *timer)
-{
-       timer->it_clock = PROCESS_CLOCK;
-       return posix_cpu_timer_create(timer);
-}
-static int process_cpu_nsleep(const clockid_t which_clock, int flags,
-                             struct timespec *rqtp,
-                             struct timespec __user *rmtp)
-{
-       return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp);
-}
-static long process_cpu_nsleep_restart(struct restart_block *restart_block)
-{
-       return -EINVAL;
-}
-static int thread_cpu_clock_getres(const clockid_t which_clock,
-                                  struct timespec *tp)
-{
-       return posix_cpu_clock_getres(THREAD_CLOCK, tp);
-}
-static int thread_cpu_clock_get(const clockid_t which_clock,
-                               struct timespec *tp)
-{
-       return posix_cpu_clock_get(THREAD_CLOCK, tp);
-}
-static int thread_cpu_timer_create(struct k_itimer *timer)
-{
-       timer->it_clock = THREAD_CLOCK;
-       return posix_cpu_timer_create(timer);
-}
-
-struct k_clock clock_posix_cpu = {
-       .clock_getres   = posix_cpu_clock_getres,
-       .clock_set      = posix_cpu_clock_set,
-       .clock_get      = posix_cpu_clock_get,
-       .timer_create   = posix_cpu_timer_create,
-       .nsleep         = posix_cpu_nsleep,
-       .nsleep_restart = posix_cpu_nsleep_restart,
-       .timer_set      = posix_cpu_timer_set,
-       .timer_del      = posix_cpu_timer_del,
-       .timer_get      = posix_cpu_timer_get,
-};
-
-static __init int init_posix_cpu_timers(void)
-{
-       struct k_clock process = {
-               .clock_getres   = process_cpu_clock_getres,
-               .clock_get      = process_cpu_clock_get,
-               .timer_create   = process_cpu_timer_create,
-               .nsleep         = process_cpu_nsleep,
-               .nsleep_restart = process_cpu_nsleep_restart,
-       };
-       struct k_clock thread = {
-               .clock_getres   = thread_cpu_clock_getres,
-               .clock_get      = thread_cpu_clock_get,
-               .timer_create   = thread_cpu_timer_create,
-       };
-       struct timespec ts;
-
-       posix_timers_register_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
-       posix_timers_register_clock(CLOCK_THREAD_CPUTIME_ID, &thread);
-
-       cputime_to_timespec(cputime_one_jiffy, &ts);
-       onecputick = ts.tv_nsec;
-       WARN_ON(ts.tv_sec != 0);
-
-       return 0;
-}
-__initcall(init_posix_cpu_timers);
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
deleted file mode 100644 (file)
index 424c2d4..0000000
+++ /dev/null
@@ -1,1121 +0,0 @@
-/*
- * linux/kernel/posix-timers.c
- *
- *
- * 2002-10-15  Posix Clocks & timers
- *                           by George Anzinger george@mvista.com
- *
- *                          Copyright (C) 2002 2003 by MontaVista Software.
- *
- * 2004-06-01  Fix CLOCK_REALTIME clock/timer TIMER_ABSTIME bug.
- *                          Copyright (C) 2004 Boris Hu
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
-
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * MontaVista Software | 1237 East Arques Avenue | Sunnyvale | CA 94085 | USA
- */
-
-/* These are all the functions necessary to implement
- * POSIX clocks & timers
- */
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/time.h>
-#include <linux/mutex.h>
-
-#include <asm/uaccess.h>
-#include <linux/list.h>
-#include <linux/init.h>
-#include <linux/compiler.h>
-#include <linux/hash.h>
-#include <linux/posix-clock.h>
-#include <linux/posix-timers.h>
-#include <linux/syscalls.h>
-#include <linux/wait.h>
-#include <linux/workqueue.h>
-#include <linux/export.h>
-#include <linux/hashtable.h>
-
-/*
- * Management arrays for POSIX timers. Timers are now kept in static hash table
- * with 512 entries.
- * Timer ids are allocated by local routine, which selects proper hash head by
- * key, constructed from current->signal address and per signal struct counter.
- * This keeps timer ids unique per process, but now they can intersect between
- * processes.
- */
-
-/*
- * Lets keep our timers in a slab cache :-)
- */
-static struct kmem_cache *posix_timers_cache;
-
-static DEFINE_HASHTABLE(posix_timers_hashtable, 9);
-static DEFINE_SPINLOCK(hash_lock);
-
-/*
- * we assume that the new SIGEV_THREAD_ID shares no bits with the other
- * SIGEV values.  Here we put out an error if this assumption fails.
- */
-#if SIGEV_THREAD_ID != (SIGEV_THREAD_ID & \
-                       ~(SIGEV_SIGNAL | SIGEV_NONE | SIGEV_THREAD))
-#error "SIGEV_THREAD_ID must not share bit with other SIGEV values!"
-#endif
-
-/*
- * parisc wants ENOTSUP instead of EOPNOTSUPP
- */
-#ifndef ENOTSUP
-# define ENANOSLEEP_NOTSUP EOPNOTSUPP
-#else
-# define ENANOSLEEP_NOTSUP ENOTSUP
-#endif
-
-/*
- * The timer ID is turned into a timer address by idr_find().
- * Verifying a valid ID consists of:
- *
- * a) checking that idr_find() returns other than -1.
- * b) checking that the timer id matches the one in the timer itself.
- * c) that the timer owner is in the callers thread group.
- */
-
-/*
- * CLOCKs: The POSIX standard calls for a couple of clocks and allows us
- *         to implement others.  This structure defines the various
- *         clocks.
- *
- * RESOLUTION: Clock resolution is used to round up timer and interval
- *         times, NOT to report clock times, which are reported with as
- *         much resolution as the system can muster.  In some cases this
- *         resolution may depend on the underlying clock hardware and
- *         may not be quantifiable until run time, and only then is the
- *         necessary code is written.  The standard says we should say
- *         something about this issue in the documentation...
- *
- * FUNCTIONS: The CLOCKs structure defines possible functions to
- *         handle various clock functions.
- *
- *         The standard POSIX timer management code assumes the
- *         following: 1.) The k_itimer struct (sched.h) is used for
- *         the timer.  2.) The list, it_lock, it_clock, it_id and
- *         it_pid fields are not modified by timer code.
- *
- * Permissions: It is assumed that the clock_settime() function defined
- *         for each clock will take care of permission checks.  Some
- *         clocks may be set able by any user (i.e. local process
- *         clocks) others not.  Currently the only set able clock we
- *         have is CLOCK_REALTIME and its high res counter part, both of
- *         which we beg off on and pass to do_sys_settimeofday().
- */
-
-static struct k_clock posix_clocks[MAX_CLOCKS];
-
-/*
- * These ones are defined below.
- */
-static int common_nsleep(const clockid_t, int flags, struct timespec *t,
-                        struct timespec __user *rmtp);
-static int common_timer_create(struct k_itimer *new_timer);
-static void common_timer_get(struct k_itimer *, struct itimerspec *);
-static int common_timer_set(struct k_itimer *, int,
-                           struct itimerspec *, struct itimerspec *);
-static int common_timer_del(struct k_itimer *timer);
-
-static enum hrtimer_restart posix_timer_fn(struct hrtimer *data);
-
-static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags);
-
-#define lock_timer(tid, flags)                                            \
-({     struct k_itimer *__timr;                                           \
-       __cond_lock(&__timr->it_lock, __timr = __lock_timer(tid, flags));  \
-       __timr;                                                            \
-})
-
-static int hash(struct signal_struct *sig, unsigned int nr)
-{
-       return hash_32(hash32_ptr(sig) ^ nr, HASH_BITS(posix_timers_hashtable));
-}
-
-static struct k_itimer *__posix_timers_find(struct hlist_head *head,
-                                           struct signal_struct *sig,
-                                           timer_t id)
-{
-       struct k_itimer *timer;
-
-       hlist_for_each_entry_rcu(timer, head, t_hash) {
-               if ((timer->it_signal == sig) && (timer->it_id == id))
-                       return timer;
-       }
-       return NULL;
-}
-
-static struct k_itimer *posix_timer_by_id(timer_t id)
-{
-       struct signal_struct *sig = current->signal;
-       struct hlist_head *head = &posix_timers_hashtable[hash(sig, id)];
-
-       return __posix_timers_find(head, sig, id);
-}
-
-static int posix_timer_add(struct k_itimer *timer)
-{
-       struct signal_struct *sig = current->signal;
-       int first_free_id = sig->posix_timer_id;
-       struct hlist_head *head;
-       int ret = -ENOENT;
-
-       do {
-               spin_lock(&hash_lock);
-               head = &posix_timers_hashtable[hash(sig, sig->posix_timer_id)];
-               if (!__posix_timers_find(head, sig, sig->posix_timer_id)) {
-                       hlist_add_head_rcu(&timer->t_hash, head);
-                       ret = sig->posix_timer_id;
-               }
-               if (++sig->posix_timer_id < 0)
-                       sig->posix_timer_id = 0;
-               if ((sig->posix_timer_id == first_free_id) && (ret == -ENOENT))
-                       /* Loop over all possible ids completed */
-                       ret = -EAGAIN;
-               spin_unlock(&hash_lock);
-       } while (ret == -ENOENT);
-       return ret;
-}
-
-static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
-{
-       spin_unlock_irqrestore(&timr->it_lock, flags);
-}
-
-/* Get clock_realtime */
-static int posix_clock_realtime_get(clockid_t which_clock, struct timespec *tp)
-{
-       ktime_get_real_ts(tp);
-       return 0;
-}
-
-/* Set clock_realtime */
-static int posix_clock_realtime_set(const clockid_t which_clock,
-                                   const struct timespec *tp)
-{
-       return do_sys_settimeofday(tp, NULL);
-}
-
-static int posix_clock_realtime_adj(const clockid_t which_clock,
-                                   struct timex *t)
-{
-       return do_adjtimex(t);
-}
-
-/*
- * Get monotonic time for posix timers
- */
-static int posix_ktime_get_ts(clockid_t which_clock, struct timespec *tp)
-{
-       ktime_get_ts(tp);
-       return 0;
-}
-
-/*
- * Get monotonic-raw time for posix timers
- */
-static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec *tp)
-{
-       getrawmonotonic(tp);
-       return 0;
-}
-
-
-static int posix_get_realtime_coarse(clockid_t which_clock, struct timespec *tp)
-{
-       *tp = current_kernel_time();
-       return 0;
-}
-
-static int posix_get_monotonic_coarse(clockid_t which_clock,
-                                               struct timespec *tp)
-{
-       *tp = get_monotonic_coarse();
-       return 0;
-}
-
-static int posix_get_coarse_res(const clockid_t which_clock, struct timespec *tp)
-{
-       *tp = ktime_to_timespec(KTIME_LOW_RES);
-       return 0;
-}
-
-static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
-{
-       get_monotonic_boottime(tp);
-       return 0;
-}
-
-static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
-{
-       timekeeping_clocktai(tp);
-       return 0;
-}
-
-/*
- * Initialize everything, well, just everything in Posix clocks/timers ;)
- */
-static __init int init_posix_timers(void)
-{
-       struct k_clock clock_realtime = {
-               .clock_getres   = hrtimer_get_res,
-               .clock_get      = posix_clock_realtime_get,
-               .clock_set      = posix_clock_realtime_set,
-               .clock_adj      = posix_clock_realtime_adj,
-               .nsleep         = common_nsleep,
-               .nsleep_restart = hrtimer_nanosleep_restart,
-               .timer_create   = common_timer_create,
-               .timer_set      = common_timer_set,
-               .timer_get      = common_timer_get,
-               .timer_del      = common_timer_del,
-       };
-       struct k_clock clock_monotonic = {
-               .clock_getres   = hrtimer_get_res,
-               .clock_get      = posix_ktime_get_ts,
-               .nsleep         = common_nsleep,
-               .nsleep_restart = hrtimer_nanosleep_restart,
-               .timer_create   = common_timer_create,
-               .timer_set      = common_timer_set,
-               .timer_get      = common_timer_get,
-               .timer_del      = common_timer_del,
-       };
-       struct k_clock clock_monotonic_raw = {
-               .clock_getres   = hrtimer_get_res,
-               .clock_get      = posix_get_monotonic_raw,
-       };
-       struct k_clock clock_realtime_coarse = {
-               .clock_getres   = posix_get_coarse_res,
-               .clock_get      = posix_get_realtime_coarse,
-       };
-       struct k_clock clock_monotonic_coarse = {
-               .clock_getres   = posix_get_coarse_res,
-               .clock_get      = posix_get_monotonic_coarse,
-       };
-       struct k_clock clock_tai = {
-               .clock_getres   = hrtimer_get_res,
-               .clock_get      = posix_get_tai,
-               .nsleep         = common_nsleep,
-               .nsleep_restart = hrtimer_nanosleep_restart,
-               .timer_create   = common_timer_create,
-               .timer_set      = common_timer_set,
-               .timer_get      = common_timer_get,
-               .timer_del      = common_timer_del,
-       };
-       struct k_clock clock_boottime = {
-               .clock_getres   = hrtimer_get_res,
-               .clock_get      = posix_get_boottime,
-               .nsleep         = common_nsleep,
-               .nsleep_restart = hrtimer_nanosleep_restart,
-               .timer_create   = common_timer_create,
-               .timer_set      = common_timer_set,
-               .timer_get      = common_timer_get,
-               .timer_del      = common_timer_del,
-       };
-
-       posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
-       posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
-       posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
-       posix_timers_register_clock(CLOCK_REALTIME_COARSE, &clock_realtime_coarse);
-       posix_timers_register_clock(CLOCK_MONOTONIC_COARSE, &clock_monotonic_coarse);
-       posix_timers_register_clock(CLOCK_BOOTTIME, &clock_boottime);
-       posix_timers_register_clock(CLOCK_TAI, &clock_tai);
-
-       posix_timers_cache = kmem_cache_create("posix_timers_cache",
-                                       sizeof (struct k_itimer), 0, SLAB_PANIC,
-                                       NULL);
-       return 0;
-}
-
-__initcall(init_posix_timers);
-
-static void schedule_next_timer(struct k_itimer *timr)
-{
-       struct hrtimer *timer = &timr->it.real.timer;
-
-       if (timr->it.real.interval.tv64 == 0)
-               return;
-
-       timr->it_overrun += (unsigned int) hrtimer_forward(timer,
-                                               timer->base->get_time(),
-                                               timr->it.real.interval);
-
-       timr->it_overrun_last = timr->it_overrun;
-       timr->it_overrun = -1;
-       ++timr->it_requeue_pending;
-       hrtimer_restart(timer);
-}
-
-/*
- * This function is exported for use by the signal deliver code.  It is
- * called just prior to the info block being released and passes that
- * block to us.  It's function is to update the overrun entry AND to
- * restart the timer.  It should only be called if the timer is to be
- * restarted (i.e. we have flagged this in the sys_private entry of the
- * info block).
- *
- * To protect against the timer going away while the interrupt is queued,
- * we require that the it_requeue_pending flag be set.
- */
-void do_schedule_next_timer(struct siginfo *info)
-{
-       struct k_itimer *timr;
-       unsigned long flags;
-
-       timr = lock_timer(info->si_tid, &flags);
-
-       if (timr && timr->it_requeue_pending == info->si_sys_private) {
-               if (timr->it_clock < 0)
-                       posix_cpu_timer_schedule(timr);
-               else
-                       schedule_next_timer(timr);
-
-               info->si_overrun += timr->it_overrun_last;
-       }
-
-       if (timr)
-               unlock_timer(timr, flags);
-}
-
-int posix_timer_event(struct k_itimer *timr, int si_private)
-{
-       struct task_struct *task;
-       int shared, ret = -1;
-       /*
-        * FIXME: if ->sigq is queued we can race with
-        * dequeue_signal()->do_schedule_next_timer().
-        *
-        * If dequeue_signal() sees the "right" value of
-        * si_sys_private it calls do_schedule_next_timer().
-        * We re-queue ->sigq and drop ->it_lock().
-        * do_schedule_next_timer() locks the timer
-        * and re-schedules it while ->sigq is pending.
-        * Not really bad, but not that we want.
-        */
-       timr->sigq->info.si_sys_private = si_private;
-
-       rcu_read_lock();
-       task = pid_task(timr->it_pid, PIDTYPE_PID);
-       if (task) {
-               shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
-               ret = send_sigqueue(timr->sigq, task, shared);
-       }
-       rcu_read_unlock();
-       /* If we failed to send the signal the timer stops. */
-       return ret > 0;
-}
-EXPORT_SYMBOL_GPL(posix_timer_event);
-
-/*
- * This function gets called when a POSIX.1b interval timer expires.  It
- * is used as a callback from the kernel internal timer.  The
- * run_timer_list code ALWAYS calls with interrupts on.
-
- * This code is for CLOCK_REALTIME* and CLOCK_MONOTONIC* timers.
- */
-static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
-{
-       struct k_itimer *timr;
-       unsigned long flags;
-       int si_private = 0;
-       enum hrtimer_restart ret = HRTIMER_NORESTART;
-
-       timr = container_of(timer, struct k_itimer, it.real.timer);
-       spin_lock_irqsave(&timr->it_lock, flags);
-
-       if (timr->it.real.interval.tv64 != 0)
-               si_private = ++timr->it_requeue_pending;
-
-       if (posix_timer_event(timr, si_private)) {
-               /*
-                * signal was not sent because of sig_ignor
-                * we will not get a call back to restart it AND
-                * it should be restarted.
-                */
-               if (timr->it.real.interval.tv64 != 0) {
-                       ktime_t now = hrtimer_cb_get_time(timer);
-
-                       /*
-                        * FIXME: What we really want, is to stop this
-                        * timer completely and restart it in case the
-                        * SIG_IGN is removed. This is a non trivial
-                        * change which involves sighand locking
-                        * (sigh !), which we don't want to do late in
-                        * the release cycle.
-                        *
-                        * For now we just let timers with an interval
-                        * less than a jiffie expire every jiffie to
-                        * avoid softirq starvation in case of SIG_IGN
-                        * and a very small interval, which would put
-                        * the timer right back on the softirq pending
-                        * list. By moving now ahead of time we trick
-                        * hrtimer_forward() to expire the timer
-                        * later, while we still maintain the overrun
-                        * accuracy, but have some inconsistency in
-                        * the timer_gettime() case. This is at least
-                        * better than a starved softirq. A more
-                        * complex fix which solves also another related
-                        * inconsistency is already in the pipeline.
-                        */
-#ifdef CONFIG_HIGH_RES_TIMERS
-                       {
-                               ktime_t kj = ktime_set(0, NSEC_PER_SEC / HZ);
-
-                               if (timr->it.real.interval.tv64 < kj.tv64)
-                                       now = ktime_add(now, kj);
-                       }
-#endif
-                       timr->it_overrun += (unsigned int)
-                               hrtimer_forward(timer, now,
-                                               timr->it.real.interval);
-                       ret = HRTIMER_RESTART;
-                       ++timr->it_requeue_pending;
-               }
-       }
-
-       unlock_timer(timr, flags);
-       return ret;
-}
-
-static struct pid *good_sigevent(sigevent_t * event)
-{
-       struct task_struct *rtn = current->group_leader;
-
-       if ((event->sigev_notify & SIGEV_THREAD_ID ) &&
-               (!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
-                !same_thread_group(rtn, current) ||
-                (event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_SIGNAL))
-               return NULL;
-
-       if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
-           ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
-               return NULL;
-
-       return task_pid(rtn);
-}
-
-void posix_timers_register_clock(const clockid_t clock_id,
-                                struct k_clock *new_clock)
-{
-       if ((unsigned) clock_id >= MAX_CLOCKS) {
-               printk(KERN_WARNING "POSIX clock register failed for clock_id %d\n",
-                      clock_id);
-               return;
-       }
-
-       if (!new_clock->clock_get) {
-               printk(KERN_WARNING "POSIX clock id %d lacks clock_get()\n",
-                      clock_id);
-               return;
-       }
-       if (!new_clock->clock_getres) {
-               printk(KERN_WARNING "POSIX clock id %d lacks clock_getres()\n",
-                      clock_id);
-               return;
-       }
-
-       posix_clocks[clock_id] = *new_clock;
-}
-EXPORT_SYMBOL_GPL(posix_timers_register_clock);
-
-static struct k_itimer * alloc_posix_timer(void)
-{
-       struct k_itimer *tmr;
-       tmr = kmem_cache_zalloc(posix_timers_cache, GFP_KERNEL);
-       if (!tmr)
-               return tmr;
-       if (unlikely(!(tmr->sigq = sigqueue_alloc()))) {
-               kmem_cache_free(posix_timers_cache, tmr);
-               return NULL;
-       }
-       memset(&tmr->sigq->info, 0, sizeof(siginfo_t));
-       return tmr;
-}
-
-static void k_itimer_rcu_free(struct rcu_head *head)
-{
-       struct k_itimer *tmr = container_of(head, struct k_itimer, it.rcu);
-
-       kmem_cache_free(posix_timers_cache, tmr);
-}
-
-#define IT_ID_SET      1
-#define IT_ID_NOT_SET  0
-static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
-{
-       if (it_id_set) {
-               unsigned long flags;
-               spin_lock_irqsave(&hash_lock, flags);
-               hlist_del_rcu(&tmr->t_hash);
-               spin_unlock_irqrestore(&hash_lock, flags);
-       }
-       put_pid(tmr->it_pid);
-       sigqueue_free(tmr->sigq);
-       call_rcu(&tmr->it.rcu, k_itimer_rcu_free);
-}
-
-static struct k_clock *clockid_to_kclock(const clockid_t id)
-{
-       if (id < 0)
-               return (id & CLOCKFD_MASK) == CLOCKFD ?
-                       &clock_posix_dynamic : &clock_posix_cpu;
-
-       if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
-               return NULL;
-       return &posix_clocks[id];
-}
-
-static int common_timer_create(struct k_itimer *new_timer)
-{
-       hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock, 0);
-       return 0;
-}
-
-/* Create a POSIX.1b interval timer. */
-
-SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
-               struct sigevent __user *, timer_event_spec,
-               timer_t __user *, created_timer_id)
-{
-       struct k_clock *kc = clockid_to_kclock(which_clock);
-       struct k_itimer *new_timer;
-       int error, new_timer_id;
-       sigevent_t event;
-       int it_id_set = IT_ID_NOT_SET;
-
-       if (!kc)
-               return -EINVAL;
-       if (!kc->timer_create)
-               return -EOPNOTSUPP;
-
-       new_timer = alloc_posix_timer();
-       if (unlikely(!new_timer))
-               return -EAGAIN;
-
-       spin_lock_init(&new_timer->it_lock);
-       new_timer_id = posix_timer_add(new_timer);
-       if (new_timer_id < 0) {
-               error = new_timer_id;
-               goto out;
-       }
-
-       it_id_set = IT_ID_SET;
-       new_timer->it_id = (timer_t) new_timer_id;
-       new_timer->it_clock = which_clock;
-       new_timer->it_overrun = -1;
-
-       if (timer_event_spec) {
-               if (copy_from_user(&event, timer_event_spec, sizeof (event))) {
-                       error = -EFAULT;
-                       goto out;
-               }
-               rcu_read_lock();
-               new_timer->it_pid = get_pid(good_sigevent(&event));
-               rcu_read_unlock();
-               if (!new_timer->it_pid) {
-                       error = -EINVAL;
-                       goto out;
-               }
-       } else {
-               event.sigev_notify = SIGEV_SIGNAL;
-               event.sigev_signo = SIGALRM;
-               event.sigev_value.sival_int = new_timer->it_id;
-               new_timer->it_pid = get_pid(task_tgid(current));
-       }
-
-       new_timer->it_sigev_notify     = event.sigev_notify;
-       new_timer->sigq->info.si_signo = event.sigev_signo;
-       new_timer->sigq->info.si_value = event.sigev_value;
-       new_timer->sigq->info.si_tid   = new_timer->it_id;
-       new_timer->sigq->info.si_code  = SI_TIMER;
-
-       if (copy_to_user(created_timer_id,
-                        &new_timer_id, sizeof (new_timer_id))) {
-               error = -EFAULT;
-               goto out;
-       }
-
-       error = kc->timer_create(new_timer);
-       if (error)
-               goto out;
-
-       spin_lock_irq(&current->sighand->siglock);
-       new_timer->it_signal = current->signal;
-       list_add(&new_timer->list, &current->signal->posix_timers);
-       spin_unlock_irq(&current->sighand->siglock);
-
-       return 0;
-       /*
-        * In the case of the timer belonging to another task, after
-        * the task is unlocked, the timer is owned by the other task
-        * and may cease to exist at any time.  Don't use or modify
-        * new_timer after the unlock call.
-        */
-out:
-       release_posix_timer(new_timer, it_id_set);
-       return error;
-}
-
-/*
- * Locking issues: We need to protect the result of the id look up until
- * we get the timer locked down so it is not deleted under us.  The
- * removal is done under the idr spinlock so we use that here to bridge
- * the find to the timer lock.  To avoid a dead lock, the timer id MUST
- * be release with out holding the timer lock.
- */
-static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags)
-{
-       struct k_itimer *timr;
-
-       /*
-        * timer_t could be any type >= int and we want to make sure any
-        * @timer_id outside positive int range fails lookup.
-        */
-       if ((unsigned long long)timer_id > INT_MAX)
-               return NULL;
-
-       rcu_read_lock();
-       timr = posix_timer_by_id(timer_id);
-       if (timr) {
-               spin_lock_irqsave(&timr->it_lock, *flags);
-               if (timr->it_signal == current->signal) {
-                       rcu_read_unlock();
-                       return timr;
-               }
-               spin_unlock_irqrestore(&timr->it_lock, *flags);
-       }
-       rcu_read_unlock();
-
-       return NULL;
-}
-
-/*
- * Get the time remaining on a POSIX.1b interval timer.  This function
- * is ALWAYS called with spin_lock_irq on the timer, thus it must not
- * mess with irq.
- *
- * We have a couple of messes to clean up here.  First there is the case
- * of a timer that has a requeue pending.  These timers should appear to
- * be in the timer list with an expiry as if we were to requeue them
- * now.
- *
- * The second issue is the SIGEV_NONE timer which may be active but is
- * not really ever put in the timer list (to save system resources).
- * This timer may be expired, and if so, we will do it here.  Otherwise
- * it is the same as a requeue pending timer WRT to what we should
- * report.
- */
-static void
-common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
-{
-       ktime_t now, remaining, iv;
-       struct hrtimer *timer = &timr->it.real.timer;
-
-       memset(cur_setting, 0, sizeof(struct itimerspec));
-
-       iv = timr->it.real.interval;
-
-       /* interval timer ? */
-       if (iv.tv64)
-               cur_setting->it_interval = ktime_to_timespec(iv);
-       else if (!hrtimer_active(timer) &&
-                (timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
-               return;
-
-       now = timer->base->get_time();
-
-       /*
-        * When a requeue is pending or this is a SIGEV_NONE
-        * timer move the expiry time forward by intervals, so
-        * expiry is > now.
-        */
-       if (iv.tv64 && (timr->it_requeue_pending & REQUEUE_PENDING ||
-           (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
-               timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv);
-
-       remaining = ktime_sub(hrtimer_get_expires(timer), now);
-       /* Return 0 only, when the timer is expired and not pending */
-       if (remaining.tv64 <= 0) {
-               /*
-                * A single shot SIGEV_NONE timer must return 0, when
-                * it is expired !
-                */
-               if ((timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
-                       cur_setting->it_value.tv_nsec = 1;
-       } else
-               cur_setting->it_value = ktime_to_timespec(remaining);
-}
-
-/* Get the time remaining on a POSIX.1b interval timer. */
-SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
-               struct itimerspec __user *, setting)
-{
-       struct itimerspec cur_setting;
-       struct k_itimer *timr;
-       struct k_clock *kc;
-       unsigned long flags;
-       int ret = 0;
-
-       timr = lock_timer(timer_id, &flags);
-       if (!timr)
-               return -EINVAL;
-
-       kc = clockid_to_kclock(timr->it_clock);
-       if (WARN_ON_ONCE(!kc || !kc->timer_get))
-               ret = -EINVAL;
-       else
-               kc->timer_get(timr, &cur_setting);
-
-       unlock_timer(timr, flags);
-
-       if (!ret && copy_to_user(setting, &cur_setting, sizeof (cur_setting)))
-               return -EFAULT;
-
-       return ret;
-}
-
-/*
- * Get the number of overruns of a POSIX.1b interval timer.  This is to
- * be the overrun of the timer last delivered.  At the same time we are
- * accumulating overruns on the next timer.  The overrun is frozen when
- * the signal is delivered, either at the notify time (if the info block
- * is not queued) or at the actual delivery time (as we are informed by
- * the call back to do_schedule_next_timer().  So all we need to do is
- * to pick up the frozen overrun.
- */
-SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
-{
-       struct k_itimer *timr;
-       int overrun;
-       unsigned long flags;
-
-       timr = lock_timer(timer_id, &flags);
-       if (!timr)
-               return -EINVAL;
-
-       overrun = timr->it_overrun_last;
-       unlock_timer(timr, flags);
-
-       return overrun;
-}
-
-/* Set a POSIX.1b interval timer. */
-/* timr->it_lock is taken. */
-static int
-common_timer_set(struct k_itimer *timr, int flags,
-                struct itimerspec *new_setting, struct itimerspec *old_setting)
-{
-       struct hrtimer *timer = &timr->it.real.timer;
-       enum hrtimer_mode mode;
-
-       if (old_setting)
-               common_timer_get(timr, old_setting);
-
-       /* disable the timer */
-       timr->it.real.interval.tv64 = 0;
-       /*
-        * careful here.  If smp we could be in the "fire" routine which will
-        * be spinning as we hold the lock.  But this is ONLY an SMP issue.
-        */
-       if (hrtimer_try_to_cancel(timer) < 0)
-               return TIMER_RETRY;
-
-       timr->it_requeue_pending = (timr->it_requeue_pending + 2) & 
-               ~REQUEUE_PENDING;
-       timr->it_overrun_last = 0;
-
-       /* switch off the timer when it_value is zero */
-       if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec)
-               return 0;
-
-       mode = flags & TIMER_ABSTIME ? HRTIMER_MODE_ABS : HRTIMER_MODE_REL;
-       hrtimer_init(&timr->it.real.timer, timr->it_clock, mode);
-       timr->it.real.timer.function = posix_timer_fn;
-
-       hrtimer_set_expires(timer, timespec_to_ktime(new_setting->it_value));
-
-       /* Convert interval */
-       timr->it.real.interval = timespec_to_ktime(new_setting->it_interval);
-
-       /* SIGEV_NONE timers are not queued ! See common_timer_get */
-       if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) {
-               /* Setup correct expiry time for relative timers */
-               if (mode == HRTIMER_MODE_REL) {
-                       hrtimer_add_expires(timer, timer->base->get_time());
-               }
-               return 0;
-       }
-
-       hrtimer_start_expires(timer, mode);
-       return 0;
-}
-
-/* Set a POSIX.1b interval timer */
-SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
-               const struct itimerspec __user *, new_setting,
-               struct itimerspec __user *, old_setting)
-{
-       struct k_itimer *timr;
-       struct itimerspec new_spec, old_spec;
-       int error = 0;
-       unsigned long flag;
-       struct itimerspec *rtn = old_setting ? &old_spec : NULL;
-       struct k_clock *kc;
-
-       if (!new_setting)
-               return -EINVAL;
-
-       if (copy_from_user(&new_spec, new_setting, sizeof (new_spec)))
-               return -EFAULT;
-
-       if (!timespec_valid(&new_spec.it_interval) ||
-           !timespec_valid(&new_spec.it_value))
-               return -EINVAL;
-retry:
-       timr = lock_timer(timer_id, &flag);
-       if (!timr)
-               return -EINVAL;
-
-       kc = clockid_to_kclock(timr->it_clock);
-       if (WARN_ON_ONCE(!kc || !kc->timer_set))
-               error = -EINVAL;
-       else
-               error = kc->timer_set(timr, flags, &new_spec, rtn);
-
-       unlock_timer(timr, flag);
-       if (error == TIMER_RETRY) {
-               rtn = NULL;     // We already got the old time...
-               goto retry;
-       }
-
-       if (old_setting && !error &&
-           copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
-               error = -EFAULT;
-
-       return error;
-}
-
-static int common_timer_del(struct k_itimer *timer)
-{
-       timer->it.real.interval.tv64 = 0;
-
-       if (hrtimer_try_to_cancel(&timer->it.real.timer) < 0)
-               return TIMER_RETRY;
-       return 0;
-}
-
-static inline int timer_delete_hook(struct k_itimer *timer)
-{
-       struct k_clock *kc = clockid_to_kclock(timer->it_clock);
-
-       if (WARN_ON_ONCE(!kc || !kc->timer_del))
-               return -EINVAL;
-       return kc->timer_del(timer);
-}
-
-/* Delete a POSIX.1b interval timer. */
-SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
-{
-       struct k_itimer *timer;
-       unsigned long flags;
-
-retry_delete:
-       timer = lock_timer(timer_id, &flags);
-       if (!timer)
-               return -EINVAL;
-
-       if (timer_delete_hook(timer) == TIMER_RETRY) {
-               unlock_timer(timer, flags);
-               goto retry_delete;
-       }
-
-       spin_lock(&current->sighand->siglock);
-       list_del(&timer->list);
-       spin_unlock(&current->sighand->siglock);
-       /*
-        * This keeps any tasks waiting on the spin lock from thinking
-        * they got something (see the lock code above).
-        */
-       timer->it_signal = NULL;
-
-       unlock_timer(timer, flags);
-       release_posix_timer(timer, IT_ID_SET);
-       return 0;
-}
-
-/*
- * return timer owned by the process, used by exit_itimers
- */
-static void itimer_delete(struct k_itimer *timer)
-{
-       unsigned long flags;
-
-retry_delete:
-       spin_lock_irqsave(&timer->it_lock, flags);
-
-       if (timer_delete_hook(timer) == TIMER_RETRY) {
-               unlock_timer(timer, flags);
-               goto retry_delete;
-       }
-       list_del(&timer->list);
-       /*
-        * This keeps any tasks waiting on the spin lock from thinking
-        * they got something (see the lock code above).
-        */
-       timer->it_signal = NULL;
-
-       unlock_timer(timer, flags);
-       release_posix_timer(timer, IT_ID_SET);
-}
-
-/*
- * This is called by do_exit or de_thread, only when there are no more
- * references to the shared signal_struct.
- */
-void exit_itimers(struct signal_struct *sig)
-{
-       struct k_itimer *tmr;
-
-       while (!list_empty(&sig->posix_timers)) {
-               tmr = list_entry(sig->posix_timers.next, struct k_itimer, list);
-               itimer_delete(tmr);
-       }
-}
-
-SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
-               const struct timespec __user *, tp)
-{
-       struct k_clock *kc = clockid_to_kclock(which_clock);
-       struct timespec new_tp;
-
-       if (!kc || !kc->clock_set)
-               return -EINVAL;
-
-       if (copy_from_user(&new_tp, tp, sizeof (*tp)))
-               return -EFAULT;
-
-       return kc->clock_set(which_clock, &new_tp);
-}
-
-SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
-               struct timespec __user *,tp)
-{
-       struct k_clock *kc = clockid_to_kclock(which_clock);
-       struct timespec kernel_tp;
-       int error;
-
-       if (!kc)
-               return -EINVAL;
-
-       error = kc->clock_get(which_clock, &kernel_tp);
-
-       if (!error && copy_to_user(tp, &kernel_tp, sizeof (kernel_tp)))
-               error = -EFAULT;
-
-       return error;
-}
-
-SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock,
-               struct timex __user *, utx)
-{
-       struct k_clock *kc = clockid_to_kclock(which_clock);
-       struct timex ktx;
-       int err;
-
-       if (!kc)
-               return -EINVAL;
-       if (!kc->clock_adj)
-               return -EOPNOTSUPP;
-
-       if (copy_from_user(&ktx, utx, sizeof(ktx)))
-               return -EFAULT;
-
-       err = kc->clock_adj(which_clock, &ktx);
-
-       if (err >= 0 && copy_to_user(utx, &ktx, sizeof(ktx)))
-               return -EFAULT;
-
-       return err;
-}
-
-SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
-               struct timespec __user *, tp)
-{
-       struct k_clock *kc = clockid_to_kclock(which_clock);
-       struct timespec rtn_tp;
-       int error;
-
-       if (!kc)
-               return -EINVAL;
-
-       error = kc->clock_getres(which_clock, &rtn_tp);
-
-       if (!error && tp && copy_to_user(tp, &rtn_tp, sizeof (rtn_tp)))
-               error = -EFAULT;
-
-       return error;
-}
-
-/*
- * nanosleep for monotonic and realtime clocks
- */
-static int common_nsleep(const clockid_t which_clock, int flags,
-                        struct timespec *tsave, struct timespec __user *rmtp)
-{
-       return hrtimer_nanosleep(tsave, rmtp, flags & TIMER_ABSTIME ?
-                                HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
-                                which_clock);
-}
-
-SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
-               const struct timespec __user *, rqtp,
-               struct timespec __user *, rmtp)
-{
-       struct k_clock *kc = clockid_to_kclock(which_clock);
-       struct timespec t;
-
-       if (!kc)
-               return -EINVAL;
-       if (!kc->nsleep)
-               return -ENANOSLEEP_NOTSUP;
-
-       if (copy_from_user(&t, rqtp, sizeof (struct timespec)))
-               return -EFAULT;
-
-       if (!timespec_valid(&t))
-               return -EINVAL;
-
-       return kc->nsleep(which_clock, flags, &t, rmtp);
-}
-
-/*
- * This will restart clock_nanosleep. This is required only by
- * compat_clock_nanosleep_restart for now.
- */
-long clock_nanosleep_restart(struct restart_block *restart_block)
-{
-       clockid_t which_clock = restart_block->nanosleep.clockid;
-       struct k_clock *kc = clockid_to_kclock(which_clock);
-
-       if (WARN_ON_ONCE(!kc || !kc->nsleep_restart))
-               return -EINVAL;
-
-       return kc->nsleep_restart(restart_block);
-}
diff --git a/kernel/time.c b/kernel/time.c
deleted file mode 100644 (file)
index 7c7964c..0000000
+++ /dev/null
@@ -1,714 +0,0 @@
-/*
- *  linux/kernel/time.c
- *
- *  Copyright (C) 1991, 1992  Linus Torvalds
- *
- *  This file contains the interface functions for the various
- *  time related system calls: time, stime, gettimeofday, settimeofday,
- *                            adjtime
- */
-/*
- * Modification history kernel/time.c
- *
- * 1993-09-02    Philip Gladstone
- *      Created file with time related functions from sched/core.c and adjtimex()
- * 1993-10-08    Torsten Duwe
- *      adjtime interface update and CMOS clock write code
- * 1995-08-13    Torsten Duwe
- *      kernel PLL updated to 1994-12-13 specs (rfc-1589)
- * 1999-01-16    Ulrich Windl
- *     Introduced error checking for many cases in adjtimex().
- *     Updated NTP code according to technical memorandum Jan '96
- *     "A Kernel Model for Precision Timekeeping" by Dave Mills
- *     Allow time_constant larger than MAXTC(6) for NTP v4 (MAXTC == 10)
- *     (Even though the technical memorandum forbids it)
- * 2004-07-14   Christoph Lameter
- *     Added getnstimeofday to allow the posix timer functions to return
- *     with nanosecond accuracy
- */
-
-#include <linux/export.h>
-#include <linux/timex.h>
-#include <linux/capability.h>
-#include <linux/timekeeper_internal.h>
-#include <linux/errno.h>
-#include <linux/syscalls.h>
-#include <linux/security.h>
-#include <linux/fs.h>
-#include <linux/math64.h>
-#include <linux/ptrace.h>
-
-#include <asm/uaccess.h>
-#include <asm/unistd.h>
-
-#include "timeconst.h"
-
-/*
- * The timezone where the local system is located.  Used as a default by some
- * programs who obtain this value by using gettimeofday.
- */
-struct timezone sys_tz;
-
-EXPORT_SYMBOL(sys_tz);
-
-#ifdef __ARCH_WANT_SYS_TIME
-
-/*
- * sys_time() can be implemented in user-level using
- * sys_gettimeofday().  Is this for backwards compatibility?  If so,
- * why not move it into the appropriate arch directory (for those
- * architectures that need it).
- */
-SYSCALL_DEFINE1(time, time_t __user *, tloc)
-{
-       time_t i = get_seconds();
-
-       if (tloc) {
-               if (put_user(i,tloc))
-                       return -EFAULT;
-       }
-       force_successful_syscall_return();
-       return i;
-}
-
-/*
- * sys_stime() can be implemented in user-level using
- * sys_settimeofday().  Is this for backwards compatibility?  If so,
- * why not move it into the appropriate arch directory (for those
- * architectures that need it).
- */
-
-SYSCALL_DEFINE1(stime, time_t __user *, tptr)
-{
-       struct timespec tv;
-       int err;
-
-       if (get_user(tv.tv_sec, tptr))
-               return -EFAULT;
-
-       tv.tv_nsec = 0;
-
-       err = security_settime(&tv, NULL);
-       if (err)
-               return err;
-
-       do_settimeofday(&tv);
-       return 0;
-}
-
-#endif /* __ARCH_WANT_SYS_TIME */
-
-SYSCALL_DEFINE2(gettimeofday, struct timeval __user *, tv,
-               struct timezone __user *, tz)
-{
-       if (likely(tv != NULL)) {
-               struct timeval ktv;
-               do_gettimeofday(&ktv);
-               if (copy_to_user(tv, &ktv, sizeof(ktv)))
-                       return -EFAULT;
-       }
-       if (unlikely(tz != NULL)) {
-               if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
-                       return -EFAULT;
-       }
-       return 0;
-}
-
-/*
- * Indicates if there is an offset between the system clock and the hardware
- * clock/persistent clock/rtc.
- */
-int persistent_clock_is_local;
-
-/*
- * Adjust the time obtained from the CMOS to be UTC time instead of
- * local time.
- *
- * This is ugly, but preferable to the alternatives.  Otherwise we
- * would either need to write a program to do it in /etc/rc (and risk
- * confusion if the program gets run more than once; it would also be
- * hard to make the program warp the clock precisely n hours)  or
- * compile in the timezone information into the kernel.  Bad, bad....
- *
- *                                             - TYT, 1992-01-01
- *
- * The best thing to do is to keep the CMOS clock in universal time (UTC)
- * as real UNIX machines always do it. This avoids all headaches about
- * daylight saving times and warping kernel clocks.
- */
-static inline void warp_clock(void)
-{
-       if (sys_tz.tz_minuteswest != 0) {
-               struct timespec adjust;
-
-               persistent_clock_is_local = 1;
-               adjust.tv_sec = sys_tz.tz_minuteswest * 60;
-               adjust.tv_nsec = 0;
-               timekeeping_inject_offset(&adjust);
-       }
-}
-
-/*
- * In case for some reason the CMOS clock has not already been running
- * in UTC, but in some local time: The first time we set the timezone,
- * we will warp the clock so that it is ticking UTC time instead of
- * local time. Presumably, if someone is setting the timezone then we
- * are running in an environment where the programs understand about
- * timezones. This should be done at boot time in the /etc/rc script,
- * as soon as possible, so that the clock can be set right. Otherwise,
- * various programs will get confused when the clock gets warped.
- */
-
-int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
-{
-       static int firsttime = 1;
-       int error = 0;
-
-       if (tv && !timespec_valid(tv))
-               return -EINVAL;
-
-       error = security_settime(tv, tz);
-       if (error)
-               return error;
-
-       if (tz) {
-               sys_tz = *tz;
-               update_vsyscall_tz();
-               if (firsttime) {
-                       firsttime = 0;
-                       if (!tv)
-                               warp_clock();
-               }
-       }
-       if (tv)
-               return do_settimeofday(tv);
-       return 0;
-}
-
-SYSCALL_DEFINE2(settimeofday, struct timeval __user *, tv,
-               struct timezone __user *, tz)
-{
-       struct timeval user_tv;
-       struct timespec new_ts;
-       struct timezone new_tz;
-
-       if (tv) {
-               if (copy_from_user(&user_tv, tv, sizeof(*tv)))
-                       return -EFAULT;
-               new_ts.tv_sec = user_tv.tv_sec;
-               new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC;
-       }
-       if (tz) {
-               if (copy_from_user(&new_tz, tz, sizeof(*tz)))
-                       return -EFAULT;
-       }
-
-       return do_sys_settimeofday(tv ? &new_ts : NULL, tz ? &new_tz : NULL);
-}
-
-SYSCALL_DEFINE1(adjtimex, struct timex __user *, txc_p)
-{
-       struct timex txc;               /* Local copy of parameter */
-       int ret;
-
-       /* Copy the user data space into the kernel copy
-        * structure. But bear in mind that the structures
-        * may change
-        */
-       if(copy_from_user(&txc, txc_p, sizeof(struct timex)))
-               return -EFAULT;
-       ret = do_adjtimex(&txc);
-       return copy_to_user(txc_p, &txc, sizeof(struct timex)) ? -EFAULT : ret;
-}
-
-/**
- * current_fs_time - Return FS time
- * @sb: Superblock.
- *
- * Return the current time truncated to the time granularity supported by
- * the fs.
- */
-struct timespec current_fs_time(struct super_block *sb)
-{
-       struct timespec now = current_kernel_time();
-       return timespec_trunc(now, sb->s_time_gran);
-}
-EXPORT_SYMBOL(current_fs_time);
-
-/*
- * Convert jiffies to milliseconds and back.
- *
- * Avoid unnecessary multiplications/divisions in the
- * two most common HZ cases:
- */
-unsigned int jiffies_to_msecs(const unsigned long j)
-{
-#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
-       return (MSEC_PER_SEC / HZ) * j;
-#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
-       return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
-#else
-# if BITS_PER_LONG == 32
-       return (HZ_TO_MSEC_MUL32 * j) >> HZ_TO_MSEC_SHR32;
-# else
-       return (j * HZ_TO_MSEC_NUM) / HZ_TO_MSEC_DEN;
-# endif
-#endif
-}
-EXPORT_SYMBOL(jiffies_to_msecs);
-
-unsigned int jiffies_to_usecs(const unsigned long j)
-{
-#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
-       return (USEC_PER_SEC / HZ) * j;
-#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
-       return (j + (HZ / USEC_PER_SEC) - 1)/(HZ / USEC_PER_SEC);
-#else
-# if BITS_PER_LONG == 32
-       return (HZ_TO_USEC_MUL32 * j) >> HZ_TO_USEC_SHR32;
-# else
-       return (j * HZ_TO_USEC_NUM) / HZ_TO_USEC_DEN;
-# endif
-#endif
-}
-EXPORT_SYMBOL(jiffies_to_usecs);
-
-/**
- * timespec_trunc - Truncate timespec to a granularity
- * @t: Timespec
- * @gran: Granularity in ns.
- *
- * Truncate a timespec to a granularity. gran must be smaller than a second.
- * Always rounds down.
- *
- * This function should be only used for timestamps returned by
- * current_kernel_time() or CURRENT_TIME, not with do_gettimeofday() because
- * it doesn't handle the better resolution of the latter.
- */
-struct timespec timespec_trunc(struct timespec t, unsigned gran)
-{
-       /*
-        * Division is pretty slow so avoid it for common cases.
-        * Currently current_kernel_time() never returns better than
-        * jiffies resolution. Exploit that.
-        */
-       if (gran <= jiffies_to_usecs(1) * 1000) {
-               /* nothing */
-       } else if (gran == 1000000000) {
-               t.tv_nsec = 0;
-       } else {
-               t.tv_nsec -= t.tv_nsec % gran;
-       }
-       return t;
-}
-EXPORT_SYMBOL(timespec_trunc);
-
-/* Converts Gregorian date to seconds since 1970-01-01 00:00:00.
- * Assumes input in normal date format, i.e. 1980-12-31 23:59:59
- * => year=1980, mon=12, day=31, hour=23, min=59, sec=59.
- *
- * [For the Julian calendar (which was used in Russia before 1917,
- * Britain & colonies before 1752, anywhere else before 1582,
- * and is still in use by some communities) leave out the
- * -year/100+year/400 terms, and add 10.]
- *
- * This algorithm was first published by Gauss (I think).
- *
- * WARNING: this function will overflow on 2106-02-07 06:28:16 on
- * machines where long is 32-bit! (However, as time_t is signed, we
- * will already get problems at other places on 2038-01-19 03:14:08)
- */
-unsigned long
-mktime(const unsigned int year0, const unsigned int mon0,
-       const unsigned int day, const unsigned int hour,
-       const unsigned int min, const unsigned int sec)
-{
-       unsigned int mon = mon0, year = year0;
-
-       /* 1..12 -> 11,12,1..10 */
-       if (0 >= (int) (mon -= 2)) {
-               mon += 12;      /* Puts Feb last since it has leap day */
-               year -= 1;
-       }
-
-       return ((((unsigned long)
-                 (year/4 - year/100 + year/400 + 367*mon/12 + day) +
-                 year*365 - 719499
-           )*24 + hour /* now have hours */
-         )*60 + min /* now have minutes */
-       )*60 + sec; /* finally seconds */
-}
-
-EXPORT_SYMBOL(mktime);
-
-/**
- * set_normalized_timespec - set timespec sec and nsec parts and normalize
- *
- * @ts:                pointer to timespec variable to be set
- * @sec:       seconds to set
- * @nsec:      nanoseconds to set
- *
- * Set seconds and nanoseconds field of a timespec variable and
- * normalize to the timespec storage format
- *
- * Note: The tv_nsec part is always in the range of
- *     0 <= tv_nsec < NSEC_PER_SEC
- * For negative values only the tv_sec field is negative !
- */
-void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec)
-{
-       while (nsec >= NSEC_PER_SEC) {
-               /*
-                * The following asm() prevents the compiler from
-                * optimising this loop into a modulo operation. See
-                * also __iter_div_u64_rem() in include/linux/time.h
-                */
-               asm("" : "+rm"(nsec));
-               nsec -= NSEC_PER_SEC;
-               ++sec;
-       }
-       while (nsec < 0) {
-               asm("" : "+rm"(nsec));
-               nsec += NSEC_PER_SEC;
-               --sec;
-       }
-       ts->tv_sec = sec;
-       ts->tv_nsec = nsec;
-}
-EXPORT_SYMBOL(set_normalized_timespec);
-
-/**
- * ns_to_timespec - Convert nanoseconds to timespec
- * @nsec:       the nanoseconds value to be converted
- *
- * Returns the timespec representation of the nsec parameter.
- */
-struct timespec ns_to_timespec(const s64 nsec)
-{
-       struct timespec ts;
-       s32 rem;
-
-       if (!nsec)
-               return (struct timespec) {0, 0};
-
-       ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem);
-       if (unlikely(rem < 0)) {
-               ts.tv_sec--;
-               rem += NSEC_PER_SEC;
-       }
-       ts.tv_nsec = rem;
-
-       return ts;
-}
-EXPORT_SYMBOL(ns_to_timespec);
-
-/**
- * ns_to_timeval - Convert nanoseconds to timeval
- * @nsec:       the nanoseconds value to be converted
- *
- * Returns the timeval representation of the nsec parameter.
- */
-struct timeval ns_to_timeval(const s64 nsec)
-{
-       struct timespec ts = ns_to_timespec(nsec);
-       struct timeval tv;
-
-       tv.tv_sec = ts.tv_sec;
-       tv.tv_usec = (suseconds_t) ts.tv_nsec / 1000;
-
-       return tv;
-}
-EXPORT_SYMBOL(ns_to_timeval);
-
-/*
- * When we convert to jiffies then we interpret incoming values
- * the following way:
- *
- * - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET)
- *
- * - 'too large' values [that would result in larger than
- *   MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
- *
- * - all other values are converted to jiffies by either multiplying
- *   the input value by a factor or dividing it with a factor
- *
- * We must also be careful about 32-bit overflows.
- */
-unsigned long msecs_to_jiffies(const unsigned int m)
-{
-       /*
-        * Negative value, means infinite timeout:
-        */
-       if ((int)m < 0)
-               return MAX_JIFFY_OFFSET;
-
-#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
-       /*
-        * HZ is equal to or smaller than 1000, and 1000 is a nice
-        * round multiple of HZ, divide with the factor between them,
-        * but round upwards:
-        */
-       return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
-#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
-       /*
-        * HZ is larger than 1000, and HZ is a nice round multiple of
-        * 1000 - simply multiply with the factor between them.
-        *
-        * But first make sure the multiplication result cannot
-        * overflow:
-        */
-       if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
-               return MAX_JIFFY_OFFSET;
-
-       return m * (HZ / MSEC_PER_SEC);
-#else
-       /*
-        * Generic case - multiply, round and divide. But first
-        * check that if we are doing a net multiplication, that
-        * we wouldn't overflow:
-        */
-       if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
-               return MAX_JIFFY_OFFSET;
-
-       return (MSEC_TO_HZ_MUL32 * m + MSEC_TO_HZ_ADJ32)
-               >> MSEC_TO_HZ_SHR32;
-#endif
-}
-EXPORT_SYMBOL(msecs_to_jiffies);
-
-unsigned long usecs_to_jiffies(const unsigned int u)
-{
-       if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
-               return MAX_JIFFY_OFFSET;
-#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
-       return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ);
-#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
-       return u * (HZ / USEC_PER_SEC);
-#else
-       return (USEC_TO_HZ_MUL32 * u + USEC_TO_HZ_ADJ32)
-               >> USEC_TO_HZ_SHR32;
-#endif
-}
-EXPORT_SYMBOL(usecs_to_jiffies);
-
-/*
- * The TICK_NSEC - 1 rounds up the value to the next resolution.  Note
- * that a remainder subtract here would not do the right thing as the
- * resolution values don't fall on second boundries.  I.e. the line:
- * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
- *
- * Rather, we just shift the bits off the right.
- *
- * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
- * value to a scaled second value.
- */
-unsigned long
-timespec_to_jiffies(const struct timespec *value)
-{
-       unsigned long sec = value->tv_sec;
-       long nsec = value->tv_nsec + TICK_NSEC - 1;
-
-       if (sec >= MAX_SEC_IN_JIFFIES){
-               sec = MAX_SEC_IN_JIFFIES;
-               nsec = 0;
-       }
-       return (((u64)sec * SEC_CONVERSION) +
-               (((u64)nsec * NSEC_CONVERSION) >>
-                (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
-
-}
-EXPORT_SYMBOL(timespec_to_jiffies);
-
-void
-jiffies_to_timespec(const unsigned long jiffies, struct timespec *value)
-{
-       /*
-        * Convert jiffies to nanoseconds and separate with
-        * one divide.
-        */
-       u32 rem;
-       value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
-                                   NSEC_PER_SEC, &rem);
-       value->tv_nsec = rem;
-}
-EXPORT_SYMBOL(jiffies_to_timespec);
-
-/* Same for "timeval"
- *
- * Well, almost.  The problem here is that the real system resolution is
- * in nanoseconds and the value being converted is in micro seconds.
- * Also for some machines (those that use HZ = 1024, in-particular),
- * there is a LARGE error in the tick size in microseconds.
-
- * The solution we use is to do the rounding AFTER we convert the
- * microsecond part.  Thus the USEC_ROUND, the bits to be shifted off.
- * Instruction wise, this should cost only an additional add with carry
- * instruction above the way it was done above.
- */
-unsigned long
-timeval_to_jiffies(const struct timeval *value)
-{
-       unsigned long sec = value->tv_sec;
-       long usec = value->tv_usec;
-
-       if (sec >= MAX_SEC_IN_JIFFIES){
-               sec = MAX_SEC_IN_JIFFIES;
-               usec = 0;
-       }
-       return (((u64)sec * SEC_CONVERSION) +
-               (((u64)usec * USEC_CONVERSION + USEC_ROUND) >>
-                (USEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
-}
-EXPORT_SYMBOL(timeval_to_jiffies);
-
-void jiffies_to_timeval(const unsigned long jiffies, struct timeval *value)
-{
-       /*
-        * Convert jiffies to nanoseconds and separate with
-        * one divide.
-        */
-       u32 rem;
-
-       value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
-                                   NSEC_PER_SEC, &rem);
-       value->tv_usec = rem / NSEC_PER_USEC;
-}
-EXPORT_SYMBOL(jiffies_to_timeval);
-
-/*
- * Convert jiffies/jiffies_64 to clock_t and back.
- */
-clock_t jiffies_to_clock_t(unsigned long x)
-{
-#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
-# if HZ < USER_HZ
-       return x * (USER_HZ / HZ);
-# else
-       return x / (HZ / USER_HZ);
-# endif
-#else
-       return div_u64((u64)x * TICK_NSEC, NSEC_PER_SEC / USER_HZ);
-#endif
-}
-EXPORT_SYMBOL(jiffies_to_clock_t);
-
-unsigned long clock_t_to_jiffies(unsigned long x)
-{
-#if (HZ % USER_HZ)==0
-       if (x >= ~0UL / (HZ / USER_HZ))
-               return ~0UL;
-       return x * (HZ / USER_HZ);
-#else
-       /* Don't worry about loss of precision here .. */
-       if (x >= ~0UL / HZ * USER_HZ)
-               return ~0UL;
-
-       /* .. but do try to contain it here */
-       return div_u64((u64)x * HZ, USER_HZ);
-#endif
-}
-EXPORT_SYMBOL(clock_t_to_jiffies);
-
-u64 jiffies_64_to_clock_t(u64 x)
-{
-#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
-# if HZ < USER_HZ
-       x = div_u64(x * USER_HZ, HZ);
-# elif HZ > USER_HZ
-       x = div_u64(x, HZ / USER_HZ);
-# else
-       /* Nothing to do */
-# endif
-#else
-       /*
-        * There are better ways that don't overflow early,
-        * but even this doesn't overflow in hundreds of years
-        * in 64 bits, so..
-        */
-       x = div_u64(x * TICK_NSEC, (NSEC_PER_SEC / USER_HZ));
-#endif
-       return x;
-}
-EXPORT_SYMBOL(jiffies_64_to_clock_t);
-
-u64 nsec_to_clock_t(u64 x)
-{
-#if (NSEC_PER_SEC % USER_HZ) == 0
-       return div_u64(x, NSEC_PER_SEC / USER_HZ);
-#elif (USER_HZ % 512) == 0
-       return div_u64(x * USER_HZ / 512, NSEC_PER_SEC / 512);
-#else
-       /*
-         * max relative error 5.7e-8 (1.8s per year) for USER_HZ <= 1024,
-         * overflow after 64.99 years.
-         * exact for HZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ...
-         */
-       return div_u64(x * 9, (9ull * NSEC_PER_SEC + (USER_HZ / 2)) / USER_HZ);
-#endif
-}
-
-/**
- * nsecs_to_jiffies64 - Convert nsecs in u64 to jiffies64
- *
- * @n: nsecs in u64
- *
- * Unlike {m,u}secs_to_jiffies, type of input is not unsigned int but u64.
- * And this doesn't return MAX_JIFFY_OFFSET since this function is designed
- * for scheduler, not for use in device drivers to calculate timeout value.
- *
- * note:
- *   NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512)
- *   ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years
- */
-u64 nsecs_to_jiffies64(u64 n)
-{
-#if (NSEC_PER_SEC % HZ) == 0
-       /* Common case, HZ = 100, 128, 200, 250, 256, 500, 512, 1000 etc. */
-       return div_u64(n, NSEC_PER_SEC / HZ);
-#elif (HZ % 512) == 0
-       /* overflow after 292 years if HZ = 1024 */
-       return div_u64(n * HZ / 512, NSEC_PER_SEC / 512);
-#else
-       /*
-        * Generic case - optimized for cases where HZ is a multiple of 3.
-        * overflow after 64.99 years, exact for HZ = 60, 72, 90, 120 etc.
-        */
-       return div_u64(n * 9, (9ull * NSEC_PER_SEC + HZ / 2) / HZ);
-#endif
-}
-
-/**
- * nsecs_to_jiffies - Convert nsecs in u64 to jiffies
- *
- * @n: nsecs in u64
- *
- * Unlike {m,u}secs_to_jiffies, type of input is not unsigned int but u64.
- * And this doesn't return MAX_JIFFY_OFFSET since this function is designed
- * for scheduler, not for use in device drivers to calculate timeout value.
- *
- * note:
- *   NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512)
- *   ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years
- */
-unsigned long nsecs_to_jiffies(u64 n)
-{
-       return (unsigned long)nsecs_to_jiffies64(n);
-}
-
-/*
- * Add two timespec values and do a safety check for overflow.
- * It's assumed that both values are valid (>= 0)
- */
-struct timespec timespec_add_safe(const struct timespec lhs,
-                                 const struct timespec rhs)
-{
-       struct timespec res;
-
-       set_normalized_timespec(&res, lhs.tv_sec + rhs.tv_sec,
-                               lhs.tv_nsec + rhs.tv_nsec);
-
-       if (res.tv_sec < lhs.tv_sec || res.tv_sec < rhs.tv_sec)
-               res.tv_sec = TIME_T_MAX;
-
-       return res;
-}
index f448513..d626dc9 100644 (file)
@@ -12,6 +12,11 @@ config CLOCKSOURCE_WATCHDOG
 config ARCH_CLOCKSOURCE_DATA
        bool
 
+# Clocksources require validation of the clocksource against the last
+# cycle update - x86/TSC misfeature
+config CLOCKSOURCE_VALIDATE_LAST_CYCLE
+       bool
+
 # Timekeeping vsyscall support
 config GENERIC_TIME_VSYSCALL
        bool
@@ -20,10 +25,6 @@ config GENERIC_TIME_VSYSCALL
 config GENERIC_TIME_VSYSCALL_OLD
        bool
 
-# ktime_t scalar 64bit nsec representation
-config KTIME_SCALAR
-       bool
-
 # Old style timekeeping
 config ARCH_USES_GETTIMEOFFSET
        bool
index 57a413f..7347426 100644 (file)
@@ -1,3 +1,4 @@
+obj-y += time.o timer.o hrtimer.o itimer.o posix-timers.o posix-cpu-timers.o
 obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o
 obj-y += timeconv.o posix-clock.o alarmtimer.o
 
@@ -12,3 +13,21 @@ obj-$(CONFIG_TICK_ONESHOT)                   += tick-oneshot.o
 obj-$(CONFIG_TICK_ONESHOT)                     += tick-sched.o
 obj-$(CONFIG_TIMER_STATS)                      += timer_stats.o
 obj-$(CONFIG_DEBUG_FS)                         += timekeeping_debug.o
+obj-$(CONFIG_TEST_UDELAY)                      += udelay_test.o
+
+$(obj)/time.o: $(obj)/timeconst.h
+
+quiet_cmd_hzfile = HZFILE  $@
+      cmd_hzfile = echo "hz=$(CONFIG_HZ)" > $@
+
+targets += hz.bc
+$(obj)/hz.bc: $(objtree)/include/config/hz.h FORCE
+       $(call if_changed,hzfile)
+
+quiet_cmd_bc  = BC      $@
+      cmd_bc  = bc -q $(filter-out FORCE,$^) > $@
+
+targets += timeconst.h
+$(obj)/timeconst.h: $(obj)/hz.bc $(src)/timeconst.bc FORCE
+       $(call if_changed,bc)
+
index ba3e502..2e949cc 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/kthread.h>
 
 #include "tick-internal.h"
+#include "timekeeping_internal.h"
 
 void timecounter_init(struct timecounter *tc,
                      const struct cyclecounter *cc,
@@ -249,7 +250,7 @@ void clocksource_mark_unstable(struct clocksource *cs)
 static void clocksource_watchdog(unsigned long data)
 {
        struct clocksource *cs;
-       cycle_t csnow, wdnow;
+       cycle_t csnow, wdnow, delta;
        int64_t wd_nsec, cs_nsec;
        int next_cpu, reset_pending;
 
@@ -282,11 +283,12 @@ static void clocksource_watchdog(unsigned long data)
                        continue;
                }
 
-               wd_nsec = clocksource_cyc2ns((wdnow - cs->wd_last) & watchdog->mask,
-                                            watchdog->mult, watchdog->shift);
+               delta = clocksource_delta(wdnow, cs->wd_last, watchdog->mask);
+               wd_nsec = clocksource_cyc2ns(delta, watchdog->mult,
+                                            watchdog->shift);
 
-               cs_nsec = clocksource_cyc2ns((csnow - cs->cs_last) &
-                                            cs->mask, cs->mult, cs->shift);
+               delta = clocksource_delta(csnow, cs->cs_last, cs->mask);
+               cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
                cs->cs_last = csnow;
                cs->wd_last = wdnow;
 
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
new file mode 100644 (file)
index 0000000..1c2fe7d
--- /dev/null
@@ -0,0 +1,1866 @@
+/*
+ *  linux/kernel/hrtimer.c
+ *
+ *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
+ *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
+ *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
+ *
+ *  High-resolution kernel timers
+ *
+ *  In contrast to the low-resolution timeout API implemented in
+ *  kernel/timer.c, hrtimers provide finer resolution and accuracy
+ *  depending on system configuration and capabilities.
+ *
+ *  These timers are currently used for:
+ *   - itimers
+ *   - POSIX timers
+ *   - nanosleep
+ *   - precise in-kernel timing
+ *
+ *  Started by: Thomas Gleixner and Ingo Molnar
+ *
+ *  Credits:
+ *     based on kernel/timer.c
+ *
+ *     Help, testing, suggestions, bugfixes, improvements were
+ *     provided by:
+ *
+ *     George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
+ *     et. al.
+ *
+ *  For licencing details see kernel-base/COPYING
+ */
+
+#include <linux/cpu.h>
+#include <linux/export.h>
+#include <linux/percpu.h>
+#include <linux/hrtimer.h>
+#include <linux/notifier.h>
+#include <linux/syscalls.h>
+#include <linux/kallsyms.h>
+#include <linux/interrupt.h>
+#include <linux/tick.h>
+#include <linux/seq_file.h>
+#include <linux/err.h>
+#include <linux/debugobjects.h>
+#include <linux/sched.h>
+#include <linux/sched/sysctl.h>
+#include <linux/sched/rt.h>
+#include <linux/sched/deadline.h>
+#include <linux/timer.h>
+#include <linux/freezer.h>
+
+#include <asm/uaccess.h>
+
+#include <trace/events/timer.h>
+
+#include "timekeeping.h"
+
+/*
+ * The timer bases:
+ *
+ * There are more clockids than hrtimer bases. Thus, we index
+ * into the timer bases by the hrtimer_base_type enum. When trying
+ * to reach a base using a clockid, hrtimer_clockid_to_base()
+ * is used to convert from clockid to the proper hrtimer_base_type.
+ */
+DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
+{
+
+       .lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
+       .clock_base =
+       {
+               {
+                       .index = HRTIMER_BASE_MONOTONIC,
+                       .clockid = CLOCK_MONOTONIC,
+                       .get_time = &ktime_get,
+                       .resolution = KTIME_LOW_RES,
+               },
+               {
+                       .index = HRTIMER_BASE_REALTIME,
+                       .clockid = CLOCK_REALTIME,
+                       .get_time = &ktime_get_real,
+                       .resolution = KTIME_LOW_RES,
+               },
+               {
+                       .index = HRTIMER_BASE_BOOTTIME,
+                       .clockid = CLOCK_BOOTTIME,
+                       .get_time = &ktime_get_boottime,
+                       .resolution = KTIME_LOW_RES,
+               },
+               {
+                       .index = HRTIMER_BASE_TAI,
+                       .clockid = CLOCK_TAI,
+                       .get_time = &ktime_get_clocktai,
+                       .resolution = KTIME_LOW_RES,
+               },
+       }
+};
+
+static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
+       [CLOCK_REALTIME]        = HRTIMER_BASE_REALTIME,
+       [CLOCK_MONOTONIC]       = HRTIMER_BASE_MONOTONIC,
+       [CLOCK_BOOTTIME]        = HRTIMER_BASE_BOOTTIME,
+       [CLOCK_TAI]             = HRTIMER_BASE_TAI,
+};
+
+static inline int hrtimer_clockid_to_base(clockid_t clock_id)
+{
+       return hrtimer_clock_to_base_table[clock_id];
+}
+
+
+/*
+ * Get the coarse grained time at the softirq based on xtime and
+ * wall_to_monotonic.
+ */
+static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
+{
+       ktime_t xtim, mono, boot, tai;
+       ktime_t off_real, off_boot, off_tai;
+
+       mono = ktime_get_update_offsets_tick(&off_real, &off_boot, &off_tai);
+       boot = ktime_add(mono, off_boot);
+       xtim = ktime_add(mono, off_real);
+       tai = ktime_add(xtim, off_tai);
+
+       base->clock_base[HRTIMER_BASE_REALTIME].softirq_time = xtim;
+       base->clock_base[HRTIMER_BASE_MONOTONIC].softirq_time = mono;
+       base->clock_base[HRTIMER_BASE_BOOTTIME].softirq_time = boot;
+       base->clock_base[HRTIMER_BASE_TAI].softirq_time = tai;
+}
+
+/*
+ * Functions and macros which are different for UP/SMP systems are kept in a
+ * single place
+ */
+#ifdef CONFIG_SMP
+
+/*
+ * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
+ * means that all timers which are tied to this base via timer->base are
+ * locked, and the base itself is locked too.
+ *
+ * So __run_timers/migrate_timers can safely modify all timers which could
+ * be found on the lists/queues.
+ *
+ * When the timer's base is locked, and the timer removed from list, it is
+ * possible to set timer->base = NULL and drop the lock: the timer remains
+ * locked.
+ */
+static
+struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
+                                            unsigned long *flags)
+{
+       struct hrtimer_clock_base *base;
+
+       for (;;) {
+               base = timer->base;
+               if (likely(base != NULL)) {
+                       raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
+                       if (likely(base == timer->base))
+                               return base;
+                       /* The timer has migrated to another CPU: */
+                       raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
+               }
+               cpu_relax();
+       }
+}
+
+/*
+ * With HIGHRES=y we do not migrate the timer when it is expiring
+ * before the next event on the target cpu because we cannot reprogram
+ * the target cpu hardware and we would cause it to fire late.
+ *
+ * Called with cpu_base->lock of target cpu held.
+ */
+static int
+hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
+{
+#ifdef CONFIG_HIGH_RES_TIMERS
+       ktime_t expires;
+
+       if (!new_base->cpu_base->hres_active)
+               return 0;
+
+       expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
+       return expires.tv64 <= new_base->cpu_base->expires_next.tv64;
+#else
+       return 0;
+#endif
+}
+
+/*
+ * Switch the timer base to the current CPU when possible.
+ */
+static inline struct hrtimer_clock_base *
+switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
+                   int pinned)
+{
+       struct hrtimer_clock_base *new_base;
+       struct hrtimer_cpu_base *new_cpu_base;
+       int this_cpu = smp_processor_id();
+       int cpu = get_nohz_timer_target(pinned);
+       int basenum = base->index;
+
+again:
+       new_cpu_base = &per_cpu(hrtimer_bases, cpu);
+       new_base = &new_cpu_base->clock_base[basenum];
+
+       if (base != new_base) {
+               /*
+                * We are trying to move timer to new_base.
+                * However we can't change timer's base while it is running,
+                * so we keep it on the same CPU. No hassle vs. reprogramming
+                * the event source in the high resolution case. The softirq
+                * code will take care of this when the timer function has
+                * completed. There is no conflict as we hold the lock until
+                * the timer is enqueued.
+                */
+               if (unlikely(hrtimer_callback_running(timer)))
+                       return base;
+
+               /* See the comment in lock_timer_base() */
+               timer->base = NULL;
+               raw_spin_unlock(&base->cpu_base->lock);
+               raw_spin_lock(&new_base->cpu_base->lock);
+
+               if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
+                       cpu = this_cpu;
+                       raw_spin_unlock(&new_base->cpu_base->lock);
+                       raw_spin_lock(&base->cpu_base->lock);
+                       timer->base = base;
+                       goto again;
+               }
+               timer->base = new_base;
+       } else {
+               if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
+                       cpu = this_cpu;
+                       goto again;
+               }
+       }
+       return new_base;
+}
+
+#else /* CONFIG_SMP */
+
+static inline struct hrtimer_clock_base *
+lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
+{
+       struct hrtimer_clock_base *base = timer->base;
+
+       raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
+
+       return base;
+}
+
+# define switch_hrtimer_base(t, b, p)  (b)
+
+#endif /* !CONFIG_SMP */
+
+/*
+ * Functions for the union type storage format of ktime_t which are
+ * too large for inlining:
+ */
+#if BITS_PER_LONG < 64
+/*
+ * Divide a ktime value by a nanosecond value
+ */
+u64 ktime_divns(const ktime_t kt, s64 div)
+{
+       u64 dclc;
+       int sft = 0;
+
+       dclc = ktime_to_ns(kt);
+       /* Make sure the divisor is less than 2^32: */
+       while (div >> 32) {
+               sft++;
+               div >>= 1;
+       }
+       dclc >>= sft;
+       do_div(dclc, (unsigned long) div);
+
+       return dclc;
+}
+EXPORT_SYMBOL_GPL(ktime_divns);
+#endif /* BITS_PER_LONG < 64 */
+
+/*
+ * Add two ktime values and do a safety check for overflow:
+ */
+ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
+{
+       ktime_t res = ktime_add(lhs, rhs);
+
+       /*
+        * We use KTIME_SEC_MAX here, the maximum timeout which we can
+        * return to user space in a timespec:
+        */
+       if (res.tv64 < 0 || res.tv64 < lhs.tv64 || res.tv64 < rhs.tv64)
+               res = ktime_set(KTIME_SEC_MAX, 0);
+
+       return res;
+}
+
+EXPORT_SYMBOL_GPL(ktime_add_safe);
+
+#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
+
+static struct debug_obj_descr hrtimer_debug_descr;
+
+static void *hrtimer_debug_hint(void *addr)
+{
+       return ((struct hrtimer *) addr)->function;
+}
+
+/*
+ * fixup_init is called when:
+ * - an active object is initialized
+ */
+static int hrtimer_fixup_init(void *addr, enum debug_obj_state state)
+{
+       struct hrtimer *timer = addr;
+
+       switch (state) {
+       case ODEBUG_STATE_ACTIVE:
+               hrtimer_cancel(timer);
+               debug_object_init(timer, &hrtimer_debug_descr);
+               return 1;
+       default:
+               return 0;
+       }
+}
+
+/*
+ * fixup_activate is called when:
+ * - an active object is activated
+ * - an unknown object is activated (might be a statically initialized object)
+ */
+static int hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
+{
+       switch (state) {
+
+       case ODEBUG_STATE_NOTAVAILABLE:
+               WARN_ON_ONCE(1);
+               return 0;
+
+       case ODEBUG_STATE_ACTIVE:
+               WARN_ON(1);
+
+       default:
+               return 0;
+       }
+}
+
+/*
+ * fixup_free is called when:
+ * - an active object is freed
+ */
+static int hrtimer_fixup_free(void *addr, enum debug_obj_state state)
+{
+       struct hrtimer *timer = addr;
+
+       switch (state) {
+       case ODEBUG_STATE_ACTIVE:
+               hrtimer_cancel(timer);
+               debug_object_free(timer, &hrtimer_debug_descr);
+               return 1;
+       default:
+               return 0;
+       }
+}
+
+static struct debug_obj_descr hrtimer_debug_descr = {
+       .name           = "hrtimer",
+       .debug_hint     = hrtimer_debug_hint,
+       .fixup_init     = hrtimer_fixup_init,
+       .fixup_activate = hrtimer_fixup_activate,
+       .fixup_free     = hrtimer_fixup_free,
+};
+
+static inline void debug_hrtimer_init(struct hrtimer *timer)
+{
+       debug_object_init(timer, &hrtimer_debug_descr);
+}
+
+static inline void debug_hrtimer_activate(struct hrtimer *timer)
+{
+       debug_object_activate(timer, &hrtimer_debug_descr);
+}
+
+static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
+{
+       debug_object_deactivate(timer, &hrtimer_debug_descr);
+}
+
+static inline void debug_hrtimer_free(struct hrtimer *timer)
+{
+       debug_object_free(timer, &hrtimer_debug_descr);
+}
+
+static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
+                          enum hrtimer_mode mode);
+
+void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id,
+                          enum hrtimer_mode mode)
+{
+       debug_object_init_on_stack(timer, &hrtimer_debug_descr);
+       __hrtimer_init(timer, clock_id, mode);
+}
+EXPORT_SYMBOL_GPL(hrtimer_init_on_stack);
+
+void destroy_hrtimer_on_stack(struct hrtimer *timer)
+{
+       debug_object_free(timer, &hrtimer_debug_descr);
+}
+
+#else
+static inline void debug_hrtimer_init(struct hrtimer *timer) { }
+static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
+static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
+#endif
+
+static inline void
+debug_init(struct hrtimer *timer, clockid_t clockid,
+          enum hrtimer_mode mode)
+{
+       debug_hrtimer_init(timer);
+       trace_hrtimer_init(timer, clockid, mode);
+}
+
+static inline void debug_activate(struct hrtimer *timer)
+{
+       debug_hrtimer_activate(timer);
+       trace_hrtimer_start(timer);
+}
+
+static inline void debug_deactivate(struct hrtimer *timer)
+{
+       debug_hrtimer_deactivate(timer);
+       trace_hrtimer_cancel(timer);
+}
+
+/* High resolution timer related functions */
+#ifdef CONFIG_HIGH_RES_TIMERS
+
+/*
+ * High resolution timer enabled ?
+ */
+static int hrtimer_hres_enabled __read_mostly  = 1;
+
+/*
+ * Enable / Disable high resolution mode
+ */
+static int __init setup_hrtimer_hres(char *str)
+{
+       if (!strcmp(str, "off"))
+               hrtimer_hres_enabled = 0;
+       else if (!strcmp(str, "on"))
+               hrtimer_hres_enabled = 1;
+       else
+               return 0;
+       return 1;
+}
+
+__setup("highres=", setup_hrtimer_hres);
+
+/*
+ * hrtimer_is_hres_enabled - query, if the highres mode is enabled
+ */
+static inline int hrtimer_is_hres_enabled(void)
+{
+       return hrtimer_hres_enabled;
+}
+
+/*
+ * Is the high resolution mode active ?
+ */
+static inline int hrtimer_hres_active(void)
+{
+       return __this_cpu_read(hrtimer_bases.hres_active);
+}
+
+/*
+ * Reprogram the event source with checking both queues for the
+ * next event
+ * Called with interrupts disabled and base->lock held
+ */
+static void
+hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
+{
+       int i;
+       struct hrtimer_clock_base *base = cpu_base->clock_base;
+       ktime_t expires, expires_next;
+
+       expires_next.tv64 = KTIME_MAX;
+
+       for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
+               struct hrtimer *timer;
+               struct timerqueue_node *next;
+
+               next = timerqueue_getnext(&base->active);
+               if (!next)
+                       continue;
+               timer = container_of(next, struct hrtimer, node);
+
+               expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
+               /*
+                * clock_was_set() has changed base->offset so the
+                * result might be negative. Fix it up to prevent a
+                * false positive in clockevents_program_event()
+                */
+               if (expires.tv64 < 0)
+                       expires.tv64 = 0;
+               if (expires.tv64 < expires_next.tv64)
+                       expires_next = expires;
+       }
+
+       if (skip_equal && expires_next.tv64 == cpu_base->expires_next.tv64)
+               return;
+
+       cpu_base->expires_next.tv64 = expires_next.tv64;
+
+       /*
+        * If a hang was detected in the last timer interrupt then we
+        * leave the hang delay active in the hardware. We want the
+        * system to make progress. That also prevents the following
+        * scenario:
+        * T1 expires 50ms from now
+        * T2 expires 5s from now
+        *
+        * T1 is removed, so this code is called and would reprogram
+        * the hardware to 5s from now. Any hrtimer_start after that
+        * will not reprogram the hardware due to hang_detected being
+        * set. So we'd effectively block all timers until the T2 event
+        * fires.
+        */
+       if (cpu_base->hang_detected)
+               return;
+
+       if (cpu_base->expires_next.tv64 != KTIME_MAX)
+               tick_program_event(cpu_base->expires_next, 1);
+}
+
+/*
+ * Shared reprogramming for clock_realtime and clock_monotonic
+ *
+ * When a timer is enqueued and expires earlier than the already enqueued
+ * timers, we have to check, whether it expires earlier than the timer for
+ * which the clock event device was armed.
+ *
+ * Note, that in case the state has HRTIMER_STATE_CALLBACK set, no reprogramming
+ * and no expiry check happens. The timer gets enqueued into the rbtree. The
+ * reprogramming and expiry check is done in the hrtimer_interrupt or in the
+ * softirq.
+ *
+ * Called with interrupts disabled and base->cpu_base.lock held
+ */
+static int hrtimer_reprogram(struct hrtimer *timer,
+                            struct hrtimer_clock_base *base)
+{
+       struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+       ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
+       int res;
+
+       WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);
+
+       /*
+        * When the callback is running, we do not reprogram the clock event
+        * device. The timer callback is either running on a different CPU or
+        * the callback is executed in the hrtimer_interrupt context. The
+        * reprogramming is handled either by the softirq, which called the
+        * callback or at the end of the hrtimer_interrupt.
+        */
+       if (hrtimer_callback_running(timer))
+               return 0;
+
+       /*
+        * CLOCK_REALTIME timer might be requested with an absolute
+        * expiry time which is less than base->offset. Nothing wrong
+        * about that, just avoid to call into the tick code, which
+        * has no objections against negative expiry values.
+        */
+       if (expires.tv64 < 0)
+               return -ETIME;
+
+       if (expires.tv64 >= cpu_base->expires_next.tv64)
+               return 0;
+
+       /*
+        * If a hang was detected in the last timer interrupt then we
+        * do not schedule a timer which is earlier than the expiry
+        * which we enforced in the hang detection. We want the system
+        * to make progress.
+        */
+       if (cpu_base->hang_detected)
+               return 0;
+
+       /*
+        * Clockevents returns -ETIME, when the event was in the past.
+        */
+       res = tick_program_event(expires, 0);
+       if (!IS_ERR_VALUE(res))
+               cpu_base->expires_next = expires;
+       return res;
+}
+
+/*
+ * Initialize the high resolution related parts of cpu_base
+ */
+static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
+{
+       base->expires_next.tv64 = KTIME_MAX;
+       base->hres_active = 0;
+}
+
+static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
+{
+       ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
+       ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
+       ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset;
+
+       return ktime_get_update_offsets_now(offs_real, offs_boot, offs_tai);
+}
+
+/*
+ * Retrigger next event is called after clock was set
+ *
+ * Called with interrupts disabled via on_each_cpu()
+ */
+static void retrigger_next_event(void *arg)
+{
+       struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);
+
+       if (!hrtimer_hres_active())
+               return;
+
+       raw_spin_lock(&base->lock);
+       hrtimer_update_base(base);
+       hrtimer_force_reprogram(base, 0);
+       raw_spin_unlock(&base->lock);
+}
+
+/*
+ * Switch to high resolution mode
+ */
+static int hrtimer_switch_to_hres(void)
+{
+       int i, cpu = smp_processor_id();
+       struct hrtimer_cpu_base *base = &per_cpu(hrtimer_bases, cpu);
+       unsigned long flags;
+
+       if (base->hres_active)
+               return 1;
+
+       local_irq_save(flags);
+
+       if (tick_init_highres()) {
+               local_irq_restore(flags);
+               printk(KERN_WARNING "Could not switch to high resolution "
+                                   "mode on CPU %d\n", cpu);
+               return 0;
+       }
+       base->hres_active = 1;
+       for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
+               base->clock_base[i].resolution = KTIME_HIGH_RES;
+
+       tick_setup_sched_timer();
+       /* "Retrigger" the interrupt to get things going */
+       retrigger_next_event(NULL);
+       local_irq_restore(flags);
+       return 1;
+}
+
+static void clock_was_set_work(struct work_struct *work)
+{
+       clock_was_set();
+}
+
+static DECLARE_WORK(hrtimer_work, clock_was_set_work);
+
+/*
+ * Called from timekeeping and resume code to reprogram the hrtimer
+ * interrupt device on all cpus.
+ */
+void clock_was_set_delayed(void)
+{
+       schedule_work(&hrtimer_work);
+}
+
+#else
+
+static inline int hrtimer_hres_active(void) { return 0; }
+static inline int hrtimer_is_hres_enabled(void) { return 0; }
+static inline int hrtimer_switch_to_hres(void) { return 0; }
+static inline void
+hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
+static inline int hrtimer_reprogram(struct hrtimer *timer,
+                                   struct hrtimer_clock_base *base)
+{
+       return 0;
+}
+static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
+static inline void retrigger_next_event(void *arg) { }
+
+#endif /* CONFIG_HIGH_RES_TIMERS */
+
+/*
+ * Clock realtime was set
+ *
+ * Change the offset of the realtime clock vs. the monotonic
+ * clock.
+ *
+ * We might have to reprogram the high resolution timer interrupt. On
+ * SMP we call the architecture specific code to retrigger _all_ high
+ * resolution timer interrupts. On UP we just disable interrupts and
+ * call the high resolution interrupt code.
+ */
+void clock_was_set(void)
+{
+#ifdef CONFIG_HIGH_RES_TIMERS
+       /* Retrigger the CPU local events everywhere */
+       on_each_cpu(retrigger_next_event, NULL, 1);
+#endif
+       timerfd_clock_was_set();
+}
+
+/*
+ * During resume we might have to reprogram the high resolution timer
+ * interrupt on all online CPUs.  However, all other CPUs will be
+ * stopped with IRQs interrupts disabled so the clock_was_set() call
+ * must be deferred.
+ */
+void hrtimers_resume(void)
+{
+       WARN_ONCE(!irqs_disabled(),
+                 KERN_INFO "hrtimers_resume() called with IRQs enabled!");
+
+       /* Retrigger on the local CPU */
+       retrigger_next_event(NULL);
+       /* And schedule a retrigger for all others */
+       clock_was_set_delayed();
+}
+
+static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
+{
+#ifdef CONFIG_TIMER_STATS
+       if (timer->start_site)
+               return;
+       timer->start_site = __builtin_return_address(0);
+       memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
+       timer->start_pid = current->pid;
+#endif
+}
+
+static inline void timer_stats_hrtimer_clear_start_info(struct hrtimer *timer)
+{
+#ifdef CONFIG_TIMER_STATS
+       timer->start_site = NULL;
+#endif
+}
+
+static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
+{
+#ifdef CONFIG_TIMER_STATS
+       if (likely(!timer_stats_active))
+               return;
+       timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
+                                timer->function, timer->start_comm, 0);
+#endif
+}
+
+/*
+ * Counterpart to lock_hrtimer_base above:
+ */
+static inline
+void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
+{
+       raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
+}
+
+/**
+ * hrtimer_forward - forward the timer expiry
+ * @timer:     hrtimer to forward
+ * @now:       forward past this time
+ * @interval:  the interval to forward
+ *
+ * Forward the timer expiry so it will expire in the future.
+ * Returns the number of overruns.
+ */
+u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
+{
+       u64 orun = 1;
+       ktime_t delta;
+
+       delta = ktime_sub(now, hrtimer_get_expires(timer));
+
+       /* The expiry time is still in the future - nothing to forward */
+       if (delta.tv64 < 0)
+               return 0;
+
+       /* Clamp the interval to the resolution of the timer's clock base */
+       if (interval.tv64 < timer->base->resolution.tv64)
+               interval.tv64 = timer->base->resolution.tv64;
+
+       if (unlikely(delta.tv64 >= interval.tv64)) {
+               s64 incr = ktime_to_ns(interval);
+
+               /* Jump over whole missed intervals in one step */
+               orun = ktime_divns(delta, incr);
+               hrtimer_add_expires_ns(timer, incr * orun);
+               if (hrtimer_get_expires_tv64(timer) > now.tv64)
+                       return orun;
+               /*
+                * This (and the ktime_add() below) is the
+                * correction for exact:
+                */
+               orun++;
+       }
+       hrtimer_add_expires(timer, interval);
+
+       return orun;
+}
+EXPORT_SYMBOL_GPL(hrtimer_forward);
+
+/*
+ * enqueue_hrtimer - internal function to (re)start a timer
+ *
+ * The timer is inserted in expiry order. Insertion into the
+ * red black tree is O(log(n)). Must hold the base lock.
+ *
+ * Returns 1 when the new timer is the leftmost timer in the tree.
+ */
+static int enqueue_hrtimer(struct hrtimer *timer,
+                          struct hrtimer_clock_base *base)
+{
+       debug_activate(timer);
+
+       timerqueue_add(&base->active, &timer->node);
+       /* Mark this clock base as having at least one active timer */
+       base->cpu_base->active_bases |= 1 << base->index;
+
+       /*
+        * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the
+        * state of a possibly running callback.
+        */
+       timer->state |= HRTIMER_STATE_ENQUEUED;
+
+       return (&timer->node == base->active.next);
+}
+
+/*
+ * __remove_hrtimer - internal function to remove a timer
+ *
+ * Caller must hold the base lock.
+ *
+ * High resolution timer mode reprograms the clock event device when the
+ * timer is the one which expires next. The caller can disable this by setting
+ * reprogram to zero. This is useful, when the context does a reprogramming
+ * anyway (e.g. timer interrupt)
+ */
+static void __remove_hrtimer(struct hrtimer *timer,
+                            struct hrtimer_clock_base *base,
+                            unsigned long newstate, int reprogram)
+{
+       struct timerqueue_node *next_timer;
+       if (!(timer->state & HRTIMER_STATE_ENQUEUED))
+               goto out;
+
+       next_timer = timerqueue_getnext(&base->active);
+       timerqueue_del(&base->active, &timer->node);
+       if (&timer->node == next_timer) {
+#ifdef CONFIG_HIGH_RES_TIMERS
+               /* Reprogram the clock event device, if enabled */
+               if (reprogram && hrtimer_hres_active()) {
+                       ktime_t expires;
+
+                       expires = ktime_sub(hrtimer_get_expires(timer),
+                                           base->offset);
+                       if (base->cpu_base->expires_next.tv64 == expires.tv64)
+                               hrtimer_force_reprogram(base->cpu_base, 1);
+               }
+#endif
+       }
+       /* Clear the active bit of this clock base when it went empty */
+       if (!timerqueue_getnext(&base->active))
+               base->cpu_base->active_bases &= ~(1 << base->index);
+out:
+       timer->state = newstate;
+}
+
+/*
+ * remove hrtimer, called with base lock held
+ *
+ * Returns 1 if the timer was queued and has been removed, 0 otherwise.
+ */
+static inline int
+remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
+{
+       if (hrtimer_is_queued(timer)) {
+               unsigned long state;
+               int reprogram;
+
+               /*
+                * Remove the timer and force reprogramming when high
+                * resolution mode is active and the timer is on the current
+                * CPU. If we remove a timer on another CPU, reprogramming is
+                * skipped. The interrupt event on this CPU is fired and
+                * reprogramming happens in the interrupt handler. This is a
+                * rare case and less expensive than a smp call.
+                */
+               debug_deactivate(timer);
+               timer_stats_hrtimer_clear_start_info(timer);
+               reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
+               /*
+                * We must preserve the CALLBACK state flag here,
+                * otherwise we could move the timer base in
+                * switch_hrtimer_base.
+                */
+               state = timer->state & HRTIMER_STATE_CALLBACK;
+               __remove_hrtimer(timer, base, state, reprogram);
+               return 1;
+       }
+       return 0;
+}
+
+/*
+ * Common implementation for (re)starting a timer:
+ *
+ * Removes the timer if it is queued, computes the absolute expiry,
+ * enqueues it on the proper clock base and, when it became the leftmost
+ * timer, takes care of kicking the dynticks target or reprogramming the
+ * clock event device. @wakeup selects whether the softirq may be raised
+ * with a wakeup when reprogramming requires it.
+ *
+ * Returns 0 when the timer was inactive, 1 when it was active and got
+ * removed before being restarted.
+ */
+int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+               unsigned long delta_ns, const enum hrtimer_mode mode,
+               int wakeup)
+{
+       struct hrtimer_clock_base *base, *new_base;
+       unsigned long flags;
+       int ret, leftmost;
+
+       base = lock_hrtimer_base(timer, &flags);
+
+       /* Remove an active timer from the queue: */
+       ret = remove_hrtimer(timer, base);
+
+       if (mode & HRTIMER_MODE_REL) {
+               tim = ktime_add_safe(tim, base->get_time());
+               /*
+                * CONFIG_TIME_LOW_RES is a temporary way for architectures
+                * to signal that they simply return xtime in
+                * do_gettimeoffset(). In this case we want to round up by
+                * resolution when starting a relative timer, to avoid short
+                * timeouts. This will go away with the GTOD framework.
+                */
+#ifdef CONFIG_TIME_LOW_RES
+               tim = ktime_add_safe(tim, base->resolution);
+#endif
+       }
+
+       hrtimer_set_expires_range_ns(timer, tim, delta_ns);
+
+       /* Switch the timer base, if necessary: */
+       new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
+
+       timer_stats_hrtimer_set_start_info(timer);
+
+       leftmost = enqueue_hrtimer(timer, new_base);
+
+       /* Not the first expiring timer - no reprogramming required */
+       if (!leftmost) {
+               unlock_hrtimer_base(timer, &flags);
+               return ret;
+       }
+
+       if (!hrtimer_is_hres_active(timer)) {
+               /*
+                * Kick to reschedule the next tick to handle the new timer
+                * on dynticks target.
+                */
+               wake_up_nohz_cpu(new_base->cpu_base->cpu);
+       } else if (new_base->cpu_base == &__get_cpu_var(hrtimer_bases) &&
+                       hrtimer_reprogram(timer, new_base)) {
+               /*
+                * Only allow reprogramming if the new base is on this CPU.
+                * (it might still be on another CPU if the timer was pending)
+                *
+                * XXX send_remote_softirq() ?
+                */
+               if (wakeup) {
+                       /*
+                        * We need to drop cpu_base->lock to avoid a
+                        * lock ordering issue vs. rq->lock.
+                        */
+                       raw_spin_unlock(&new_base->cpu_base->lock);
+                       raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+                       local_irq_restore(flags);
+                       return ret;
+               } else {
+                       __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+               }
+       }
+
+       unlock_hrtimer_base(timer, &flags);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(__hrtimer_start_range_ns);
+
+/**
+ * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
+ * @timer:     the timer to be added
+ * @tim:       expiry time
+ * @delta_ns:  "slack" range for the timer
+ * @mode:      expiry mode: absolute (HRTIMER_MODE_ABS) or
+ *             relative (HRTIMER_MODE_REL)
+ *
+ * Returns:
+ *  0 on success
+ *  1 when the timer was active
+ */
+int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+               unsigned long delta_ns, const enum hrtimer_mode mode)
+{
+       /* wakeup == 1: allow raising HRTIMER_SOFTIRQ with a wakeup */
+       return __hrtimer_start_range_ns(timer, tim, delta_ns, mode, 1);
+}
+EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
+
+/**
+ * hrtimer_start - (re)start an hrtimer on the current CPU
+ * @timer:     the timer to be added
+ * @tim:       expiry time
+ * @mode:      expiry mode: absolute (HRTIMER_MODE_ABS) or
+ *             relative (HRTIMER_MODE_REL)
+ *
+ * Returns:
+ *  0 on success
+ *  1 when the timer was active
+ */
+int
+hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
+{
+       /* Same as hrtimer_start_range_ns() with zero slack */
+       return __hrtimer_start_range_ns(timer, tim, 0, mode, 1);
+}
+EXPORT_SYMBOL_GPL(hrtimer_start);
+
+
+/**
+ * hrtimer_try_to_cancel - try to deactivate a timer
+ * @timer:     hrtimer to stop
+ *
+ * Returns:
+ *  0 when the timer was not active
+ *  1 when the timer was active
+ * -1 when the timer is currently executing the callback function and
+ *    cannot be stopped
+ */
+int hrtimer_try_to_cancel(struct hrtimer *timer)
+{
+       struct hrtimer_clock_base *base;
+       unsigned long flags;
+       int ret = -1;
+
+       base = lock_hrtimer_base(timer, &flags);
+
+       /* Only remove the timer when its callback is not running */
+       if (!hrtimer_callback_running(timer))
+               ret = remove_hrtimer(timer, base);
+
+       unlock_hrtimer_base(timer, &flags);
+
+       return ret;
+
+}
+EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);
+
+/**
+ * hrtimer_cancel - cancel a timer and wait for the handler to finish.
+ * @timer:     the timer to be cancelled
+ *
+ * Returns:
+ *  0 when the timer was not active
+ *  1 when the timer was active
+ */
+int hrtimer_cancel(struct hrtimer *timer)
+{
+       for (;;) {
+               int ret = hrtimer_try_to_cancel(timer);
+
+               if (ret >= 0)
+                       return ret;
+               /* Callback is running (-1); busy wait and retry */
+               cpu_relax();
+       }
+}
+EXPORT_SYMBOL_GPL(hrtimer_cancel);
+
+/**
+ * hrtimer_get_remaining - get remaining time for the timer
+ * @timer:     the timer to read
+ *
+ * Returns the delta between the (hard) expiry time and now.
+ */
+ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
+{
+       unsigned long flags;
+       ktime_t rem;
+
+       lock_hrtimer_base(timer, &flags);
+       rem = hrtimer_expires_remaining(timer);
+       unlock_hrtimer_base(timer, &flags);
+
+       return rem;
+}
+EXPORT_SYMBOL_GPL(hrtimer_get_remaining);
+
+#ifdef CONFIG_NO_HZ_COMMON
+/**
+ * hrtimer_get_next_event - get the time until next expiry event
+ *
+ * Returns the delta to the next expiry event or KTIME_MAX if no timer
+ * is pending.
+ */
+ktime_t hrtimer_get_next_event(void)
+{
+       struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+       struct hrtimer_clock_base *base = cpu_base->clock_base;
+       ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
+       unsigned long flags;
+       int i;
+
+       raw_spin_lock_irqsave(&cpu_base->lock, flags);
+
+       /* Scan the clock bases only in low resolution mode */
+       if (!hrtimer_hres_active()) {
+               for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
+                       struct hrtimer *timer;
+                       struct timerqueue_node *next;
+
+                       next = timerqueue_getnext(&base->active);
+                       if (!next)
+                               continue;
+
+                       /* The leftmost node is the earliest expiring timer */
+                       timer = container_of(next, struct hrtimer, node);
+                       delta.tv64 = hrtimer_get_expires_tv64(timer);
+                       delta = ktime_sub(delta, base->get_time());
+                       if (delta.tv64 < mindelta.tv64)
+                               mindelta.tv64 = delta.tv64;
+               }
+       }
+
+       raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
+
+       /* A timer which already expired maps to "expire now" */
+       if (mindelta.tv64 < 0)
+               mindelta.tv64 = 0;
+       return mindelta;
+}
+#endif
+
+/*
+ * Common initialization: zero the timer and attach it to the requested
+ * clock base of the current CPU.
+ */
+static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
+                          enum hrtimer_mode mode)
+{
+       struct hrtimer_cpu_base *cpu_base;
+       int base;
+
+       memset(timer, 0, sizeof(struct hrtimer));
+
+       cpu_base = &__raw_get_cpu_var(hrtimer_bases);
+
+       /* Relative CLOCK_REALTIME timers are mapped to CLOCK_MONOTONIC */
+       if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
+               clock_id = CLOCK_MONOTONIC;
+
+       base = hrtimer_clockid_to_base(clock_id);
+       timer->base = &cpu_base->clock_base[base];
+       timerqueue_init(&timer->node);
+
+#ifdef CONFIG_TIMER_STATS
+       timer->start_site = NULL;
+       timer->start_pid = -1;
+       memset(timer->start_comm, 0, TASK_COMM_LEN);
+#endif
+}
+
+/**
+ * hrtimer_init - initialize a timer to the given clock
+ * @timer:     the timer to be initialized
+ * @clock_id:  the clock to be used
+ * @mode:      timer mode abs/rel
+ */
+void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
+                 enum hrtimer_mode mode)
+{
+       /* Debug object setup, then the common initialization */
+       debug_init(timer, clock_id, mode);
+       __hrtimer_init(timer, clock_id, mode);
+}
+EXPORT_SYMBOL_GPL(hrtimer_init);
+
+/**
+ * hrtimer_get_res - get the timer resolution for a clock
+ * @which_clock: which clock to query
+ * @tp:                 pointer to timespec variable to store the resolution
+ *
+ * Store the resolution of the clock selected by @which_clock in the
+ * variable pointed to by @tp.
+ *
+ * Always returns 0.
+ */
+int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
+{
+       struct hrtimer_cpu_base *cpu_base;
+       int base = hrtimer_clockid_to_base(which_clock);
+
+       cpu_base = &__raw_get_cpu_var(hrtimer_bases);
+       *tp = ktime_to_timespec(cpu_base->clock_base[base].resolution);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(hrtimer_get_res);
+
+/*
+ * Run the callback function of an expired timer.
+ *
+ * Called with the cpu_base lock held and interrupts disabled; the lock
+ * is dropped around the callback invocation and reacquired afterwards.
+ */
+static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
+{
+       struct hrtimer_clock_base *base = timer->base;
+       struct hrtimer_cpu_base *cpu_base = base->cpu_base;
+       enum hrtimer_restart (*fn)(struct hrtimer *);
+       int restart;
+
+       WARN_ON(!irqs_disabled());
+
+       debug_deactivate(timer);
+       __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
+       timer_stats_account_hrtimer(timer);
+       fn = timer->function;
+
+       /*
+        * Because we run timers from hardirq context, there is no chance
+        * they get migrated to another cpu, therefore it's safe to unlock
+        * the timer base.
+        */
+       raw_spin_unlock(&cpu_base->lock);
+       trace_hrtimer_expire_entry(timer, now);
+       restart = fn(timer);
+       trace_hrtimer_expire_exit(timer);
+       raw_spin_lock(&cpu_base->lock);
+
+       /*
+        * Note: We clear the CALLBACK bit after enqueue_hrtimer and
+        * we do not reprogram the event hardware. Happens either in
+        * hrtimer_start_range_ns() or in hrtimer_interrupt()
+        */
+       if (restart != HRTIMER_NORESTART) {
+               BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
+               enqueue_hrtimer(timer, base);
+       }
+
+       WARN_ON_ONCE(!(timer->state & HRTIMER_STATE_CALLBACK));
+
+       timer->state &= ~HRTIMER_STATE_CALLBACK;
+}
+
+#ifdef CONFIG_HIGH_RES_TIMERS
+
+/*
+ * High resolution timer interrupt
+ * Called with interrupts disabled
+ */
+void hrtimer_interrupt(struct clock_event_device *dev)
+{
+       struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+       ktime_t expires_next, now, entry_time, delta;
+       int i, retries = 0;
+
+       BUG_ON(!cpu_base->hres_active);
+       cpu_base->nr_events++;
+       dev->next_event.tv64 = KTIME_MAX;
+
+       raw_spin_lock(&cpu_base->lock);
+       entry_time = now = hrtimer_update_base(cpu_base);
+retry:
+       expires_next.tv64 = KTIME_MAX;
+       /*
+        * We set expires_next to KTIME_MAX here with cpu_base->lock
+        * held to prevent that a timer is enqueued in our queue via
+        * the migration code. This does not affect enqueueing of
+        * timers which run their callback and need to be requeued on
+        * this CPU.
+        */
+       cpu_base->expires_next.tv64 = KTIME_MAX;
+
+       for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
+               struct hrtimer_clock_base *base;
+               struct timerqueue_node *node;
+               ktime_t basenow;
+
+               /* Skip clock bases without active timers */
+               if (!(cpu_base->active_bases & (1 << i)))
+                       continue;
+
+               base = cpu_base->clock_base + i;
+               basenow = ktime_add(now, base->offset);
+
+               while ((node = timerqueue_getnext(&base->active))) {
+                       struct hrtimer *timer;
+
+                       timer = container_of(node, struct hrtimer, node);
+
+                       /*
+                        * The immediate goal for using the softexpires is
+                        * minimizing wakeups, not running timers at the
+                        * earliest interrupt after their soft expiration.
+                        * This allows us to avoid using a Priority Search
+                        * Tree, which can answer a stabbing query for
+                        * overlapping intervals and instead use the simple
+                        * BST we already have.
+                        * We don't add extra wakeups by delaying timers that
+                        * are right-of a not yet expired timer, because that
+                        * timer will have to trigger a wakeup anyway.
+                        */
+
+                       if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) {
+                               ktime_t expires;
+
+                               expires = ktime_sub(hrtimer_get_expires(timer),
+                                                   base->offset);
+                               if (expires.tv64 < 0)
+                                       expires.tv64 = KTIME_MAX;
+                               if (expires.tv64 < expires_next.tv64)
+                                       expires_next = expires;
+                               break;
+                       }
+
+                       __run_hrtimer(timer, &basenow);
+               }
+       }
+
+       /*
+        * Store the new expiry value so the migration code can verify
+        * against it.
+        */
+       cpu_base->expires_next = expires_next;
+       raw_spin_unlock(&cpu_base->lock);
+
+       /* Reprogramming necessary ? */
+       if (expires_next.tv64 == KTIME_MAX ||
+           !tick_program_event(expires_next, 0)) {
+               cpu_base->hang_detected = 0;
+               return;
+       }
+
+       /*
+        * The next timer was already expired due to:
+        * - tracing
+        * - long lasting callbacks
+        * - being scheduled away when running in a VM
+        *
+        * We need to prevent that we loop forever in the hrtimer
+        * interrupt routine. We give it 3 attempts to avoid
+        * overreacting on some spurious event.
+        *
+        * Acquire base lock for updating the offsets and retrieving
+        * the current time.
+        */
+       raw_spin_lock(&cpu_base->lock);
+       now = hrtimer_update_base(cpu_base);
+       cpu_base->nr_retries++;
+       if (++retries < 3)
+               goto retry;
+       /*
+        * Give the system a chance to do something else than looping
+        * here. We stored the entry time, so we know exactly how long
+        * we spent here. We schedule the next event this amount of
+        * time away.
+        */
+       cpu_base->nr_hangs++;
+       cpu_base->hang_detected = 1;
+       raw_spin_unlock(&cpu_base->lock);
+       delta = ktime_sub(now, entry_time);
+       if (delta.tv64 > cpu_base->max_hang_time.tv64)
+               cpu_base->max_hang_time = delta;
+       /*
+        * Limit it to a sensible value as we enforce a longer
+        * delay. Give the CPU at least 100ms to catch up.
+        */
+       if (delta.tv64 > 100 * NSEC_PER_MSEC)
+               expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
+       else
+               expires_next = ktime_add(now, delta);
+       tick_program_event(expires_next, 1);
+       printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
+                   ktime_to_ns(delta));
+}
+
+/*
+ * local version of hrtimer_peek_ahead_timers() called with interrupts
+ * disabled.
+ */
+static void __hrtimer_peek_ahead_timers(void)
+{
+       struct tick_device *td;
+
+       if (!hrtimer_hres_active())
+               return;
+
+       /* The tick device or its event device may not be set up yet */
+       td = &__get_cpu_var(tick_cpu_device);
+       if (td && td->evtdev)
+               hrtimer_interrupt(td->evtdev);
+}
+
+/**
+ * hrtimer_peek_ahead_timers -- run soft-expired timers now
+ *
+ * hrtimer_peek_ahead_timers will peek at the timer queue of
+ * the current cpu and check if there are any timers for which
+ * the soft expires time has passed. If any such timers exist,
+ * they are run immediately and then removed from the timer queue.
+ *
+ */
+void hrtimer_peek_ahead_timers(void)
+{
+       unsigned long flags;
+
+       local_irq_save(flags);
+       __hrtimer_peek_ahead_timers();
+       local_irq_restore(flags);
+}
+
+/* HRTIMER_SOFTIRQ handler, see hrtimers_init() */
+static void run_hrtimer_softirq(struct softirq_action *h)
+{
+       hrtimer_peek_ahead_timers();
+}
+
+#else /* CONFIG_HIGH_RES_TIMERS */
+
+/* Without high resolution timers there is nothing to peek ahead for */
+static inline void __hrtimer_peek_ahead_timers(void) { }
+
+#endif /* !CONFIG_HIGH_RES_TIMERS */
+
+/*
+ * Called from timer softirq every jiffy, expire hrtimers:
+ *
+ * For HRT it's the fall back code to run the softirq in the timer
+ * softirq context in case the hrtimer initialization failed or has
+ * not been done yet.
+ */
+void hrtimer_run_pending(void)
+{
+       if (hrtimer_hres_active())
+               return;
+
+       /*
+        * This _is_ ugly: We have to check in the softirq context,
+        * whether we can switch to highres and / or nohz mode. The
+        * clocksource switch happens in the timer interrupt with
+        * xtime_lock held. Notification from there only sets the
+        * check bit in the tick_oneshot code, otherwise we might
+        * deadlock vs. xtime_lock.
+        */
+       if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
+               hrtimer_switch_to_hres();
+}
+
+/*
+ * Called from hardirq context every jiffy
+ *
+ * Low resolution fallback: expire timers whose expiry lies before the
+ * softirq time of their clock base.
+ */
+void hrtimer_run_queues(void)
+{
+       struct timerqueue_node *node;
+       struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+       struct hrtimer_clock_base *base;
+       int index, gettime = 1;
+
+       /* In high resolution mode expiry happens in hrtimer_interrupt() */
+       if (hrtimer_hres_active())
+               return;
+
+       for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
+               base = &cpu_base->clock_base[index];
+               if (!timerqueue_getnext(&base->active))
+                       continue;
+
+               /* Read the time once, on first non-empty base */
+               if (gettime) {
+                       hrtimer_get_softirq_time(cpu_base);
+                       gettime = 0;
+               }
+
+               raw_spin_lock(&cpu_base->lock);
+
+               while ((node = timerqueue_getnext(&base->active))) {
+                       struct hrtimer *timer;
+
+                       timer = container_of(node, struct hrtimer, node);
+                       if (base->softirq_time.tv64 <=
+                                       hrtimer_get_expires_tv64(timer))
+                               break;
+
+                       __run_hrtimer(timer, &base->softirq_time);
+               }
+               raw_spin_unlock(&cpu_base->lock);
+       }
+}
+
+/*
+ * Sleep related functions:
+ */
+
+/* Timer callback: wake up the task attached to the sleeper */
+static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
+{
+       struct hrtimer_sleeper *t =
+               container_of(timer, struct hrtimer_sleeper, timer);
+       struct task_struct *task = t->task;
+
+       /* Clearing t->task signals "timer fired" to the sleeping side */
+       t->task = NULL;
+       if (task)
+               wake_up_process(task);
+
+       return HRTIMER_NORESTART;
+}
+
+/* Attach the wakeup callback and the task to wake to the sleeper */
+void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
+{
+       sl->timer.function = hrtimer_wakeup;
+       sl->task = task;
+}
+EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
+
+/*
+ * Sleep until the sleeper's timer expires or a signal arrives.
+ *
+ * Returns 1 when the sleep completed (t->task was cleared by the
+ * wakeup callback), 0 when it was interrupted.
+ */
+static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
+{
+       hrtimer_init_sleeper(t, current);
+
+       do {
+               set_current_state(TASK_INTERRUPTIBLE);
+               hrtimer_start_expires(&t->timer, mode);
+               /* NOTE: if the timer is already inactive here it presumably
+                * expired right away - nothing to wait for */
+               if (!hrtimer_active(&t->timer))
+                       t->task = NULL;
+
+               if (likely(t->task))
+                       freezable_schedule();
+
+               hrtimer_cancel(&t->timer);
+               /* Further iterations restart at the same absolute expiry */
+               mode = HRTIMER_MODE_ABS;
+
+       } while (t->task && !signal_pending(current));
+
+       __set_current_state(TASK_RUNNING);
+
+       return t->task == NULL;
+}
+
+/*
+ * Copy the remaining sleep time to the user supplied timespec.
+ *
+ * Returns 0 when no time remains, 1 on success and -EFAULT when the
+ * copy to user space failed.
+ */
+static int update_rmtp(struct hrtimer *timer, struct timespec __user *rmtp)
+{
+       struct timespec rmt;
+       ktime_t rem;
+
+       rem = hrtimer_expires_remaining(timer);
+       if (rem.tv64 <= 0)
+               return 0;
+       rmt = ktime_to_timespec(rem);
+
+       if (copy_to_user(rmtp, &rmt, sizeof(*rmtp)))
+               return -EFAULT;
+
+       return 1;
+}
+
+/*
+ * Re-entry point of a nanosleep that was interrupted by a signal:
+ * resume sleeping towards the absolute expiry saved in the restart
+ * block.
+ */
+long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
+{
+       struct hrtimer_sleeper t;
+       struct timespec __user  *rmtp;
+       int ret = 0;
+
+       hrtimer_init_on_stack(&t.timer, restart->nanosleep.clockid,
+                               HRTIMER_MODE_ABS);
+       hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
+
+       if (do_nanosleep(&t, HRTIMER_MODE_ABS))
+               goto out;
+
+       rmtp = restart->nanosleep.rmtp;
+       if (rmtp) {
+               ret = update_rmtp(&t.timer, rmtp);
+               if (ret <= 0)
+                       goto out;
+       }
+
+       /* The other values in restart are already filled in */
+       ret = -ERESTART_RESTARTBLOCK;
+out:
+       destroy_hrtimer_on_stack(&t.timer);
+       return ret;
+}
+
+/*
+ * Common nanosleep implementation: sleep for @rqtp on @clockid and, on
+ * interruption of a relative sleep, fill @rmtp and arm the restart
+ * block so the syscall can be transparently restarted.
+ */
+long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
+                      const enum hrtimer_mode mode, const clockid_t clockid)
+{
+       struct restart_block *restart;
+       struct hrtimer_sleeper t;
+       int ret = 0;
+       unsigned long slack;
+
+       /* Real-time and deadline tasks get no timer slack */
+       slack = current->timer_slack_ns;
+       if (dl_task(current) || rt_task(current))
+               slack = 0;
+
+       hrtimer_init_on_stack(&t.timer, clockid, mode);
+       hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
+       if (do_nanosleep(&t, mode))
+               goto out;
+
+       /* Absolute timers do not update the rmtp value and restart: */
+       if (mode == HRTIMER_MODE_ABS) {
+               ret = -ERESTARTNOHAND;
+               goto out;
+       }
+
+       if (rmtp) {
+               ret = update_rmtp(&t.timer, rmtp);
+               if (ret <= 0)
+                       goto out;
+       }
+
+       restart = &current_thread_info()->restart_block;
+       restart->fn = hrtimer_nanosleep_restart;
+       restart->nanosleep.clockid = t.timer.base->clockid;
+       restart->nanosleep.rmtp = rmtp;
+       restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);
+
+       ret = -ERESTART_RESTARTBLOCK;
+out:
+       destroy_hrtimer_on_stack(&t.timer);
+       return ret;
+}
+
+/*
+ * nanosleep(2): suspend execution for the (relative, monotonic)
+ * interval given in @rqtp; @rmtp receives the remaining time when the
+ * sleep is interrupted.
+ */
+SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
+               struct timespec __user *, rmtp)
+{
+       struct timespec tu;
+
+       if (copy_from_user(&tu, rqtp, sizeof(tu)))
+               return -EFAULT;
+
+       if (!timespec_valid(&tu))
+               return -EINVAL;
+
+       return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
+}
+
+/*
+ * Functions related to boot-time initialization:
+ */
+
+/* Set up the per-cpu hrtimer bases for @cpu */
+static void init_hrtimers_cpu(int cpu)
+{
+       struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
+       int i;
+
+       for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
+               cpu_base->clock_base[i].cpu_base = cpu_base;
+               timerqueue_init_head(&cpu_base->clock_base[i].active);
+       }
+
+       cpu_base->cpu = cpu;
+       hrtimer_init_hres(cpu_base);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+/*
+ * Move all timers from @old_base to @new_base. The caller holds the
+ * locks of both cpu bases (see migrate_hrtimers()).
+ */
+static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
+                               struct hrtimer_clock_base *new_base)
+{
+       struct hrtimer *timer;
+       struct timerqueue_node *node;
+
+       while ((node = timerqueue_getnext(&old_base->active))) {
+               timer = container_of(node, struct hrtimer, node);
+               BUG_ON(hrtimer_callback_running(timer));
+               debug_deactivate(timer);
+
+               /*
+                * Mark it as STATE_MIGRATE not INACTIVE otherwise the
+                * timer could be seen as !active and just vanish away
+                * under us on another CPU
+                */
+               __remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0);
+               timer->base = new_base;
+               /*
+                * Enqueue the timers on the new cpu. This does not
+                * reprogram the event device in case the timer
+                * expires before the earliest on this CPU, but we run
+                * hrtimer_interrupt after we migrated everything to
+                * sort out already expired timers and reprogram the
+                * event device.
+                */
+               enqueue_hrtimer(timer, new_base);
+
+               /* Clear the migration state bit */
+               timer->state &= ~HRTIMER_STATE_MIGRATE;
+       }
+}
+
+/* Take over all timers of the (offline) CPU @scpu onto the current CPU */
+static void migrate_hrtimers(int scpu)
+{
+       struct hrtimer_cpu_base *old_base, *new_base;
+       int i;
+
+       BUG_ON(cpu_online(scpu));
+       tick_cancel_sched_timer(scpu);
+
+       local_irq_disable();
+       old_base = &per_cpu(hrtimer_bases, scpu);
+       new_base = &__get_cpu_var(hrtimer_bases);
+       /*
+        * The caller is globally serialized and nobody else
+        * takes two locks at once, deadlock is not possible.
+        */
+       raw_spin_lock(&new_base->lock);
+       raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
+
+       for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
+               migrate_hrtimer_list(&old_base->clock_base[i],
+                                    &new_base->clock_base[i]);
+       }
+
+       raw_spin_unlock(&old_base->lock);
+       raw_spin_unlock(&new_base->lock);
+
+       /* Check, if we got expired work to do */
+       __hrtimer_peek_ahead_timers();
+       local_irq_enable();
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
+/*
+ * CPU hotplug notifier: set up the per-cpu bases when a CPU comes up
+ * and migrate its timers away when it goes down.
+ */
+static int hrtimer_cpu_notify(struct notifier_block *self,
+                                       unsigned long action, void *hcpu)
+{
+       int scpu = (long)hcpu;
+
+       switch (action) {
+
+       case CPU_UP_PREPARE:
+       case CPU_UP_PREPARE_FROZEN:
+               init_hrtimers_cpu(scpu);
+               break;
+
+#ifdef CONFIG_HOTPLUG_CPU
+       case CPU_DYING:
+       case CPU_DYING_FROZEN:
+               clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DYING, &scpu);
+               break;
+       case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
+       {
+               clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu);
+               migrate_hrtimers(scpu);
+               break;
+       }
+#endif
+
+       default:
+               break;
+       }
+
+       return NOTIFY_OK;
+}
+
+static struct notifier_block hrtimers_nb = {
+       .notifier_call = hrtimer_cpu_notify,
+};
+
+/* Boot-time init: set up the boot CPU's bases, register for hotplug
+ * events and wire up the hrtimer softirq. */
+void __init hrtimers_init(void)
+{
+       hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
+                         (void *)(long)smp_processor_id());
+       register_cpu_notifier(&hrtimers_nb);
+#ifdef CONFIG_HIGH_RES_TIMERS
+       open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
+#endif
+}
+
+/**
+ * schedule_hrtimeout_range_clock - sleep until timeout
+ * @expires:   timeout value (ktime_t)
+ * @delta:     slack in expires timeout (ktime_t)
+ * @mode:      timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
+ * @clock:     timer clock, CLOCK_MONOTONIC or CLOCK_REALTIME
+ *
+ * Returns 0 when the timeout expired, otherwise -EINTR.
+ */
+int __sched
+schedule_hrtimeout_range_clock(ktime_t *expires, unsigned long delta,
+                              const enum hrtimer_mode mode, int clock)
+{
+       struct hrtimer_sleeper t;
+
+       /*
+        * Optimize when a zero timeout value is given. It does not
+        * matter whether this is an absolute or a relative time.
+        */
+       if (expires && !expires->tv64) {
+               __set_current_state(TASK_RUNNING);
+               return 0;
+       }
+
+       /*
+        * A NULL parameter means "infinite"
+        */
+       if (!expires) {
+               schedule();
+               __set_current_state(TASK_RUNNING);
+               return -EINTR;
+       }
+
+       hrtimer_init_on_stack(&t.timer, clock, mode);
+       hrtimer_set_expires_range_ns(&t.timer, *expires, delta);
+
+       hrtimer_init_sleeper(&t, current);
+
+       hrtimer_start_expires(&t.timer, mode);
+       /* Already expired: the wakeup callback may not fire, don't wait */
+       if (!hrtimer_active(&t.timer))
+               t.task = NULL;
+
+       if (likely(t.task))
+               schedule();
+
+       hrtimer_cancel(&t.timer);
+       destroy_hrtimer_on_stack(&t.timer);
+
+       __set_current_state(TASK_RUNNING);
+
+       /* t.task was cleared by the wakeup callback iff the timer fired */
+       return !t.task ? 0 : -EINTR;
+}
+
+/**
+ * schedule_hrtimeout_range - sleep until timeout
+ * @expires:   timeout value (ktime_t)
+ * @delta:     slack in expires timeout (ktime_t)
+ * @mode:      timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
+ *
+ * Make the current task sleep until the given expiry time has
+ * elapsed. The routine will return immediately unless
+ * the current task state has been set (see set_current_state()).
+ *
+ * The @delta argument gives the kernel the freedom to schedule the
+ * actual wakeup to a time that is both power and performance friendly.
+ * The kernel gives the normal best effort behavior for "@expires+@delta",
+ * but may decide to fire the timer earlier, but no earlier than @expires.
+ *
+ * You can set the task state as follows -
+ *
+ * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to
+ * pass before the routine returns.
+ *
+ * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
+ * delivered to the current task.
+ *
+ * The current task state is guaranteed to be TASK_RUNNING when this
+ * routine returns.
+ *
+ * Returns 0 when the timer has expired otherwise -EINTR
+ */
+int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
+                                    const enum hrtimer_mode mode)
+{
+       return schedule_hrtimeout_range_clock(expires, delta, mode,
+                                             CLOCK_MONOTONIC);
+}
+EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);
+
+/**
+ * schedule_hrtimeout - sleep until timeout
+ * @expires:   timeout value (ktime_t)
+ * @mode:      timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
+ *
+ * A shorthand for schedule_hrtimeout_range() with zero slack.
+ *
+ * Make the current task sleep until the given expiry time has
+ * elapsed. The routine will return immediately unless
+ * the current task state has been set (see set_current_state()).
+ *
+ * You can set the task state as follows -
+ *
+ * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to
+ * pass before the routine returns.
+ *
+ * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
+ * delivered to the current task.
+ *
+ * The current task state is guaranteed to be TASK_RUNNING when this
+ * routine returns.
+ *
+ * Returns 0 when the timer has expired otherwise -EINTR
+ */
+int __sched schedule_hrtimeout(ktime_t *expires,
+                              const enum hrtimer_mode mode)
+{
+       return schedule_hrtimeout_range(expires, 0, mode);
+}
+EXPORT_SYMBOL_GPL(schedule_hrtimeout);
diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c
new file mode 100644 (file)
index 0000000..8d262b4
--- /dev/null
@@ -0,0 +1,301 @@
+/*
+ * linux/kernel/itimer.c
+ *
+ * Copyright (C) 1992 Darren Senn
+ */
+
+/* These are all the functions necessary to implement itimers */
+
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/syscalls.h>
+#include <linux/time.h>
+#include <linux/posix-timers.h>
+#include <linux/hrtimer.h>
+#include <trace/events/timer.h>
+
+#include <asm/uaccess.h>
+
+/**
+ * itimer_get_remtime - get remaining time for the timer
+ *
+ * @timer: the timer to read
+ *
+ * Returns the delta between the expiry time and now, which can be
+ * less than zero or 1 usec for a pending expired timer
+ */
+static struct timeval itimer_get_remtime(struct hrtimer *timer)
+{
+       ktime_t rem = hrtimer_get_remaining(timer);
+
+       /*
+        * Racy but safe: if the itimer expires after the above
+        * hrtimer_get_remaining() call but before this condition
+        * then we return 0 - which is correct.
+        */
+       if (hrtimer_active(timer)) {
+               if (rem.tv64 <= 0)
+                       rem.tv64 = NSEC_PER_USEC;
+       } else
+               rem.tv64 = 0;
+
+       return ktime_to_timeval(rem);
+}
+
+static void get_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
+                          struct itimerval *const value)
+{
+       cputime_t cval, cinterval;
+       struct cpu_itimer *it = &tsk->signal->it[clock_id];
+
+       spin_lock_irq(&tsk->sighand->siglock);
+
+       cval = it->expires;
+       cinterval = it->incr;
+       if (cval) {
+               struct task_cputime cputime;
+               cputime_t t;
+
+               thread_group_cputimer(tsk, &cputime);
+               if (clock_id == CPUCLOCK_PROF)
+                       t = cputime.utime + cputime.stime;
+               else
+                       /* CPUCLOCK_VIRT */
+                       t = cputime.utime;
+
+               if (cval < t)
+                       /* about to fire */
+                       cval = cputime_one_jiffy;
+               else
+                       cval = cval - t;
+       }
+
+       spin_unlock_irq(&tsk->sighand->siglock);
+
+       cputime_to_timeval(cval, &value->it_value);
+       cputime_to_timeval(cinterval, &value->it_interval);
+}
+
+int do_getitimer(int which, struct itimerval *value)
+{
+       struct task_struct *tsk = current;
+
+       switch (which) {
+       case ITIMER_REAL:
+               spin_lock_irq(&tsk->sighand->siglock);
+               value->it_value = itimer_get_remtime(&tsk->signal->real_timer);
+               value->it_interval =
+                       ktime_to_timeval(tsk->signal->it_real_incr);
+               spin_unlock_irq(&tsk->sighand->siglock);
+               break;
+       case ITIMER_VIRTUAL:
+               get_cpu_itimer(tsk, CPUCLOCK_VIRT, value);
+               break;
+       case ITIMER_PROF:
+               get_cpu_itimer(tsk, CPUCLOCK_PROF, value);
+               break;
+       default:
+               return(-EINVAL);
+       }
+       return 0;
+}
+
+SYSCALL_DEFINE2(getitimer, int, which, struct itimerval __user *, value)
+{
+       int error = -EFAULT;
+       struct itimerval get_buffer;
+
+       if (value) {
+               error = do_getitimer(which, &get_buffer);
+               if (!error &&
+                   copy_to_user(value, &get_buffer, sizeof(get_buffer)))
+                       error = -EFAULT;
+       }
+       return error;
+}
+
+
+/*
+ * The timer is automagically restarted, when interval != 0
+ */
+enum hrtimer_restart it_real_fn(struct hrtimer *timer)
+{
+       struct signal_struct *sig =
+               container_of(timer, struct signal_struct, real_timer);
+
+       trace_itimer_expire(ITIMER_REAL, sig->leader_pid, 0);
+       kill_pid_info(SIGALRM, SEND_SIG_PRIV, sig->leader_pid);
+
+       return HRTIMER_NORESTART;
+}
+
+static inline u32 cputime_sub_ns(cputime_t ct, s64 real_ns)
+{
+       struct timespec ts;
+       s64 cpu_ns;
+
+       cputime_to_timespec(ct, &ts);
+       cpu_ns = timespec_to_ns(&ts);
+
+       return (cpu_ns <= real_ns) ? 0 : cpu_ns - real_ns;
+}
+
+static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
+                          const struct itimerval *const value,
+                          struct itimerval *const ovalue)
+{
+       cputime_t cval, nval, cinterval, ninterval;
+       s64 ns_ninterval, ns_nval;
+       u32 error, incr_error;
+       struct cpu_itimer *it = &tsk->signal->it[clock_id];
+
+       nval = timeval_to_cputime(&value->it_value);
+       ns_nval = timeval_to_ns(&value->it_value);
+       ninterval = timeval_to_cputime(&value->it_interval);
+       ns_ninterval = timeval_to_ns(&value->it_interval);
+
+       error = cputime_sub_ns(nval, ns_nval);
+       incr_error = cputime_sub_ns(ninterval, ns_ninterval);
+
+       spin_lock_irq(&tsk->sighand->siglock);
+
+       cval = it->expires;
+       cinterval = it->incr;
+       if (cval || nval) {
+               if (nval > 0)
+                       nval += cputime_one_jiffy;
+               set_process_cpu_timer(tsk, clock_id, &nval, &cval);
+       }
+       it->expires = nval;
+       it->incr = ninterval;
+       it->error = error;
+       it->incr_error = incr_error;
+       trace_itimer_state(clock_id == CPUCLOCK_VIRT ?
+                          ITIMER_VIRTUAL : ITIMER_PROF, value, nval);
+
+       spin_unlock_irq(&tsk->sighand->siglock);
+
+       if (ovalue) {
+               cputime_to_timeval(cval, &ovalue->it_value);
+               cputime_to_timeval(cinterval, &ovalue->it_interval);
+       }
+}
+
+/*
+ * Returns true if the timeval is in canonical form
+ */
+#define timeval_valid(t) \
+       (((t)->tv_sec >= 0) && (((unsigned long) (t)->tv_usec) < USEC_PER_SEC))
+
+int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue)
+{
+       struct task_struct *tsk = current;
+       struct hrtimer *timer;
+       ktime_t expires;
+
+       /*
+        * Validate the timevals in value.
+        */
+       if (!timeval_valid(&value->it_value) ||
+           !timeval_valid(&value->it_interval))
+               return -EINVAL;
+
+       switch (which) {
+       case ITIMER_REAL:
+again:
+               spin_lock_irq(&tsk->sighand->siglock);
+               timer = &tsk->signal->real_timer;
+               if (ovalue) {
+                       ovalue->it_value = itimer_get_remtime(timer);
+                       ovalue->it_interval
+                               = ktime_to_timeval(tsk->signal->it_real_incr);
+               }
+               /* We are sharing ->siglock with it_real_fn() */
+               if (hrtimer_try_to_cancel(timer) < 0) {
+                       spin_unlock_irq(&tsk->sighand->siglock);
+                       goto again;
+               }
+               expires = timeval_to_ktime(value->it_value);
+               if (expires.tv64 != 0) {
+                       tsk->signal->it_real_incr =
+                               timeval_to_ktime(value->it_interval);
+                       hrtimer_start(timer, expires, HRTIMER_MODE_REL);
+               } else
+                       tsk->signal->it_real_incr.tv64 = 0;
+
+               trace_itimer_state(ITIMER_REAL, value, 0);
+               spin_unlock_irq(&tsk->sighand->siglock);
+               break;
+       case ITIMER_VIRTUAL:
+               set_cpu_itimer(tsk, CPUCLOCK_VIRT, value, ovalue);
+               break;
+       case ITIMER_PROF:
+               set_cpu_itimer(tsk, CPUCLOCK_PROF, value, ovalue);
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
+/**
+ * alarm_setitimer - set alarm in seconds
+ *
+ * @seconds:   number of seconds until alarm
+ *             0 disables the alarm
+ *
+ * Returns the remaining time in seconds of a pending timer or 0 when
+ * the timer is not active.
+ *
+ * On 32 bit machines the seconds value is limited to (INT_MAX/2) to avoid
+ * negative timeval settings which would cause immediate expiry.
+ */
+unsigned int alarm_setitimer(unsigned int seconds)
+{
+       struct itimerval it_new, it_old;
+
+#if BITS_PER_LONG < 64
+       if (seconds > INT_MAX)
+               seconds = INT_MAX;
+#endif
+       it_new.it_value.tv_sec = seconds;
+       it_new.it_value.tv_usec = 0;
+       it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
+
+       do_setitimer(ITIMER_REAL, &it_new, &it_old);
+
+       /*
+        * We can't return 0 if we have an alarm pending ...  And we'd
+        * better return too much than too little anyway
+        */
+       if ((!it_old.it_value.tv_sec && it_old.it_value.tv_usec) ||
+             it_old.it_value.tv_usec >= 500000)
+               it_old.it_value.tv_sec++;
+
+       return it_old.it_value.tv_sec;
+}
+
+SYSCALL_DEFINE3(setitimer, int, which, struct itimerval __user *, value,
+               struct itimerval __user *, ovalue)
+{
+       struct itimerval set_buffer, get_buffer;
+       int error;
+
+       if (value) {
+               if(copy_from_user(&set_buffer, value, sizeof(set_buffer)))
+                       return -EFAULT;
+       } else {
+               memset(&set_buffer, 0, sizeof(set_buffer));
+               printk_once(KERN_WARNING "%s calls setitimer() with new_value NULL pointer."
+                           " Misfeature support will be removed\n",
+                           current->comm);
+       }
+
+       error = do_setitimer(which, &set_buffer, ovalue ? &get_buffer : NULL);
+       if (error || !ovalue)
+               return error;
+
+       if (copy_to_user(ovalue, &get_buffer, sizeof(get_buffer)))
+               return -EFAULT;
+       return 0;
+}
index 33db43a..87a346f 100644 (file)
@@ -466,7 +466,8 @@ static DECLARE_DELAYED_WORK(sync_cmos_work, sync_cmos_clock);
 
 static void sync_cmos_clock(struct work_struct *work)
 {
-       struct timespec now, next;
+       struct timespec64 now;
+       struct timespec next;
        int fail = 1;
 
        /*
@@ -485,9 +486,9 @@ static void sync_cmos_clock(struct work_struct *work)
                return;
        }
 
-       getnstimeofday(&now);
+       getnstimeofday64(&now);
        if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec * 5) {
-               struct timespec adjust = now;
+               struct timespec adjust = timespec64_to_timespec(now);
 
                fail = -ENODEV;
                if (persistent_clock_is_local)
@@ -531,7 +532,7 @@ void ntp_notify_cmos_timer(void) { }
 /*
  * Propagate a new txc->status value into the NTP state:
  */
-static inline void process_adj_status(struct timex *txc, struct timespec *ts)
+static inline void process_adj_status(struct timex *txc, struct timespec64 *ts)
 {
        if ((time_status & STA_PLL) && !(txc->status & STA_PLL)) {
                time_state = TIME_OK;
@@ -554,7 +555,7 @@ static inline void process_adj_status(struct timex *txc, struct timespec *ts)
 
 
 static inline void process_adjtimex_modes(struct timex *txc,
-                                               struct timespec *ts,
+                                               struct timespec64 *ts,
                                                s32 *time_tai)
 {
        if (txc->modes & ADJ_STATUS)
@@ -640,7 +641,7 @@ int ntp_validate_timex(struct timex *txc)
  * adjtimex mainly allows reading (and writing, if superuser) of
  * kernel time-keeping variables. used by xntpd.
  */
-int __do_adjtimex(struct timex *txc, struct timespec *ts, s32 *time_tai)
+int __do_adjtimex(struct timex *txc, struct timespec64 *ts, s32 *time_tai)
 {
        int result;
 
@@ -684,7 +685,7 @@ int __do_adjtimex(struct timex *txc, struct timespec *ts, s32 *time_tai)
        /* fill PPS status fields */
        pps_fill_timex(txc);
 
-       txc->time.tv_sec = ts->tv_sec;
+       txc->time.tv_sec = (time_t)ts->tv_sec;
        txc->time.tv_usec = ts->tv_nsec;
        if (!(time_status & STA_NANO))
                txc->time.tv_usec /= NSEC_PER_USEC;
index 1950cb4..bbd102a 100644 (file)
@@ -7,6 +7,6 @@ extern void ntp_clear(void);
 extern u64 ntp_tick_length(void);
 extern int second_overflow(unsigned long secs);
 extern int ntp_validate_timex(struct timex *);
-extern int __do_adjtimex(struct timex *, struct timespec *, s32 *);
+extern int __do_adjtimex(struct timex *, struct timespec64 *, s32 *);
 extern void __hardpps(const struct timespec *, const struct timespec *);
 #endif /* _LINUX_NTP_INTERNAL_H */
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
new file mode 100644 (file)
index 0000000..3b89464
--- /dev/null
@@ -0,0 +1,1490 @@
+/*
+ * Implement CPU time clocks for the POSIX clock interface.
+ */
+
+#include <linux/sched.h>
+#include <linux/posix-timers.h>
+#include <linux/errno.h>
+#include <linux/math64.h>
+#include <asm/uaccess.h>
+#include <linux/kernel_stat.h>
+#include <trace/events/timer.h>
+#include <linux/random.h>
+#include <linux/tick.h>
+#include <linux/workqueue.h>
+
+/*
+ * Called after updating RLIMIT_CPU to run cpu timer and update
+ * tsk->signal->cputime_expires expiration cache if necessary. Needs
+ * siglock protection since other code may update expiration cache as
+ * well.
+ */
+void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
+{
+       cputime_t cputime = secs_to_cputime(rlim_new);
+
+       spin_lock_irq(&task->sighand->siglock);
+       set_process_cpu_timer(task, CPUCLOCK_PROF, &cputime, NULL);
+       spin_unlock_irq(&task->sighand->siglock);
+}
+
+static int check_clock(const clockid_t which_clock)
+{
+       int error = 0;
+       struct task_struct *p;
+       const pid_t pid = CPUCLOCK_PID(which_clock);
+
+       if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
+               return -EINVAL;
+
+       if (pid == 0)
+               return 0;
+
+       rcu_read_lock();
+       p = find_task_by_vpid(pid);
+       if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
+                  same_thread_group(p, current) : has_group_leader_pid(p))) {
+               error = -EINVAL;
+       }
+       rcu_read_unlock();
+
+       return error;
+}
+
+static inline unsigned long long
+timespec_to_sample(const clockid_t which_clock, const struct timespec *tp)
+{
+       unsigned long long ret;
+
+       ret = 0;                /* high half always zero when .cpu used */
+       if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
+               ret = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
+       } else {
+               ret = cputime_to_expires(timespec_to_cputime(tp));
+       }
+       return ret;
+}
+
+static void sample_to_timespec(const clockid_t which_clock,
+                              unsigned long long expires,
+                              struct timespec *tp)
+{
+       if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
+               *tp = ns_to_timespec(expires);
+       else
+               cputime_to_timespec((__force cputime_t)expires, tp);
+}
+
+/*
+ * Update expiry time from increment, and increase overrun count,
+ * given the current clock sample.
+ */
+static void bump_cpu_timer(struct k_itimer *timer,
+                          unsigned long long now)
+{
+       int i;
+       unsigned long long delta, incr;
+
+       if (timer->it.cpu.incr == 0)
+               return;
+
+       if (now < timer->it.cpu.expires)
+               return;
+
+       incr = timer->it.cpu.incr;
+       delta = now + incr - timer->it.cpu.expires;
+
+       /* Don't use (incr*2 < delta), incr*2 might overflow. */
+       for (i = 0; incr < delta - incr; i++)
+               incr = incr << 1;
+
+       for (; i >= 0; incr >>= 1, i--) {
+               if (delta < incr)
+                       continue;
+
+               timer->it.cpu.expires += incr;
+               timer->it_overrun += 1 << i;
+               delta -= incr;
+       }
+}
+
+/**
+ * task_cputime_zero - Check a task_cputime struct for all zero fields.
+ *
+ * @cputime:   The struct to compare.
+ *
+ * Checks @cputime to see if all fields are zero.  Returns true if all fields
+ * are zero, false if any field is nonzero.
+ */
+static inline int task_cputime_zero(const struct task_cputime *cputime)
+{
+       if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime)
+               return 1;
+       return 0;
+}
+
+static inline unsigned long long prof_ticks(struct task_struct *p)
+{
+       cputime_t utime, stime;
+
+       task_cputime(p, &utime, &stime);
+
+       return cputime_to_expires(utime + stime);
+}
+static inline unsigned long long virt_ticks(struct task_struct *p)
+{
+       cputime_t utime;
+
+       task_cputime(p, &utime, NULL);
+
+       return cputime_to_expires(utime);
+}
+
+static int
+posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
+{
+       int error = check_clock(which_clock);
+       if (!error) {
+               tp->tv_sec = 0;
+               tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
+               if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
+                       /*
+                        * If sched_clock is using a cycle counter, we
+                        * don't have any idea of its true resolution
+                        * exported, but it is much more than 1s/HZ.
+                        */
+                       tp->tv_nsec = 1;
+               }
+       }
+       return error;
+}
+
+static int
+posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
+{
+       /*
+        * You can never reset a CPU clock, but we check for other errors
+        * in the call before failing with EPERM.
+        */
+       int error = check_clock(which_clock);
+       if (error == 0) {
+               error = -EPERM;
+       }
+       return error;
+}
+
+
+/*
+ * Sample a per-thread clock for the given task.
+ */
+static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
+                           unsigned long long *sample)
+{
+       switch (CPUCLOCK_WHICH(which_clock)) {
+       default:
+               return -EINVAL;
+       case CPUCLOCK_PROF:
+               *sample = prof_ticks(p);
+               break;
+       case CPUCLOCK_VIRT:
+               *sample = virt_ticks(p);
+               break;
+       case CPUCLOCK_SCHED:
+               *sample = task_sched_runtime(p);
+               break;
+       }
+       return 0;
+}
+
+static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b)
+{
+       if (b->utime > a->utime)
+               a->utime = b->utime;
+
+       if (b->stime > a->stime)
+               a->stime = b->stime;
+
+       if (b->sum_exec_runtime > a->sum_exec_runtime)
+               a->sum_exec_runtime = b->sum_exec_runtime;
+}
+
+void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
+{
+       struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
+       struct task_cputime sum;
+       unsigned long flags;
+
+       if (!cputimer->running) {
+               /*
+                * The POSIX timer interface allows for absolute time expiry
+                * values through the TIMER_ABSTIME flag, therefore we have
+                * to synchronize the timer to the clock every time we start
+                * it.
+                */
+               thread_group_cputime(tsk, &sum);
+               raw_spin_lock_irqsave(&cputimer->lock, flags);
+               cputimer->running = 1;
+               update_gt_cputime(&cputimer->cputime, &sum);
+       } else
+               raw_spin_lock_irqsave(&cputimer->lock, flags);
+       *times = cputimer->cputime;
+       raw_spin_unlock_irqrestore(&cputimer->lock, flags);
+}
+
+/*
+ * Sample a process (thread group) clock for the given group_leader task.
+ * Must be called with task sighand lock held for safe while_each_thread()
+ * traversal.
+ */
+static int cpu_clock_sample_group(const clockid_t which_clock,
+                                 struct task_struct *p,
+                                 unsigned long long *sample)
+{
+       struct task_cputime cputime;
+
+       switch (CPUCLOCK_WHICH(which_clock)) {
+       default:
+               return -EINVAL;
+       case CPUCLOCK_PROF:
+               thread_group_cputime(p, &cputime);
+               *sample = cputime_to_expires(cputime.utime + cputime.stime);
+               break;
+       case CPUCLOCK_VIRT:
+               thread_group_cputime(p, &cputime);
+               *sample = cputime_to_expires(cputime.utime);
+               break;
+       case CPUCLOCK_SCHED:
+               thread_group_cputime(p, &cputime);
+               *sample = cputime.sum_exec_runtime;
+               break;
+       }
+       return 0;
+}
+
+static int posix_cpu_clock_get_task(struct task_struct *tsk,
+                                   const clockid_t which_clock,
+                                   struct timespec *tp)
+{
+       int err = -EINVAL;
+       unsigned long long rtn;
+
+       if (CPUCLOCK_PERTHREAD(which_clock)) {
+               if (same_thread_group(tsk, current))
+                       err = cpu_clock_sample(which_clock, tsk, &rtn);
+       } else {
+               unsigned long flags;
+               struct sighand_struct *sighand;
+
+               /*
+                * while_each_thread() is not yet entirely RCU safe,
+                * keep locking the group while sampling process
+                * clock for now.
+                */
+               sighand = lock_task_sighand(tsk, &flags);
+               if (!sighand)
+                       return err;
+
+               if (tsk == current || thread_group_leader(tsk))
+                       err = cpu_clock_sample_group(which_clock, tsk, &rtn);
+
+               unlock_task_sighand(tsk, &flags);
+       }
+
+       if (!err)
+               sample_to_timespec(which_clock, rtn, tp);
+
+       return err;
+}
+
+
+static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
+{
+       const pid_t pid = CPUCLOCK_PID(which_clock);
+       int err = -EINVAL;
+
+       if (pid == 0) {
+               /*
+                * Special case constant value for our own clocks.
+                * We don't have to do any lookup to find ourselves.
+                */
+               err = posix_cpu_clock_get_task(current, which_clock, tp);
+       } else {
+               /*
+                * Find the given PID, and validate that the caller
+                * should be able to see it.
+                */
+               struct task_struct *p;
+               rcu_read_lock();
+               p = find_task_by_vpid(pid);
+               if (p)
+                       err = posix_cpu_clock_get_task(p, which_clock, tp);
+               rcu_read_unlock();
+       }
+
+       return err;
+}
+
+
+/*
+ * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
+ * This is called from sys_timer_create() and do_cpu_nanosleep() with the
+ * new timer already all-zeros initialized.
+ */
+static int posix_cpu_timer_create(struct k_itimer *new_timer)
+{
+       int ret = 0;
+       const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
+       struct task_struct *p;
+
+       if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
+               return -EINVAL;
+
+       INIT_LIST_HEAD(&new_timer->it.cpu.entry);
+
+       rcu_read_lock();
+       if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
+               if (pid == 0) {
+                       p = current;
+               } else {
+                       p = find_task_by_vpid(pid);
+                       if (p && !same_thread_group(p, current))
+                               p = NULL;
+               }
+       } else {
+               if (pid == 0) {
+                       p = current->group_leader;
+               } else {
+                       p = find_task_by_vpid(pid);
+                       if (p && !has_group_leader_pid(p))
+                               p = NULL;
+               }
+       }
+       new_timer->it.cpu.task = p;
+       if (p) {
+               get_task_struct(p);
+       } else {
+               ret = -EINVAL;
+       }
+       rcu_read_unlock();
+
+       return ret;
+}
+
+/*
+ * Clean up a CPU-clock timer that is about to be destroyed.
+ * This is called from timer deletion with the timer already locked.
+ * If we return TIMER_RETRY, it's necessary to release the timer's lock
+ * and try again.  (This happens when the timer is in the middle of firing.)
+ */
+static int posix_cpu_timer_del(struct k_itimer *timer)
+{
+       int ret = 0;
+       unsigned long flags;
+       struct sighand_struct *sighand;
+       struct task_struct *p = timer->it.cpu.task;
+
+       WARN_ON_ONCE(p == NULL);
+
+       /*
+        * Protect against sighand release/switch in exit/exec and process/
+        * thread timer list entry concurrent read/writes.
+        */
+       sighand = lock_task_sighand(p, &flags);
+       if (unlikely(sighand == NULL)) {
+               /*
+                * We raced with the reaping of the task.
+                * The deletion should have cleared us off the list.
+                */
+               WARN_ON_ONCE(!list_empty(&timer->it.cpu.entry));
+       } else {
+               if (timer->it.cpu.firing)
+                       ret = TIMER_RETRY;
+               else
+                       list_del(&timer->it.cpu.entry);
+
+               unlock_task_sighand(p, &flags);
+       }
+
+       if (!ret)
+               put_task_struct(p);
+
+       return ret;
+}
+
+static void cleanup_timers_list(struct list_head *head)
+{
+       struct cpu_timer_list *timer, *next;
+
+       list_for_each_entry_safe(timer, next, head, entry)
+               list_del_init(&timer->entry);
+}
+
+/*
+ * Clean out CPU timers still ticking when a thread exited.  The task
+ * pointer is cleared, and the expiry time is replaced with the residual
+ * time for later timer_gettime calls to return.
+ * This must be called with the siglock held.
+ */
+static void cleanup_timers(struct list_head *head)
+{
+       cleanup_timers_list(head);
+       cleanup_timers_list(++head);
+       cleanup_timers_list(++head);
+}
+
+/*
+ * These are both called with the siglock held, when the current thread
+ * is being reaped.  When the final (leader) thread in the group is reaped,
+ * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
+ */
+void posix_cpu_timers_exit(struct task_struct *tsk)
+{
+       add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
+                                               sizeof(unsigned long long));
+       cleanup_timers(tsk->cpu_timers);
+
+}
+void posix_cpu_timers_exit_group(struct task_struct *tsk)
+{
+       cleanup_timers(tsk->signal->cpu_timers);
+}
+
+static inline int expires_gt(cputime_t expires, cputime_t new_exp)
+{
+       return expires == 0 || expires > new_exp;
+}
+
+/*
+ * Insert the timer on the appropriate list before any timers that
+ * expire later.  This must be called with the sighand lock held.
+ */
+static void arm_timer(struct k_itimer *timer)
+{
+       struct task_struct *p = timer->it.cpu.task;
+       struct list_head *head, *listpos;
+       struct task_cputime *cputime_expires;
+       struct cpu_timer_list *const nt = &timer->it.cpu;
+       struct cpu_timer_list *next;
+
+       if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
+               head = p->cpu_timers;
+               cputime_expires = &p->cputime_expires;
+       } else {
+               head = p->signal->cpu_timers;
+               cputime_expires = &p->signal->cputime_expires;
+       }
+       head += CPUCLOCK_WHICH(timer->it_clock);
+
+       listpos = head;
+       list_for_each_entry(next, head, entry) {
+               if (nt->expires < next->expires)
+                       break;
+               listpos = &next->entry;
+       }
+       list_add(&nt->entry, listpos);
+
+       if (listpos == head) {
+               unsigned long long exp = nt->expires;
+
+               /*
+                * We are the new earliest-expiring POSIX 1.b timer, hence
+                * need to update expiration cache. Take into account that
+                * for process timers we share expiration cache with itimers
+                * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
+                */
+
+               switch (CPUCLOCK_WHICH(timer->it_clock)) {
+               case CPUCLOCK_PROF:
+                       if (expires_gt(cputime_expires->prof_exp, expires_to_cputime(exp)))
+                               cputime_expires->prof_exp = expires_to_cputime(exp);
+                       break;
+               case CPUCLOCK_VIRT:
+                       if (expires_gt(cputime_expires->virt_exp, expires_to_cputime(exp)))
+                               cputime_expires->virt_exp = expires_to_cputime(exp);
+                       break;
+               case CPUCLOCK_SCHED:
+                       if (cputime_expires->sched_exp == 0 ||
+                           cputime_expires->sched_exp > exp)
+                               cputime_expires->sched_exp = exp;
+                       break;
+               }
+       }
+}
+
+/*
+ * The timer is locked, fire it and arrange for its reload.
+ */
+static void cpu_timer_fire(struct k_itimer *timer)
+{
+       if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
+               /*
+                * The user doesn't want any signal.
+                */
+               timer->it.cpu.expires = 0;
+       } else if (unlikely(timer->sigq == NULL)) {
+               /*
+                * This is a special case for clock_nanosleep,
+                * not a normal timer from sys_timer_create.
+                */
+               wake_up_process(timer->it_process);
+               timer->it.cpu.expires = 0;
+       } else if (timer->it.cpu.incr == 0) {
+               /*
+                * One-shot timer.  Clear it as soon as it's fired.
+                */
+               posix_timer_event(timer, 0);
+               timer->it.cpu.expires = 0;
+       } else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
+               /*
+                * The signal did not get queued because the signal
+                * was ignored, so we won't get any callback to
+                * reload the timer.  But we need to keep it
+                * ticking in case the signal is deliverable next time.
+                */
+               posix_cpu_timer_schedule(timer);
+       }
+}
+
+/*
+ * Sample a process (thread group) timer for the given group_leader task.
+ * Must be called with task sighand lock held for safe while_each_thread()
+ * traversal.
+ */
+static int cpu_timer_sample_group(const clockid_t which_clock,
+                                 struct task_struct *p,
+                                 unsigned long long *sample)
+{
+       struct task_cputime cputime;
+
+       thread_group_cputimer(p, &cputime);
+       switch (CPUCLOCK_WHICH(which_clock)) {
+       default:
+               return -EINVAL;
+       case CPUCLOCK_PROF:
+               *sample = cputime_to_expires(cputime.utime + cputime.stime);
+               break;
+       case CPUCLOCK_VIRT:
+               *sample = cputime_to_expires(cputime.utime);
+               break;
+       case CPUCLOCK_SCHED:
+               *sample = cputime.sum_exec_runtime + task_delta_exec(p);
+               break;
+       }
+       return 0;
+}
+
+#ifdef CONFIG_NO_HZ_FULL
+static void nohz_kick_work_fn(struct work_struct *work)
+{
+       tick_nohz_full_kick_all();
+}
+
+static DECLARE_WORK(nohz_kick_work, nohz_kick_work_fn);
+
+/*
+ * We need the IPIs to be sent from sane process context.
+ * The posix cpu timers are always set with irqs disabled.
+ */
+static void posix_cpu_timer_kick_nohz(void)
+{
+       if (context_tracking_is_enabled())
+               schedule_work(&nohz_kick_work);
+}
+
+bool posix_cpu_timers_can_stop_tick(struct task_struct *tsk)
+{
+       if (!task_cputime_zero(&tsk->cputime_expires))
+               return false;
+
+       if (tsk->signal->cputimer.running)
+               return false;
+
+       return true;
+}
+#else
+static inline void posix_cpu_timer_kick_nohz(void) { }
+#endif
+
+/*
+ * Guts of sys_timer_settime for CPU timers.
+ * This is called with the timer locked and interrupts disabled.
+ * If we return TIMER_RETRY, it's necessary to release the timer's lock
+ * and try again.  (This happens when the timer is in the middle of firing.)
+ */
+static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
+                              struct itimerspec *new, struct itimerspec *old)
+{
+       unsigned long flags;
+       struct sighand_struct *sighand;
+       struct task_struct *p = timer->it.cpu.task;
+       unsigned long long old_expires, new_expires, old_incr, val;
+       int ret;
+
+       WARN_ON_ONCE(p == NULL);
+
+       new_expires = timespec_to_sample(timer->it_clock, &new->it_value);
+
+       /*
+        * Protect against sighand release/switch in exit/exec and p->cpu_timers
+        * and p->signal->cpu_timers read/write in arm_timer()
+        */
+       sighand = lock_task_sighand(p, &flags);
+       /*
+        * If p has just been reaped, we can no
+        * longer get any information about it at all.
+        */
+       if (unlikely(sighand == NULL)) {
+               return -ESRCH;
+       }
+
+       /*
+        * Disarm any old timer after extracting its expiry time.
+        */
+       WARN_ON_ONCE(!irqs_disabled());
+
+       ret = 0;
+       old_incr = timer->it.cpu.incr;
+       old_expires = timer->it.cpu.expires;
+       if (unlikely(timer->it.cpu.firing)) {
+               timer->it.cpu.firing = -1;
+               ret = TIMER_RETRY;
+       } else
+               list_del_init(&timer->it.cpu.entry);
+
+       /*
+        * We need to sample the current value to convert the new
+        * value from to relative and absolute, and to convert the
+        * old value from absolute to relative.  To set a process
+        * timer, we need a sample to balance the thread expiry
+        * times (in arm_timer).  With an absolute time, we must
+        * check if it's already passed.  In short, we need a sample.
+        */
+       if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
+               cpu_clock_sample(timer->it_clock, p, &val);
+       } else {
+               cpu_timer_sample_group(timer->it_clock, p, &val);
+       }
+
+       if (old) {
+               if (old_expires == 0) {
+                       old->it_value.tv_sec = 0;
+                       old->it_value.tv_nsec = 0;
+               } else {
+                       /*
+                        * Update the timer in case it has
+                        * overrun already.  If it has,
+                        * we'll report it as having overrun
+                        * and with the next reloaded timer
+                        * already ticking, though we are
+                        * swallowing that pending
+                        * notification here to install the
+                        * new setting.
+                        */
+                       bump_cpu_timer(timer, val);
+                       if (val < timer->it.cpu.expires) {
+                               old_expires = timer->it.cpu.expires - val;
+                               sample_to_timespec(timer->it_clock,
+                                                  old_expires,
+                                                  &old->it_value);
+                       } else {
+                               old->it_value.tv_nsec = 1;
+                               old->it_value.tv_sec = 0;
+                       }
+               }
+       }
+
+       if (unlikely(ret)) {
+               /*
+                * We are colliding with the timer actually firing.
+                * Punt after filling in the timer's old value, and
+                * disable this firing since we are already reporting
+                * it as an overrun (thanks to bump_cpu_timer above).
+                */
+               unlock_task_sighand(p, &flags);
+               goto out;
+       }
+
+       if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
+               new_expires += val;
+       }
+
+       /*
+        * Install the new expiry time (or zero).
+        * For a timer with no notification action, we don't actually
+        * arm the timer (we'll just fake it for timer_gettime).
+        */
+       timer->it.cpu.expires = new_expires;
+       if (new_expires != 0 && val < new_expires) {
+               arm_timer(timer);
+       }
+
+       unlock_task_sighand(p, &flags);
+       /*
+        * Install the new reload setting, and
+        * set up the signal and overrun bookkeeping.
+        */
+       timer->it.cpu.incr = timespec_to_sample(timer->it_clock,
+                                               &new->it_interval);
+
+       /*
+        * This acts as a modification timestamp for the timer,
+        * so any automatic reload attempt will punt on seeing
+        * that we have reset the timer manually.
+        */
+       timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
+               ~REQUEUE_PENDING;
+       timer->it_overrun_last = 0;
+       timer->it_overrun = -1;
+
+       if (new_expires != 0 && !(val < new_expires)) {
+               /*
+                * The designated time already passed, so we notify
+                * immediately, even if the thread never runs to
+                * accumulate more time on this clock.
+                */
+               cpu_timer_fire(timer);
+       }
+
+       ret = 0;
+ out:
+       if (old) {
+               sample_to_timespec(timer->it_clock,
+                                  old_incr, &old->it_interval);
+       }
+       if (!ret)
+               posix_cpu_timer_kick_nohz();
+       return ret;
+}
+
+static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
+{
+       unsigned long long now;
+       struct task_struct *p = timer->it.cpu.task;
+
+       WARN_ON_ONCE(p == NULL);
+
+       /*
+        * Easy part: convert the reload time.
+        */
+       sample_to_timespec(timer->it_clock,
+                          timer->it.cpu.incr, &itp->it_interval);
+
+       if (timer->it.cpu.expires == 0) {       /* Timer not armed at all.  */
+               itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
+               return;
+       }
+
+       /*
+        * Sample the clock to take the difference with the expiry time.
+        */
+       if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
+               cpu_clock_sample(timer->it_clock, p, &now);
+       } else {
+               struct sighand_struct *sighand;
+               unsigned long flags;
+
+               /*
+                * Protect against sighand release/switch in exit/exec and
+                * also make timer sampling safe if it ends up calling
+                * thread_group_cputime().
+                */
+               sighand = lock_task_sighand(p, &flags);
+               if (unlikely(sighand == NULL)) {
+                       /*
+                        * The process has been reaped.
+                        * We can't even collect a sample any more.
+                        * Call the timer disarmed, nothing else to do.
+                        */
+                       timer->it.cpu.expires = 0;
+                       sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
+                                          &itp->it_value);
+               } else {
+                       cpu_timer_sample_group(timer->it_clock, p, &now);
+                       unlock_task_sighand(p, &flags);
+               }
+       }
+
+       if (now < timer->it.cpu.expires) {
+               sample_to_timespec(timer->it_clock,
+                                  timer->it.cpu.expires - now,
+                                  &itp->it_value);
+       } else {
+               /*
+                * The timer should have expired already, but the firing
+                * hasn't taken place yet.  Say it's just about to expire.
+                */
+               itp->it_value.tv_nsec = 1;
+               itp->it_value.tv_sec = 0;
+       }
+}
+
+static unsigned long long
+check_timers_list(struct list_head *timers,
+                 struct list_head *firing,
+                 unsigned long long curr)
+{
+       int maxfire = 20;
+
+       while (!list_empty(timers)) {
+               struct cpu_timer_list *t;
+
+               t = list_first_entry(timers, struct cpu_timer_list, entry);
+
+               if (!--maxfire || curr < t->expires)
+                       return t->expires;
+
+               t->firing = 1;
+               list_move_tail(&t->entry, firing);
+       }
+
+       return 0;
+}
+
+/*
+ * Check for any per-thread CPU timers that have fired and move them off
+ * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
+ * tsk->it_*_expires values to reflect the remaining thread CPU timers.
+ */
+static void check_thread_timers(struct task_struct *tsk,
+                               struct list_head *firing)
+{
+       struct list_head *timers = tsk->cpu_timers;
+       struct signal_struct *const sig = tsk->signal;
+       struct task_cputime *tsk_expires = &tsk->cputime_expires;
+       unsigned long long expires;
+       unsigned long soft;
+
+       expires = check_timers_list(timers, firing, prof_ticks(tsk));
+       tsk_expires->prof_exp = expires_to_cputime(expires);
+
+       expires = check_timers_list(++timers, firing, virt_ticks(tsk));
+       tsk_expires->virt_exp = expires_to_cputime(expires);
+
+       tsk_expires->sched_exp = check_timers_list(++timers, firing,
+                                                  tsk->se.sum_exec_runtime);
+
+       /*
+        * Check for the special case thread timers.
+        */
+       soft = ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_cur);
+       if (soft != RLIM_INFINITY) {
+               unsigned long hard =
+                       ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);
+
+               if (hard != RLIM_INFINITY &&
+                   tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
+                       /*
+                        * At the hard limit, we just die.
+                        * No need to calculate anything else now.
+                        */
+                       __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
+                       return;
+               }
+               if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
+                       /*
+                        * At the soft limit, send a SIGXCPU every second.
+                        */
+                       if (soft < hard) {
+                               soft += USEC_PER_SEC;
+                               sig->rlim[RLIMIT_RTTIME].rlim_cur = soft;
+                       }
+                       printk(KERN_INFO
+                               "RT Watchdog Timeout: %s[%d]\n",
+                               tsk->comm, task_pid_nr(tsk));
+                       __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
+               }
+       }
+}
+
+static void stop_process_timers(struct signal_struct *sig)
+{
+       struct thread_group_cputimer *cputimer = &sig->cputimer;
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&cputimer->lock, flags);
+       cputimer->running = 0;
+       raw_spin_unlock_irqrestore(&cputimer->lock, flags);
+}
+
+static u32 onecputick;
+
+static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
+                            unsigned long long *expires,
+                            unsigned long long cur_time, int signo)
+{
+       if (!it->expires)
+               return;
+
+       if (cur_time >= it->expires) {
+               if (it->incr) {
+                       it->expires += it->incr;
+                       it->error += it->incr_error;
+                       if (it->error >= onecputick) {
+                               it->expires -= cputime_one_jiffy;
+                               it->error -= onecputick;
+                       }
+               } else {
+                       it->expires = 0;
+               }
+
+               trace_itimer_expire(signo == SIGPROF ?
+                                   ITIMER_PROF : ITIMER_VIRTUAL,
+                                   tsk->signal->leader_pid, cur_time);
+               __group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
+       }
+
+       if (it->expires && (!*expires || it->expires < *expires)) {
+               *expires = it->expires;
+       }
+}
+
+/*
+ * Check for any per-thread CPU timers that have fired and move them
+ * off the tsk->*_timers list onto the firing list.  Per-thread timers
+ * have already been taken off.
+ */
+static void check_process_timers(struct task_struct *tsk,
+                                struct list_head *firing)
+{
+       struct signal_struct *const sig = tsk->signal;
+       unsigned long long utime, ptime, virt_expires, prof_expires;
+       unsigned long long sum_sched_runtime, sched_expires;
+       struct list_head *timers = sig->cpu_timers;
+       struct task_cputime cputime;
+       unsigned long soft;
+
+       /*
+        * Collect the current process totals.
+        */
+       thread_group_cputimer(tsk, &cputime);
+       utime = cputime_to_expires(cputime.utime);
+       ptime = utime + cputime_to_expires(cputime.stime);
+       sum_sched_runtime = cputime.sum_exec_runtime;
+
+       prof_expires = check_timers_list(timers, firing, ptime);
+       virt_expires = check_timers_list(++timers, firing, utime);
+       sched_expires = check_timers_list(++timers, firing, sum_sched_runtime);
+
+       /*
+        * Check for the special case process timers.
+        */
+       check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime,
+                        SIGPROF);
+       check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
+                        SIGVTALRM);
+       soft = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
+       if (soft != RLIM_INFINITY) {
+               unsigned long psecs = cputime_to_secs(ptime);
+               unsigned long hard =
+                       ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
+               cputime_t x;
+               if (psecs >= hard) {
+                       /*
+                        * At the hard limit, we just die.
+                        * No need to calculate anything else now.
+                        */
+                       __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
+                       return;
+               }
+               if (psecs >= soft) {
+                       /*
+                        * At the soft limit, send a SIGXCPU every second.
+                        */
+                       __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
+                       if (soft < hard) {
+                               soft++;
+                               sig->rlim[RLIMIT_CPU].rlim_cur = soft;
+                       }
+               }
+               x = secs_to_cputime(soft);
+               if (!prof_expires || x < prof_expires) {
+                       prof_expires = x;
+               }
+       }
+
+       sig->cputime_expires.prof_exp = expires_to_cputime(prof_expires);
+       sig->cputime_expires.virt_exp = expires_to_cputime(virt_expires);
+       sig->cputime_expires.sched_exp = sched_expires;
+       if (task_cputime_zero(&sig->cputime_expires))
+               stop_process_timers(sig);
+}
+
+/*
+ * This is called from the signal code (via do_schedule_next_timer)
+ * when the last timer signal was delivered and we have to reload the timer.
+ */
+void posix_cpu_timer_schedule(struct k_itimer *timer)
+{
+       struct sighand_struct *sighand;
+       unsigned long flags;
+       struct task_struct *p = timer->it.cpu.task;
+       unsigned long long now;
+
+       WARN_ON_ONCE(p == NULL);
+
+       /*
+        * Fetch the current sample and update the timer's expiry time.
+        */
+       if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
+               cpu_clock_sample(timer->it_clock, p, &now);
+               bump_cpu_timer(timer, now);
+               if (unlikely(p->exit_state))
+                       goto out;
+
+               /* Protect timer list r/w in arm_timer() */
+               sighand = lock_task_sighand(p, &flags);
+               if (!sighand)
+                       goto out;
+       } else {
+               /*
+                * Protect arm_timer() and timer sampling in case of call to
+                * thread_group_cputime().
+                */
+               sighand = lock_task_sighand(p, &flags);
+               if (unlikely(sighand == NULL)) {
+                       /*
+                        * The process has been reaped.
+                        * We can't even collect a sample any more.
+                        */
+                       timer->it.cpu.expires = 0;
+                       goto out;
+               } else if (unlikely(p->exit_state) && thread_group_empty(p)) {
+                       unlock_task_sighand(p, &flags);
+                       /* Optimizations: if the process is dying, no need to rearm */
+                       goto out;
+               }
+               cpu_timer_sample_group(timer->it_clock, p, &now);
+               bump_cpu_timer(timer, now);
+               /* Leave the sighand locked for the call below.  */
+       }
+
+       /*
+        * Now re-arm for the new expiry time.
+        */
+       WARN_ON_ONCE(!irqs_disabled());
+       arm_timer(timer);
+       unlock_task_sighand(p, &flags);
+
+       /* Kick full dynticks CPUs in case they need to tick on the new timer */
+       posix_cpu_timer_kick_nohz();
+out:
+       timer->it_overrun_last = timer->it_overrun;
+       timer->it_overrun = -1;
+       ++timer->it_requeue_pending;
+}
+
+/**
+ * task_cputime_expired - Compare two task_cputime entities.
+ *
+ * @sample:    The task_cputime structure to be checked for expiration.
+ * @expires:   Expiration times, against which @sample will be checked.
+ *
+ * Checks @sample against @expires to see if any field of @sample has expired.
+ * Returns true if any field of the former is greater than the corresponding
+ * field of the latter if the latter field is set.  Otherwise returns false.
+ */
+static inline int task_cputime_expired(const struct task_cputime *sample,
+                                       const struct task_cputime *expires)
+{
+       if (expires->utime && sample->utime >= expires->utime)
+               return 1;
+       if (expires->stime && sample->utime + sample->stime >= expires->stime)
+               return 1;
+       if (expires->sum_exec_runtime != 0 &&
+           sample->sum_exec_runtime >= expires->sum_exec_runtime)
+               return 1;
+       return 0;
+}
+
+/**
+ * fastpath_timer_check - POSIX CPU timers fast path.
+ *
+ * @tsk:       The task (thread) being checked.
+ *
+ * Check the task and thread group timers.  If both are zero (there are no
+ * timers set) return false.  Otherwise snapshot the task and thread group
+ * timers and compare them with the corresponding expiration times.  Return
+ * true if a timer has expired, else return false.
+ */
+static inline int fastpath_timer_check(struct task_struct *tsk)
+{
+       struct signal_struct *sig;
+       cputime_t utime, stime;
+
+       task_cputime(tsk, &utime, &stime);
+
+       if (!task_cputime_zero(&tsk->cputime_expires)) {
+               struct task_cputime task_sample = {
+                       .utime = utime,
+                       .stime = stime,
+                       .sum_exec_runtime = tsk->se.sum_exec_runtime
+               };
+
+               if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
+                       return 1;
+       }
+
+       sig = tsk->signal;
+       if (sig->cputimer.running) {
+               struct task_cputime group_sample;
+
+               raw_spin_lock(&sig->cputimer.lock);
+               group_sample = sig->cputimer.cputime;
+               raw_spin_unlock(&sig->cputimer.lock);
+
+               if (task_cputime_expired(&group_sample, &sig->cputime_expires))
+                       return 1;
+       }
+
+       return 0;
+}
+
+/*
+ * This is called from the timer interrupt handler.  The irq handler has
+ * already updated our counts.  We need to check if any timers fire now.
+ * Interrupts are disabled.
+ */
+void run_posix_cpu_timers(struct task_struct *tsk)
+{
+       LIST_HEAD(firing);
+       struct k_itimer *timer, *next;
+       unsigned long flags;
+
+       WARN_ON_ONCE(!irqs_disabled());
+
+       /*
+        * The fast path checks that there are no expired thread or thread
+        * group timers.  If that's so, just return.
+        */
+       if (!fastpath_timer_check(tsk))
+               return;
+
+       if (!lock_task_sighand(tsk, &flags))
+               return;
+       /*
+        * Here we take off tsk->signal->cpu_timers[N] and
+        * tsk->cpu_timers[N] all the timers that are firing, and
+        * put them on the firing list.
+        */
+       check_thread_timers(tsk, &firing);
+       /*
+        * If there are any active process wide timers (POSIX 1.b, itimers,
+        * RLIMIT_CPU) cputimer must be running.
+        */
+       if (tsk->signal->cputimer.running)
+               check_process_timers(tsk, &firing);
+
+       /*
+        * We must release these locks before taking any timer's lock.
+        * There is a potential race with timer deletion here, as the
+        * siglock now protects our private firing list.  We have set
+        * the firing flag in each timer, so that a deletion attempt
+        * that gets the timer lock before we do will give it up and
+        * spin until we've taken care of that timer below.
+        */
+       unlock_task_sighand(tsk, &flags);
+
+       /*
+        * Now that all the timers on our list have the firing flag,
+        * no one will touch their list entries but us.  We'll take
+        * each timer's lock before clearing its firing flag, so no
+        * timer call will interfere.
+        */
+       list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
+               int cpu_firing;
+
+               spin_lock(&timer->it_lock);
+               list_del_init(&timer->it.cpu.entry);
+               cpu_firing = timer->it.cpu.firing;
+               timer->it.cpu.firing = 0;
+               /*
+                * The firing flag is -1 if we collided with a reset
+                * of the timer, which already reported this
+                * almost-firing as an overrun.  So don't generate an event.
+                */
+               if (likely(cpu_firing >= 0))
+                       cpu_timer_fire(timer);
+               spin_unlock(&timer->it_lock);
+       }
+}
+
+/*
+ * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
+ * The tsk->sighand->siglock must be held by the caller.
+ */
+void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
+                          cputime_t *newval, cputime_t *oldval)
+{
+       unsigned long long now;
+
+       WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
+       cpu_timer_sample_group(clock_idx, tsk, &now);
+
+       if (oldval) {
+               /*
+                * We are setting itimer. The *oldval is absolute and we update
+                * it to be relative, *newval argument is relative and we update
+                * it to be absolute.
+                */
+               if (*oldval) {
+                       if (*oldval <= now) {
+                               /* Just about to fire. */
+                               *oldval = cputime_one_jiffy;
+                       } else {
+                               *oldval -= now;
+                       }
+               }
+
+               if (!*newval)
+                       goto out;
+               *newval += now;
+       }
+
+       /*
+        * Update expiration cache if we are the earliest timer, or eventually
+        * RLIMIT_CPU limit is earlier than prof_exp cpu timer expire.
+        */
+       switch (clock_idx) {
+       case CPUCLOCK_PROF:
+               if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval))
+                       tsk->signal->cputime_expires.prof_exp = *newval;
+               break;
+       case CPUCLOCK_VIRT:
+               if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval))
+                       tsk->signal->cputime_expires.virt_exp = *newval;
+               break;
+       }
+out:
+       posix_cpu_timer_kick_nohz();
+}
+
+static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
+                           struct timespec *rqtp, struct itimerspec *it)
+{
+       struct k_itimer timer;
+       int error;
+
+       /*
+        * Set up a temporary timer and then wait for it to go off.
+        */
+       memset(&timer, 0, sizeof timer);
+       spin_lock_init(&timer.it_lock);
+       timer.it_clock = which_clock;
+       timer.it_overrun = -1;
+       error = posix_cpu_timer_create(&timer);
+       timer.it_process = current;
+       if (!error) {
+               static struct itimerspec zero_it;
+
+               memset(it, 0, sizeof *it);
+               it->it_value = *rqtp;
+
+               spin_lock_irq(&timer.it_lock);
+               error = posix_cpu_timer_set(&timer, flags, it, NULL);
+               if (error) {
+                       spin_unlock_irq(&timer.it_lock);
+                       return error;
+               }
+
+               while (!signal_pending(current)) {
+                       if (timer.it.cpu.expires == 0) {
+                               /*
+                                * Our timer fired and was reset, below
+                                * deletion can not fail.
+                                */
+                               posix_cpu_timer_del(&timer);
+                               spin_unlock_irq(&timer.it_lock);
+                               return 0;
+                       }
+
+                       /*
+                        * Block until cpu_timer_fire (or a signal) wakes us.
+                        */
+                       __set_current_state(TASK_INTERRUPTIBLE);
+                       spin_unlock_irq(&timer.it_lock);
+                       schedule();
+                       spin_lock_irq(&timer.it_lock);
+               }
+
+               /*
+                * We were interrupted by a signal.
+                */
+               sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
+               error = posix_cpu_timer_set(&timer, 0, &zero_it, it);
+               if (!error) {
+                       /*
+                        * Timer is now unarmed, deletion can not fail.
+                        */
+                       posix_cpu_timer_del(&timer);
+               }
+               spin_unlock_irq(&timer.it_lock);
+
+               while (error == TIMER_RETRY) {
+                       /*
+                        * We need to handle case when timer was or is in the
+                        * middle of firing. In other cases we already freed
+                        * resources.
+                        */
+                       spin_lock_irq(&timer.it_lock);
+                       error = posix_cpu_timer_del(&timer);
+                       spin_unlock_irq(&timer.it_lock);
+               }
+
+               if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
+                       /*
+                        * It actually did fire already.
+                        */
+                       return 0;
+               }
+
+               error = -ERESTART_RESTARTBLOCK;
+       }
+
+       return error;
+}
+
+static long posix_cpu_nsleep_restart(struct restart_block *restart_block);
+
+static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
+                           struct timespec *rqtp, struct timespec __user *rmtp)
+{
+       struct restart_block *restart_block =
+               &current_thread_info()->restart_block;
+       struct itimerspec it;
+       int error;
+
+       /*
+        * Diagnose required errors first.
+        */
+       if (CPUCLOCK_PERTHREAD(which_clock) &&
+           (CPUCLOCK_PID(which_clock) == 0 ||
+            CPUCLOCK_PID(which_clock) == current->pid))
+               return -EINVAL;
+
+       error = do_cpu_nanosleep(which_clock, flags, rqtp, &it);
+
+       if (error == -ERESTART_RESTARTBLOCK) {
+
+               if (flags & TIMER_ABSTIME)
+                       return -ERESTARTNOHAND;
+               /*
+                * Report back to the user the time still remaining.
+                */
+               if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
+                       return -EFAULT;
+
+               restart_block->fn = posix_cpu_nsleep_restart;
+               restart_block->nanosleep.clockid = which_clock;
+               restart_block->nanosleep.rmtp = rmtp;
+               restart_block->nanosleep.expires = timespec_to_ns(rqtp);
+       }
+       return error;
+}
+
+static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
+{
+       clockid_t which_clock = restart_block->nanosleep.clockid;
+       struct timespec t;
+       struct itimerspec it;
+       int error;
+
+       t = ns_to_timespec(restart_block->nanosleep.expires);
+
+       error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it);
+
+       if (error == -ERESTART_RESTARTBLOCK) {
+               struct timespec __user *rmtp = restart_block->nanosleep.rmtp;
+               /*
+                * Report back to the user the time still remaining.
+                */
+               if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
+                       return -EFAULT;
+
+               restart_block->nanosleep.expires = timespec_to_ns(&t);
+       }
+       return error;
+
+}
+
+#define PROCESS_CLOCK  MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
+#define THREAD_CLOCK   MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)
+
+static int process_cpu_clock_getres(const clockid_t which_clock,
+                                   struct timespec *tp)
+{
+       return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
+}
+static int process_cpu_clock_get(const clockid_t which_clock,
+                                struct timespec *tp)
+{
+       return posix_cpu_clock_get(PROCESS_CLOCK, tp);
+}
+static int process_cpu_timer_create(struct k_itimer *timer)
+{
+       timer->it_clock = PROCESS_CLOCK;
+       return posix_cpu_timer_create(timer);
+}
+static int process_cpu_nsleep(const clockid_t which_clock, int flags,
+                             struct timespec *rqtp,
+                             struct timespec __user *rmtp)
+{
+       return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp);
+}
+static long process_cpu_nsleep_restart(struct restart_block *restart_block)
+{
+       return -EINVAL;
+}
+static int thread_cpu_clock_getres(const clockid_t which_clock,
+                                  struct timespec *tp)
+{
+       return posix_cpu_clock_getres(THREAD_CLOCK, tp);
+}
+static int thread_cpu_clock_get(const clockid_t which_clock,
+                               struct timespec *tp)
+{
+       return posix_cpu_clock_get(THREAD_CLOCK, tp);
+}
+static int thread_cpu_timer_create(struct k_itimer *timer)
+{
+       timer->it_clock = THREAD_CLOCK;
+       return posix_cpu_timer_create(timer);
+}
+
+struct k_clock clock_posix_cpu = {
+       .clock_getres   = posix_cpu_clock_getres,
+       .clock_set      = posix_cpu_clock_set,
+       .clock_get      = posix_cpu_clock_get,
+       .timer_create   = posix_cpu_timer_create,
+       .nsleep         = posix_cpu_nsleep,
+       .nsleep_restart = posix_cpu_nsleep_restart,
+       .timer_set      = posix_cpu_timer_set,
+       .timer_del      = posix_cpu_timer_del,
+       .timer_get      = posix_cpu_timer_get,
+};
+
+static __init int init_posix_cpu_timers(void)
+{
+       struct k_clock process = {
+               .clock_getres   = process_cpu_clock_getres,
+               .clock_get      = process_cpu_clock_get,
+               .timer_create   = process_cpu_timer_create,
+               .nsleep         = process_cpu_nsleep,
+               .nsleep_restart = process_cpu_nsleep_restart,
+       };
+       struct k_clock thread = {
+               .clock_getres   = thread_cpu_clock_getres,
+               .clock_get      = thread_cpu_clock_get,
+               .timer_create   = thread_cpu_timer_create,
+       };
+       struct timespec ts;
+
+       posix_timers_register_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
+       posix_timers_register_clock(CLOCK_THREAD_CPUTIME_ID, &thread);
+
+       cputime_to_timespec(cputime_one_jiffy, &ts);
+       onecputick = ts.tv_nsec;
+       WARN_ON(ts.tv_sec != 0);
+
+       return 0;
+}
+__initcall(init_posix_cpu_timers);
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
new file mode 100644 (file)
index 0000000..42b463a
--- /dev/null
@@ -0,0 +1,1123 @@
+/*
+ * linux/kernel/posix-timers.c
+ *
+ *
+ * 2002-10-15  Posix Clocks & timers
+ *                           by George Anzinger george@mvista.com
+ *
+ *                          Copyright (C) 2002 2003 by MontaVista Software.
+ *
+ * 2004-06-01  Fix CLOCK_REALTIME clock/timer TIMER_ABSTIME bug.
+ *                          Copyright (C) 2004 Boris Hu
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * MontaVista Software | 1237 East Arques Avenue | Sunnyvale | CA 94085 | USA
+ */
+
+/* These are all the functions necessary to implement
+ * POSIX clocks & timers
+ */
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/mutex.h>
+
+#include <asm/uaccess.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/compiler.h>
+#include <linux/hash.h>
+#include <linux/posix-clock.h>
+#include <linux/posix-timers.h>
+#include <linux/syscalls.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <linux/export.h>
+#include <linux/hashtable.h>
+
+#include "timekeeping.h"
+
+/*
+ * Management arrays for POSIX timers. Timers are now kept in static hash table
+ * with 512 entries.
+ * Timer ids are allocated by local routine, which selects proper hash head by
+ * key, constructed from current->signal address and per signal struct counter.
+ * This keeps timer ids unique per process, but now they can intersect between
+ * processes.
+ */
+
+/*
+ * Lets keep our timers in a slab cache :-)
+ */
+static struct kmem_cache *posix_timers_cache;
+
+static DEFINE_HASHTABLE(posix_timers_hashtable, 9);
+static DEFINE_SPINLOCK(hash_lock);
+
+/*
+ * we assume that the new SIGEV_THREAD_ID shares no bits with the other
+ * SIGEV values.  Here we put out an error if this assumption fails.
+ */
+#if SIGEV_THREAD_ID != (SIGEV_THREAD_ID & \
+                       ~(SIGEV_SIGNAL | SIGEV_NONE | SIGEV_THREAD))
+#error "SIGEV_THREAD_ID must not share bit with other SIGEV values!"
+#endif
+
+/*
+ * parisc wants ENOTSUP instead of EOPNOTSUPP
+ */
+#ifndef ENOTSUP
+# define ENANOSLEEP_NOTSUP EOPNOTSUPP
+#else
+# define ENANOSLEEP_NOTSUP ENOTSUP
+#endif
+
+/*
+ * The timer ID is turned into a timer address by posix_timer_by_id().
+ * Verifying a valid ID consists of:
+ *
+ * a) checking that posix_timer_by_id() returns other than NULL.
+ * b) checking that the timer id matches the one in the timer itself.
+ * c) that the timer owner is in the caller's thread group.
+ */
+
+/*
+ * CLOCKs: The POSIX standard calls for a couple of clocks and allows us
+ *         to implement others.  This structure defines the various
+ *         clocks.
+ *
+ * RESOLUTION: Clock resolution is used to round up timer and interval
+ *         times, NOT to report clock times, which are reported with as
+ *         much resolution as the system can muster.  In some cases this
+ *         resolution may depend on the underlying clock hardware and
+ *         may not be quantifiable until run time, and only then is the
+ *         necessary code written.  The standard says we should say
+ *         something about this issue in the documentation...
+ *
+ * FUNCTIONS: The CLOCKs structure defines possible functions to
+ *         handle various clock functions.
+ *
+ *         The standard POSIX timer management code assumes the
+ *         following: 1.) The k_itimer struct (sched.h) is used for
+ *         the timer.  2.) The list, it_lock, it_clock, it_id and
+ *         it_pid fields are not modified by timer code.
+ *
+ * Permissions: It is assumed that the clock_settime() function defined
+ *         for each clock will take care of permission checks.  Some
+ *         clocks may be settable by any user (i.e. local process
+ *         clocks) others not.  Currently the only settable clock we
+ *         have is CLOCK_REALTIME and its high res counterpart, both of
+ *         which we beg off on and pass to do_sys_settimeofday().
+ */
+
+static struct k_clock posix_clocks[MAX_CLOCKS];
+
+/*
+ * These ones are defined below.
+ */
+static int common_nsleep(const clockid_t, int flags, struct timespec *t,
+                        struct timespec __user *rmtp);
+static int common_timer_create(struct k_itimer *new_timer);
+static void common_timer_get(struct k_itimer *, struct itimerspec *);
+static int common_timer_set(struct k_itimer *, int,
+                           struct itimerspec *, struct itimerspec *);
+static int common_timer_del(struct k_itimer *timer);
+
+static enum hrtimer_restart posix_timer_fn(struct hrtimer *data);
+
+static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags);
+
+#define lock_timer(tid, flags)                                            \
+({     struct k_itimer *__timr;                                           \
+       __cond_lock(&__timr->it_lock, __timr = __lock_timer(tid, flags));  \
+       __timr;                                                            \
+})
+
+static int hash(struct signal_struct *sig, unsigned int nr)
+{
+       return hash_32(hash32_ptr(sig) ^ nr, HASH_BITS(posix_timers_hashtable));
+}
+
+static struct k_itimer *__posix_timers_find(struct hlist_head *head,
+                                           struct signal_struct *sig,
+                                           timer_t id)
+{
+       struct k_itimer *timer;
+
+       hlist_for_each_entry_rcu(timer, head, t_hash) {
+               if ((timer->it_signal == sig) && (timer->it_id == id))
+                       return timer;
+       }
+       return NULL;
+}
+
+static struct k_itimer *posix_timer_by_id(timer_t id)
+{
+       struct signal_struct *sig = current->signal;
+       struct hlist_head *head = &posix_timers_hashtable[hash(sig, id)];
+
+       return __posix_timers_find(head, sig, id);
+}
+
+static int posix_timer_add(struct k_itimer *timer)
+{
+       struct signal_struct *sig = current->signal;
+       int first_free_id = sig->posix_timer_id;
+       struct hlist_head *head;
+       int ret = -ENOENT;
+
+       do {
+               spin_lock(&hash_lock);
+               head = &posix_timers_hashtable[hash(sig, sig->posix_timer_id)];
+               if (!__posix_timers_find(head, sig, sig->posix_timer_id)) {
+                       hlist_add_head_rcu(&timer->t_hash, head);
+                       ret = sig->posix_timer_id;
+               }
+               if (++sig->posix_timer_id < 0)
+                       sig->posix_timer_id = 0;
+               if ((sig->posix_timer_id == first_free_id) && (ret == -ENOENT))
+                       /* Loop over all possible ids completed */
+                       ret = -EAGAIN;
+               spin_unlock(&hash_lock);
+       } while (ret == -ENOENT);
+       return ret;
+}
+
+static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
+{
+       spin_unlock_irqrestore(&timr->it_lock, flags);
+}
+
+/* Get clock_realtime */
+static int posix_clock_realtime_get(clockid_t which_clock, struct timespec *tp)
+{
+       ktime_get_real_ts(tp);
+       return 0;
+}
+
+/* Set clock_realtime */
+static int posix_clock_realtime_set(const clockid_t which_clock,
+                                   const struct timespec *tp)
+{
+       return do_sys_settimeofday(tp, NULL);
+}
+
+static int posix_clock_realtime_adj(const clockid_t which_clock,
+                                   struct timex *t)
+{
+       return do_adjtimex(t);
+}
+
+/*
+ * Get monotonic time for posix timers
+ */
+static int posix_ktime_get_ts(clockid_t which_clock, struct timespec *tp)
+{
+       ktime_get_ts(tp);
+       return 0;
+}
+
+/*
+ * Get monotonic-raw time for posix timers
+ */
+static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec *tp)
+{
+       getrawmonotonic(tp);
+       return 0;
+}
+
+
+static int posix_get_realtime_coarse(clockid_t which_clock, struct timespec *tp)
+{
+       *tp = current_kernel_time();
+       return 0;
+}
+
+static int posix_get_monotonic_coarse(clockid_t which_clock,
+                                               struct timespec *tp)
+{
+       *tp = get_monotonic_coarse();
+       return 0;
+}
+
+static int posix_get_coarse_res(const clockid_t which_clock, struct timespec *tp)
+{
+       *tp = ktime_to_timespec(KTIME_LOW_RES);
+       return 0;
+}
+
+static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
+{
+       get_monotonic_boottime(tp);
+       return 0;
+}
+
+static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
+{
+       timekeeping_clocktai(tp);
+       return 0;
+}
+
+/*
+ * Initialize everything, well, just everything in Posix clocks/timers ;)
+ */
+static __init int init_posix_timers(void)
+{
+       struct k_clock clock_realtime = {
+               .clock_getres   = hrtimer_get_res,
+               .clock_get      = posix_clock_realtime_get,
+               .clock_set      = posix_clock_realtime_set,
+               .clock_adj      = posix_clock_realtime_adj,
+               .nsleep         = common_nsleep,
+               .nsleep_restart = hrtimer_nanosleep_restart,
+               .timer_create   = common_timer_create,
+               .timer_set      = common_timer_set,
+               .timer_get      = common_timer_get,
+               .timer_del      = common_timer_del,
+       };
+       struct k_clock clock_monotonic = {
+               .clock_getres   = hrtimer_get_res,
+               .clock_get      = posix_ktime_get_ts,
+               .nsleep         = common_nsleep,
+               .nsleep_restart = hrtimer_nanosleep_restart,
+               .timer_create   = common_timer_create,
+               .timer_set      = common_timer_set,
+               .timer_get      = common_timer_get,
+               .timer_del      = common_timer_del,
+       };
+       struct k_clock clock_monotonic_raw = {
+               .clock_getres   = hrtimer_get_res,
+               .clock_get      = posix_get_monotonic_raw,
+       };
+       struct k_clock clock_realtime_coarse = {
+               .clock_getres   = posix_get_coarse_res,
+               .clock_get      = posix_get_realtime_coarse,
+       };
+       struct k_clock clock_monotonic_coarse = {
+               .clock_getres   = posix_get_coarse_res,
+               .clock_get      = posix_get_monotonic_coarse,
+       };
+       struct k_clock clock_tai = {
+               .clock_getres   = hrtimer_get_res,
+               .clock_get      = posix_get_tai,
+               .nsleep         = common_nsleep,
+               .nsleep_restart = hrtimer_nanosleep_restart,
+               .timer_create   = common_timer_create,
+               .timer_set      = common_timer_set,
+               .timer_get      = common_timer_get,
+               .timer_del      = common_timer_del,
+       };
+       struct k_clock clock_boottime = {
+               .clock_getres   = hrtimer_get_res,
+               .clock_get      = posix_get_boottime,
+               .nsleep         = common_nsleep,
+               .nsleep_restart = hrtimer_nanosleep_restart,
+               .timer_create   = common_timer_create,
+               .timer_set      = common_timer_set,
+               .timer_get      = common_timer_get,
+               .timer_del      = common_timer_del,
+       };
+
+       posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
+       posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
+       posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
+       posix_timers_register_clock(CLOCK_REALTIME_COARSE, &clock_realtime_coarse);
+       posix_timers_register_clock(CLOCK_MONOTONIC_COARSE, &clock_monotonic_coarse);
+       posix_timers_register_clock(CLOCK_BOOTTIME, &clock_boottime);
+       posix_timers_register_clock(CLOCK_TAI, &clock_tai);
+
+       posix_timers_cache = kmem_cache_create("posix_timers_cache",
+                                       sizeof (struct k_itimer), 0, SLAB_PANIC,
+                                       NULL);
+       return 0;
+}
+
+__initcall(init_posix_timers);
+
+static void schedule_next_timer(struct k_itimer *timr)
+{
+       struct hrtimer *timer = &timr->it.real.timer;
+
+       if (timr->it.real.interval.tv64 == 0)
+               return;
+
+       timr->it_overrun += (unsigned int) hrtimer_forward(timer,
+                                               timer->base->get_time(),
+                                               timr->it.real.interval);
+
+       timr->it_overrun_last = timr->it_overrun;
+       timr->it_overrun = -1;
+       ++timr->it_requeue_pending;
+       hrtimer_restart(timer);
+}
+
+/*
+ * This function is exported for use by the signal deliver code.  It is
+ * called just prior to the info block being released and passes that
+ * block to us.  Its function is to update the overrun entry AND to
+ * restart the timer.  It should only be called if the timer is to be
+ * restarted (i.e. we have flagged this in the sys_private entry of the
+ * info block).
+ *
+ * To protect against the timer going away while the interrupt is queued,
+ * we require that the it_requeue_pending flag be set.
+ */
+void do_schedule_next_timer(struct siginfo *info)
+{
+       struct k_itimer *timr;
+       unsigned long flags;
+
+       timr = lock_timer(info->si_tid, &flags);
+
+       if (timr && timr->it_requeue_pending == info->si_sys_private) {
+               if (timr->it_clock < 0)
+                       posix_cpu_timer_schedule(timr);
+               else
+                       schedule_next_timer(timr);
+
+               info->si_overrun += timr->it_overrun_last;
+       }
+
+       if (timr)
+               unlock_timer(timr, flags);
+}
+
+int posix_timer_event(struct k_itimer *timr, int si_private)
+{
+       struct task_struct *task;
+       int shared, ret = -1;
+       /*
+        * FIXME: if ->sigq is queued we can race with
+        * dequeue_signal()->do_schedule_next_timer().
+        *
+        * If dequeue_signal() sees the "right" value of
+        * si_sys_private it calls do_schedule_next_timer().
+        * We re-queue ->sigq and drop ->it_lock().
+        * do_schedule_next_timer() locks the timer
+        * and re-schedules it while ->sigq is pending.
+        * Not really bad, but not what we want.
+        */
+       timr->sigq->info.si_sys_private = si_private;
+
+       rcu_read_lock();
+       task = pid_task(timr->it_pid, PIDTYPE_PID);
+       if (task) {
+               shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
+               ret = send_sigqueue(timr->sigq, task, shared);
+       }
+       rcu_read_unlock();
+       /* If we failed to send the signal the timer stops. */
+       return ret > 0;
+}
+EXPORT_SYMBOL_GPL(posix_timer_event);
+
+/*
+ * This function gets called when a POSIX.1b interval timer expires.  It
+ * is used as a callback from the kernel internal timer.  The
+ * run_timer_list code ALWAYS calls with interrupts on.
+ *
+ * This code is for CLOCK_REALTIME* and CLOCK_MONOTONIC* timers.
+ */
+static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
+{
+       struct k_itimer *timr;
+       unsigned long flags;
+       int si_private = 0;
+       enum hrtimer_restart ret = HRTIMER_NORESTART;
+
+       timr = container_of(timer, struct k_itimer, it.real.timer);
+       spin_lock_irqsave(&timr->it_lock, flags);
+
+       if (timr->it.real.interval.tv64 != 0)
+               si_private = ++timr->it_requeue_pending;
+
+       if (posix_timer_event(timr, si_private)) {
+               /*
+                * signal was not sent because of sig_ignore;
+                * we will not get a call back to restart it AND
+                * it should be restarted.
+                */
+               if (timr->it.real.interval.tv64 != 0) {
+                       ktime_t now = hrtimer_cb_get_time(timer);
+
+                       /*
+                        * FIXME: What we really want, is to stop this
+                        * timer completely and restart it in case the
+                        * SIG_IGN is removed. This is a non trivial
+                        * change which involves sighand locking
+                        * (sigh !), which we don't want to do late in
+                        * the release cycle.
+                        *
+                        * For now we just let timers with an interval
+                        * less than a jiffie expire every jiffie to
+                        * avoid softirq starvation in case of SIG_IGN
+                        * and a very small interval, which would put
+                        * the timer right back on the softirq pending
+                        * list. By moving now ahead of time we trick
+                        * hrtimer_forward() to expire the timer
+                        * later, while we still maintain the overrun
+                        * accuracy, but have some inconsistency in
+                        * the timer_gettime() case. This is at least
+                        * better than a starved softirq. A more
+                        * complex fix which solves also another related
+                        * inconsistency is already in the pipeline.
+                        */
+#ifdef CONFIG_HIGH_RES_TIMERS
+                       {
+                               ktime_t kj = ktime_set(0, NSEC_PER_SEC / HZ);
+
+                               if (timr->it.real.interval.tv64 < kj.tv64)
+                                       now = ktime_add(now, kj);
+                       }
+#endif
+                       timr->it_overrun += (unsigned int)
+                               hrtimer_forward(timer, now,
+                                               timr->it.real.interval);
+                       ret = HRTIMER_RESTART;
+                       ++timr->it_requeue_pending;
+               }
+       }
+
+       unlock_timer(timr, flags);
+       return ret;
+}
+
+static struct pid *good_sigevent(sigevent_t * event)
+{
+       struct task_struct *rtn = current->group_leader;
+
+       if ((event->sigev_notify & SIGEV_THREAD_ID ) &&
+               (!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
+                !same_thread_group(rtn, current) ||
+                (event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_SIGNAL))
+               return NULL;
+
+       if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
+           ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
+               return NULL;
+
+       return task_pid(rtn);
+}
+
+void posix_timers_register_clock(const clockid_t clock_id,
+                                struct k_clock *new_clock)
+{
+       if ((unsigned) clock_id >= MAX_CLOCKS) {
+               printk(KERN_WARNING "POSIX clock register failed for clock_id %d\n",
+                      clock_id);
+               return;
+       }
+
+       if (!new_clock->clock_get) {
+               printk(KERN_WARNING "POSIX clock id %d lacks clock_get()\n",
+                      clock_id);
+               return;
+       }
+       if (!new_clock->clock_getres) {
+               printk(KERN_WARNING "POSIX clock id %d lacks clock_getres()\n",
+                      clock_id);
+               return;
+       }
+
+       posix_clocks[clock_id] = *new_clock;
+}
+EXPORT_SYMBOL_GPL(posix_timers_register_clock);
+
+static struct k_itimer * alloc_posix_timer(void)
+{
+       struct k_itimer *tmr;
+       tmr = kmem_cache_zalloc(posix_timers_cache, GFP_KERNEL);
+       if (!tmr)
+               return tmr;
+       if (unlikely(!(tmr->sigq = sigqueue_alloc()))) {
+               kmem_cache_free(posix_timers_cache, tmr);
+               return NULL;
+       }
+       memset(&tmr->sigq->info, 0, sizeof(siginfo_t));
+       return tmr;
+}
+
+static void k_itimer_rcu_free(struct rcu_head *head)
+{
+       struct k_itimer *tmr = container_of(head, struct k_itimer, it.rcu);
+
+       kmem_cache_free(posix_timers_cache, tmr);
+}
+
+#define IT_ID_SET      1
+#define IT_ID_NOT_SET  0
+static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
+{
+       if (it_id_set) {
+               unsigned long flags;
+               spin_lock_irqsave(&hash_lock, flags);
+               hlist_del_rcu(&tmr->t_hash);
+               spin_unlock_irqrestore(&hash_lock, flags);
+       }
+       put_pid(tmr->it_pid);
+       sigqueue_free(tmr->sigq);
+       call_rcu(&tmr->it.rcu, k_itimer_rcu_free);
+}
+
+static struct k_clock *clockid_to_kclock(const clockid_t id)
+{
+       if (id < 0)
+               return (id & CLOCKFD_MASK) == CLOCKFD ?
+                       &clock_posix_dynamic : &clock_posix_cpu;
+
+       if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
+               return NULL;
+       return &posix_clocks[id];
+}
+
+static int common_timer_create(struct k_itimer *new_timer)
+{
+       hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock, 0);
+       return 0;
+}
+
+/* Create a POSIX.1b interval timer. */
+
+SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
+               struct sigevent __user *, timer_event_spec,
+               timer_t __user *, created_timer_id)
+{
+       struct k_clock *kc = clockid_to_kclock(which_clock);
+       struct k_itimer *new_timer;
+       int error, new_timer_id;
+       sigevent_t event;
+       int it_id_set = IT_ID_NOT_SET;
+
+       if (!kc)
+               return -EINVAL;
+       if (!kc->timer_create)
+               return -EOPNOTSUPP;
+
+       new_timer = alloc_posix_timer();
+       if (unlikely(!new_timer))
+               return -EAGAIN;
+
+       spin_lock_init(&new_timer->it_lock);
+       new_timer_id = posix_timer_add(new_timer);
+       if (new_timer_id < 0) {
+               error = new_timer_id;
+               goto out;
+       }
+
+       it_id_set = IT_ID_SET;
+       new_timer->it_id = (timer_t) new_timer_id;
+       new_timer->it_clock = which_clock;
+       new_timer->it_overrun = -1;
+
+       if (timer_event_spec) {
+               if (copy_from_user(&event, timer_event_spec, sizeof (event))) {
+                       error = -EFAULT;
+                       goto out;
+               }
+               rcu_read_lock();
+               new_timer->it_pid = get_pid(good_sigevent(&event));
+               rcu_read_unlock();
+               if (!new_timer->it_pid) {
+                       error = -EINVAL;
+                       goto out;
+               }
+       } else {
+               event.sigev_notify = SIGEV_SIGNAL;
+               event.sigev_signo = SIGALRM;
+               event.sigev_value.sival_int = new_timer->it_id;
+               new_timer->it_pid = get_pid(task_tgid(current));
+       }
+
+       new_timer->it_sigev_notify     = event.sigev_notify;
+       new_timer->sigq->info.si_signo = event.sigev_signo;
+       new_timer->sigq->info.si_value = event.sigev_value;
+       new_timer->sigq->info.si_tid   = new_timer->it_id;
+       new_timer->sigq->info.si_code  = SI_TIMER;
+
+       if (copy_to_user(created_timer_id,
+                        &new_timer_id, sizeof (new_timer_id))) {
+               error = -EFAULT;
+               goto out;
+       }
+
+       error = kc->timer_create(new_timer);
+       if (error)
+               goto out;
+
+       spin_lock_irq(&current->sighand->siglock);
+       new_timer->it_signal = current->signal;
+       list_add(&new_timer->list, &current->signal->posix_timers);
+       spin_unlock_irq(&current->sighand->siglock);
+
+       return 0;
+       /*
+        * In the case of the timer belonging to another task, after
+        * the task is unlocked, the timer is owned by the other task
+        * and may cease to exist at any time.  Don't use or modify
+        * new_timer after the unlock call.
+        */
+out:
+       release_posix_timer(new_timer, it_id_set);
+       return error;
+}
+
+/*
+ * Locking issues: We need to protect the result of the id look up until
+ * we get the timer locked down so it is not deleted under us.  The
+ * removal is done under the hash spinlock so we use that here to bridge
+ * the find to the timer lock.  To avoid a deadlock, the timer id MUST
+ * be released without holding the timer lock.
+ */
+static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags)
+{
+       struct k_itimer *timr;
+
+       /*
+        * timer_t could be any type >= int and we want to make sure any
+        * @timer_id outside positive int range fails lookup.
+        */
+       if ((unsigned long long)timer_id > INT_MAX)
+               return NULL;
+
+       rcu_read_lock();
+       timr = posix_timer_by_id(timer_id);
+       if (timr) {
+               spin_lock_irqsave(&timr->it_lock, *flags);
+               if (timr->it_signal == current->signal) {
+                       rcu_read_unlock();
+                       return timr;
+               }
+               spin_unlock_irqrestore(&timr->it_lock, *flags);
+       }
+       rcu_read_unlock();
+
+       return NULL;
+}
+
+/*
+ * Get the time remaining on a POSIX.1b interval timer.  This function
+ * is ALWAYS called with spin_lock_irq on the timer, thus it must not
+ * mess with irq.
+ *
+ * We have a couple of messes to clean up here.  First there is the case
+ * of a timer that has a requeue pending.  These timers should appear to
+ * be in the timer list with an expiry as if we were to requeue them
+ * now.
+ *
+ * The second issue is the SIGEV_NONE timer which may be active but is
+ * not really ever put in the timer list (to save system resources).
+ * This timer may be expired, and if so, we will do it here.  Otherwise
+ * it is the same as a requeue pending timer WRT to what we should
+ * report.
+ */
+static void
+common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
+{
+       ktime_t now, remaining, iv;
+       struct hrtimer *timer = &timr->it.real.timer;
+
+       memset(cur_setting, 0, sizeof(struct itimerspec));
+
+       iv = timr->it.real.interval;
+
+       /* interval timer ? */
+       if (iv.tv64)
+               cur_setting->it_interval = ktime_to_timespec(iv);
+       else if (!hrtimer_active(timer) &&
+                (timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
+               return;
+
+       now = timer->base->get_time();
+
+       /*
+        * When a requeue is pending or this is a SIGEV_NONE
+        * timer move the expiry time forward by intervals, so
+        * expiry is > now.
+        */
+       if (iv.tv64 && (timr->it_requeue_pending & REQUEUE_PENDING ||
+           (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
+               timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv);
+
+       remaining = ktime_sub(hrtimer_get_expires(timer), now);
+       /* Return 0 only, when the timer is expired and not pending */
+       if (remaining.tv64 <= 0) {
+               /*
+                * A single shot SIGEV_NONE timer must return 0, when
+                * it is expired !
+                */
+               if ((timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
+                       cur_setting->it_value.tv_nsec = 1;
+       } else
+               cur_setting->it_value = ktime_to_timespec(remaining);
+}
+
+/* Get the time remaining on a POSIX.1b interval timer. */
+SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
+               struct itimerspec __user *, setting)
+{
+       struct itimerspec cur_setting;
+       struct k_itimer *timr;
+       struct k_clock *kc;
+       unsigned long flags;
+       int ret = 0;
+
+       timr = lock_timer(timer_id, &flags);
+       if (!timr)
+               return -EINVAL;
+
+       kc = clockid_to_kclock(timr->it_clock);
+       if (WARN_ON_ONCE(!kc || !kc->timer_get))
+               ret = -EINVAL;
+       else
+               kc->timer_get(timr, &cur_setting);
+
+       unlock_timer(timr, flags);
+
+       if (!ret && copy_to_user(setting, &cur_setting, sizeof (cur_setting)))
+               return -EFAULT;
+
+       return ret;
+}
+
+/*
+ * Get the number of overruns of a POSIX.1b interval timer.  This is to
+ * be the overrun of the timer last delivered.  At the same time we are
+ * accumulating overruns on the next timer.  The overrun is frozen when
+ * the signal is delivered, either at the notify time (if the info block
+ * is not queued) or at the actual delivery time (as we are informed by
+ * the call back to do_schedule_next_timer().  So all we need to do is
+ * to pick up the frozen overrun.
+ */
+SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
+{
+       struct k_itimer *timr;
+       int overrun;
+       unsigned long flags;
+
+       timr = lock_timer(timer_id, &flags);
+       if (!timr)
+               return -EINVAL;
+
+       overrun = timr->it_overrun_last;
+       unlock_timer(timr, flags);
+
+       return overrun;
+}
+
+/* Set a POSIX.1b interval timer. */
+/* timr->it_lock is taken. */
+static int
+common_timer_set(struct k_itimer *timr, int flags,
+                struct itimerspec *new_setting, struct itimerspec *old_setting)
+{
+       struct hrtimer *timer = &timr->it.real.timer;
+       enum hrtimer_mode mode;
+
+       if (old_setting)
+               common_timer_get(timr, old_setting);
+
+       /* disable the timer */
+       timr->it.real.interval.tv64 = 0;
+       /*
+        * careful here.  If smp we could be in the "fire" routine which will
+        * be spinning as we hold the lock.  But this is ONLY an SMP issue.
+        */
+       if (hrtimer_try_to_cancel(timer) < 0)
+               return TIMER_RETRY;
+
+       timr->it_requeue_pending = (timr->it_requeue_pending + 2) &
+               ~REQUEUE_PENDING;
+       timr->it_overrun_last = 0;
+
+       /* switch off the timer when it_value is zero */
+       if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec)
+               return 0;
+
+       mode = flags & TIMER_ABSTIME ? HRTIMER_MODE_ABS : HRTIMER_MODE_REL;
+       hrtimer_init(&timr->it.real.timer, timr->it_clock, mode);
+       timr->it.real.timer.function = posix_timer_fn;
+
+       hrtimer_set_expires(timer, timespec_to_ktime(new_setting->it_value));
+
+       /* Convert interval */
+       timr->it.real.interval = timespec_to_ktime(new_setting->it_interval);
+
+       /* SIGEV_NONE timers are not queued ! See common_timer_get */
+       if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) {
+               /* Setup correct expiry time for relative timers */
+               if (mode == HRTIMER_MODE_REL) {
+                       hrtimer_add_expires(timer, timer->base->get_time());
+               }
+               return 0;
+       }
+
+       hrtimer_start_expires(timer, mode);
+       return 0;
+}
+
+/* Set a POSIX.1b interval timer */
+SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
+               const struct itimerspec __user *, new_setting,
+               struct itimerspec __user *, old_setting)
+{
+       struct k_itimer *timr;
+       struct itimerspec new_spec, old_spec;
+       int error = 0;
+       unsigned long flag;
+       /* Only fetch the old value on the first pass; see TIMER_RETRY below. */
+       struct itimerspec *rtn = old_setting ? &old_spec : NULL;
+       struct k_clock *kc;
+
+       if (!new_setting)
+               return -EINVAL;
+
+       if (copy_from_user(&new_spec, new_setting, sizeof (new_spec)))
+               return -EFAULT;
+
+       /* Reject denormalized/negative times before taking the timer lock. */
+       if (!timespec_valid(&new_spec.it_interval) ||
+           !timespec_valid(&new_spec.it_value))
+               return -EINVAL;
+retry:
+       timr = lock_timer(timer_id, &flag);
+       if (!timr)
+               return -EINVAL;
+
+       kc = clockid_to_kclock(timr->it_clock);
+       if (WARN_ON_ONCE(!kc || !kc->timer_set))
+               error = -EINVAL;
+       else
+               error = kc->timer_set(timr, flags, &new_spec, rtn);
+
+       unlock_timer(timr, flag);
+       /* TIMER_RETRY: the timer callback was running; retry with lock dropped. */
+       if (error == TIMER_RETRY) {
+               rtn = NULL;     // We already got the old time...
+               goto retry;
+       }
+
+       if (old_setting && !error &&
+           copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
+               error = -EFAULT;
+
+       return error;
+}
+
+/*
+ * Default deletion: clear the interval so a concurrent expiry cannot
+ * rearm the timer, then try to cancel the hrtimer.  Returns TIMER_RETRY
+ * if the callback is currently executing, so the caller must retry.
+ */
+static int common_timer_del(struct k_itimer *timer)
+{
+       timer->it.real.interval.tv64 = 0;
+
+       if (hrtimer_try_to_cancel(&timer->it.real.timer) < 0)
+               return TIMER_RETRY;
+       return 0;
+}
+
+/* Dispatch deletion to the owning clock's timer_del method. */
+static inline int timer_delete_hook(struct k_itimer *timer)
+{
+       struct k_clock *kc = clockid_to_kclock(timer->it_clock);
+
+       if (WARN_ON_ONCE(!kc || !kc->timer_del))
+               return -EINVAL;
+       return kc->timer_del(timer);
+}
+
+/* Delete a POSIX.1b interval timer. */
+SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
+{
+       struct k_itimer *timer;
+       unsigned long flags;
+
+retry_delete:
+       timer = lock_timer(timer_id, &flags);
+       if (!timer)
+               return -EINVAL;
+
+       /* The timer callback may be running; drop the lock and retry. */
+       if (timer_delete_hook(timer) == TIMER_RETRY) {
+               unlock_timer(timer, flags);
+               goto retry_delete;
+       }
+
+       /* Unlink from the process-wide posix_timers list under siglock. */
+       spin_lock(&current->sighand->siglock);
+       list_del(&timer->list);
+       spin_unlock(&current->sighand->siglock);
+       /*
+        * This keeps any tasks waiting on the spin lock from thinking
+        * they got something (see the lock code above).
+        */
+       timer->it_signal = NULL;
+
+       unlock_timer(timer, flags);
+       release_posix_timer(timer, IT_ID_SET);
+       return 0;
+}
+
+/*
+ * return timer owned by the process, used by exit_itimers
+ */
+static void itimer_delete(struct k_itimer *timer)
+{
+       unsigned long flags;
+
+retry_delete:
+       spin_lock_irqsave(&timer->it_lock, flags);
+
+       /* Same retry dance as sys_timer_delete(), but without lock_timer(). */
+       if (timer_delete_hook(timer) == TIMER_RETRY) {
+               unlock_timer(timer, flags);
+               goto retry_delete;
+       }
+       /* No siglock here: the caller holds the last signal_struct reference. */
+       list_del(&timer->list);
+       /*
+        * This keeps any tasks waiting on the spin lock from thinking
+        * they got something (see the lock code above).
+        */
+       timer->it_signal = NULL;
+
+       unlock_timer(timer, flags);
+       release_posix_timer(timer, IT_ID_SET);
+}
+
+/*
+ * This is called by do_exit or de_thread, only when there are no more
+ * references to the shared signal_struct.
+ */
+void exit_itimers(struct signal_struct *sig)
+{
+       struct k_itimer *tmr;
+
+       /* itimer_delete() unlinks tmr from the list, so the loop terminates. */
+       while (!list_empty(&sig->posix_timers)) {
+               tmr = list_entry(sig->posix_timers.next, struct k_itimer, list);
+               itimer_delete(tmr);
+       }
+}
+
+/* Set clock @which_clock to the time passed in from user space. */
+SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
+               const struct timespec __user *, tp)
+{
+       struct k_clock *kc = clockid_to_kclock(which_clock);
+       struct timespec new_tp;
+
+       /* Not every clock is settable (e.g. no clock_set method). */
+       if (!kc || !kc->clock_set)
+               return -EINVAL;
+
+       if (copy_from_user(&new_tp, tp, sizeof (*tp)))
+               return -EFAULT;
+
+       return kc->clock_set(which_clock, &new_tp);
+}
+
+/* Read the current time of clock @which_clock into user space. */
+SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
+               struct timespec __user *,tp)
+{
+       struct k_clock *kc = clockid_to_kclock(which_clock);
+       struct timespec kernel_tp;
+       int error;
+
+       if (!kc)
+               return -EINVAL;
+
+       /*
+        * No NULL check on clock_get here, unlike clock_adj below -
+        * presumably mandatory for every registered k_clock (verify).
+        */
+       error = kc->clock_get(which_clock, &kernel_tp);
+
+       if (!error && copy_to_user(tp, &kernel_tp, sizeof (kernel_tp)))
+               error = -EFAULT;
+
+       return error;
+}
+
+/* NTP-style fine adjustment of @which_clock; see adjtimex(2). */
+SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock,
+               struct timex __user *, utx)
+{
+       struct k_clock *kc = clockid_to_kclock(which_clock);
+       struct timex ktx;
+       int err;
+
+       if (!kc)
+               return -EINVAL;
+       /* Valid clock but no adjustment support: distinct error code. */
+       if (!kc->clock_adj)
+               return -EOPNOTSUPP;
+
+       if (copy_from_user(&ktx, utx, sizeof(ktx)))
+               return -EFAULT;
+
+       /* A non-negative err is a clock state, passed through to the caller. */
+       err = kc->clock_adj(which_clock, &ktx);
+
+       if (err >= 0 && copy_to_user(utx, &ktx, sizeof(ktx)))
+               return -EFAULT;
+
+       return err;
+}
+
+/* Report the resolution of @which_clock; @tp may be NULL per POSIX. */
+SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
+               struct timespec __user *, tp)
+{
+       struct k_clock *kc = clockid_to_kclock(which_clock);
+       struct timespec rtn_tp;
+       int error;
+
+       if (!kc)
+               return -EINVAL;
+
+       error = kc->clock_getres(which_clock, &rtn_tp);
+
+       /* Only copy out when the caller actually supplied a buffer. */
+       if (!error && tp && copy_to_user(tp, &rtn_tp, sizeof (rtn_tp)))
+               error = -EFAULT;
+
+       return error;
+}
+
+/*
+ * nanosleep for monotonic and realtime clocks
+ */
+static int common_nsleep(const clockid_t which_clock, int flags,
+                        struct timespec *tsave, struct timespec __user *rmtp)
+{
+       /* Map TIMER_ABSTIME to the corresponding hrtimer mode. */
+       return hrtimer_nanosleep(tsave, rmtp, flags & TIMER_ABSTIME ?
+                                HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
+                                which_clock);
+}
+
+/* Sleep on clock @which_clock until/for the time in @rqtp. */
+SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
+               const struct timespec __user *, rqtp,
+               struct timespec __user *, rmtp)
+{
+       struct k_clock *kc = clockid_to_kclock(which_clock);
+       struct timespec t;
+
+       if (!kc)
+               return -EINVAL;
+       /* Clocks without a nsleep method reject sleeping outright. */
+       if (!kc->nsleep)
+               return -ENANOSLEEP_NOTSUP;
+
+       if (copy_from_user(&t, rqtp, sizeof (struct timespec)))
+               return -EFAULT;
+
+       if (!timespec_valid(&t))
+               return -EINVAL;
+
+       return kc->nsleep(which_clock, flags, &t, rmtp);
+}
+
+/*
+ * This will restart clock_nanosleep. This is required only by
+ * compat_clock_nanosleep_restart for now.
+ */
+long clock_nanosleep_restart(struct restart_block *restart_block)
+{
+       clockid_t which_clock = restart_block->nanosleep.clockid;
+       struct k_clock *kc = clockid_to_kclock(which_clock);
+
+       /* Only a clock with nsleep_restart can have scheduled a restart. */
+       if (WARN_ON_ONCE(!kc || !kc->nsleep_restart))
+               return -EINVAL;
+
+       return kc->nsleep_restart(restart_block);
+}
index 7ab92b1..c19c1d8 100644 (file)
@@ -4,6 +4,8 @@
 #include <linux/hrtimer.h>
 #include <linux/tick.h>
 
+#include "timekeeping.h"
+
 extern seqlock_t jiffies_lock;
 
 #define CS_NAME_LEN    32
diff --git a/kernel/time/time.c b/kernel/time/time.c
new file mode 100644 (file)
index 0000000..f0294ba
--- /dev/null
@@ -0,0 +1,778 @@
+/*
+ *  linux/kernel/time.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  This file contains the interface functions for the various
+ *  time related system calls: time, stime, gettimeofday, settimeofday,
+ *                            adjtime
+ */
+/*
+ * Modification history kernel/time.c
+ *
+ * 1993-09-02    Philip Gladstone
+ *      Created file with time related functions from sched/core.c and adjtimex()
+ * 1993-10-08    Torsten Duwe
+ *      adjtime interface update and CMOS clock write code
+ * 1995-08-13    Torsten Duwe
+ *      kernel PLL updated to 1994-12-13 specs (rfc-1589)
+ * 1999-01-16    Ulrich Windl
+ *     Introduced error checking for many cases in adjtimex().
+ *     Updated NTP code according to technical memorandum Jan '96
+ *     "A Kernel Model for Precision Timekeeping" by Dave Mills
+ *     Allow time_constant larger than MAXTC(6) for NTP v4 (MAXTC == 10)
+ *     (Even though the technical memorandum forbids it)
+ * 2004-07-14   Christoph Lameter
+ *     Added getnstimeofday to allow the posix timer functions to return
+ *     with nanosecond accuracy
+ */
+
+#include <linux/export.h>
+#include <linux/timex.h>
+#include <linux/capability.h>
+#include <linux/timekeeper_internal.h>
+#include <linux/errno.h>
+#include <linux/syscalls.h>
+#include <linux/security.h>
+#include <linux/fs.h>
+#include <linux/math64.h>
+#include <linux/ptrace.h>
+
+#include <asm/uaccess.h>
+#include <asm/unistd.h>
+
+#include "timeconst.h"
+#include "timekeeping.h"
+
+/*
+ * The timezone where the local system is located.  Used as a default by some
+ * programs who obtain this value by using gettimeofday.
+ */
+struct timezone sys_tz;
+
+EXPORT_SYMBOL(sys_tz);
+
+#ifdef __ARCH_WANT_SYS_TIME
+
+/*
+ * sys_time() can be implemented in user-level using
+ * sys_gettimeofday().  Is this for backwards compatibility?  If so,
+ * why not move it into the appropriate arch directory (for those
+ * architectures that need it).
+ */
+SYSCALL_DEFINE1(time, time_t __user *, tloc)
+{
+       time_t i = get_seconds();
+
+       if (tloc) {
+               if (put_user(i,tloc))
+                       return -EFAULT;
+       }
+       /* Valid time values can look like error codes to ptrace. */
+       force_successful_syscall_return();
+       return i;
+}
+
+/*
+ * sys_stime() can be implemented in user-level using
+ * sys_settimeofday().  Is this for backwards compatibility?  If so,
+ * why not move it into the appropriate arch directory (for those
+ * architectures that need it).
+ */
+
+SYSCALL_DEFINE1(stime, time_t __user *, tptr)
+{
+       struct timespec tv;
+       int err;
+
+       if (get_user(tv.tv_sec, tptr))
+               return -EFAULT;
+
+       tv.tv_nsec = 0;
+
+       /* LSM hook - may refuse the time change. */
+       err = security_settime(&tv, NULL);
+       if (err)
+               return err;
+
+       do_settimeofday(&tv);
+       return 0;
+}
+
+#endif /* __ARCH_WANT_SYS_TIME */
+
+/* Both arguments are optional; a NULL pointer means "don't report it". */
+SYSCALL_DEFINE2(gettimeofday, struct timeval __user *, tv,
+               struct timezone __user *, tz)
+{
+       if (likely(tv != NULL)) {
+               struct timeval ktv;
+               do_gettimeofday(&ktv);
+               if (copy_to_user(tv, &ktv, sizeof(ktv)))
+                       return -EFAULT;
+       }
+       if (unlikely(tz != NULL)) {
+               if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
+                       return -EFAULT;
+       }
+       return 0;
+}
+
+/*
+ * Indicates if there is an offset between the system clock and the hardware
+ * clock/persistent clock/rtc.
+ */
+int persistent_clock_is_local;
+
+/*
+ * Adjust the time obtained from the CMOS to be UTC time instead of
+ * local time.
+ *
+ * This is ugly, but preferable to the alternatives.  Otherwise we
+ * would either need to write a program to do it in /etc/rc (and risk
+ * confusion if the program gets run more than once; it would also be
+ * hard to make the program warp the clock precisely n hours)  or
+ * compile in the timezone information into the kernel.  Bad, bad....
+ *
+ *                                             - TYT, 1992-01-01
+ *
+ * The best thing to do is to keep the CMOS clock in universal time (UTC)
+ * as real UNIX machines always do it. This avoids all headaches about
+ * daylight saving times and warping kernel clocks.
+ */
+static inline void warp_clock(void)
+{
+       if (sys_tz.tz_minuteswest != 0) {
+               struct timespec adjust;
+
+               /* Remember that the persistent clock ticks local time. */
+               persistent_clock_is_local = 1;
+               /* Shift the clock by the timezone offset (minutes -> secs). */
+               adjust.tv_sec = sys_tz.tz_minuteswest * 60;
+               adjust.tv_nsec = 0;
+               timekeeping_inject_offset(&adjust);
+       }
+}
+
+/*
+ * In case for some reason the CMOS clock has not already been running
+ * in UTC, but in some local time: The first time we set the timezone,
+ * we will warp the clock so that it is ticking UTC time instead of
+ * local time. Presumably, if someone is setting the timezone then we
+ * are running in an environment where the programs understand about
+ * timezones. This should be done at boot time in the /etc/rc script,
+ * as soon as possible, so that the clock can be set right. Otherwise,
+ * various programs will get confused when the clock gets warped.
+ */
+
+int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
+{
+       static int firsttime = 1;
+       int error = 0;
+
+       if (tv && !timespec_valid(tv))
+               return -EINVAL;
+
+       /* LSM hook - may refuse the time/timezone change. */
+       error = security_settime(tv, tz);
+       if (error)
+               return error;
+
+       if (tz) {
+               sys_tz = *tz;
+               update_vsyscall_tz();
+               /* Warp only on the very first, timezone-only, setting. */
+               if (firsttime) {
+                       firsttime = 0;
+                       if (!tv)
+                               warp_clock();
+               }
+       }
+       if (tv)
+               return do_settimeofday(tv);
+       return 0;
+}
+
+/* Copy in the optional timeval/timezone and hand off to the helper. */
+SYSCALL_DEFINE2(settimeofday, struct timeval __user *, tv,
+               struct timezone __user *, tz)
+{
+       struct timeval user_tv;
+       struct timespec new_ts;
+       struct timezone new_tz;
+
+       if (tv) {
+               if (copy_from_user(&user_tv, tv, sizeof(*tv)))
+                       return -EFAULT;
+               /* Convert the timeval to the timespec the helper wants. */
+               new_ts.tv_sec = user_tv.tv_sec;
+               new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC;
+       }
+       if (tz) {
+               if (copy_from_user(&new_tz, tz, sizeof(*tz)))
+                       return -EFAULT;
+       }
+
+       return do_sys_settimeofday(tv ? &new_ts : NULL, tz ? &new_tz : NULL);
+}
+
+/* NTP adjustment of the system clock; see adjtimex(2). */
+SYSCALL_DEFINE1(adjtimex, struct timex __user *, txc_p)
+{
+       struct timex txc;               /* Local copy of parameter */
+       int ret;
+
+       /* Copy the user data space into the kernel copy
+        * structure. But bear in mind that the structures
+        * may change
+        */
+       if(copy_from_user(&txc, txc_p, sizeof(struct timex)))
+               return -EFAULT;
+       /* A non-negative ret is the clock state, not an error. */
+       ret = do_adjtimex(&txc);
+       return copy_to_user(txc_p, &txc, sizeof(struct timex)) ? -EFAULT : ret;
+}
+
+/**
+ * current_fs_time - Return FS time
+ * @sb: Superblock.
+ *
+ * Return the current time truncated to the time granularity supported by
+ * the fs.
+ */
+struct timespec current_fs_time(struct super_block *sb)
+{
+       struct timespec now = current_kernel_time();
+       /* Granularity is per-superblock: sb->s_time_gran, in nanoseconds. */
+       return timespec_trunc(now, sb->s_time_gran);
+}
+EXPORT_SYMBOL(current_fs_time);
+
+/*
+ * Convert jiffies to milliseconds and back.
+ *
+ * Avoid unnecessary multiplications/divisions in the
+ * two most common HZ cases:
+ */
+unsigned int jiffies_to_msecs(const unsigned long j)
+{
+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
+       /* HZ divides 1000 exactly: plain multiply. */
+       return (MSEC_PER_SEC / HZ) * j;
+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
+       /* 1000 divides HZ exactly: divide, rounding up. */
+       return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
+#else
+       /* Generic case: reciprocal-multiply constants from timeconst.bc. */
+# if BITS_PER_LONG == 32
+       return (HZ_TO_MSEC_MUL32 * j) >> HZ_TO_MSEC_SHR32;
+# else
+       return (j * HZ_TO_MSEC_NUM) / HZ_TO_MSEC_DEN;
+# endif
+#endif
+}
+EXPORT_SYMBOL(jiffies_to_msecs);
+
+/* Same branch structure as jiffies_to_msecs(), for microseconds. */
+unsigned int jiffies_to_usecs(const unsigned long j)
+{
+#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
+       return (USEC_PER_SEC / HZ) * j;
+#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
+       return (j + (HZ / USEC_PER_SEC) - 1)/(HZ / USEC_PER_SEC);
+#else
+# if BITS_PER_LONG == 32
+       return (HZ_TO_USEC_MUL32 * j) >> HZ_TO_USEC_SHR32;
+# else
+       return (j * HZ_TO_USEC_NUM) / HZ_TO_USEC_DEN;
+# endif
+#endif
+}
+EXPORT_SYMBOL(jiffies_to_usecs);
+
+/**
+ * timespec_trunc - Truncate timespec to a granularity
+ * @t: Timespec
+ * @gran: Granularity in ns.
+ *
+ * Truncate a timespec to a granularity. gran must be smaller than a second.
+ * Always rounds down.
+ *
+ * This function should be only used for timestamps returned by
+ * current_kernel_time() or CURRENT_TIME, not with do_gettimeofday() because
+ * it doesn't handle the better resolution of the latter.
+ */
+struct timespec timespec_trunc(struct timespec t, unsigned gran)
+{
+       /*
+        * Division is pretty slow so avoid it for common cases.
+        * Currently current_kernel_time() never returns better than
+        * jiffies resolution. Exploit that.
+        */
+       if (gran <= jiffies_to_usecs(1) * 1000) {
+               /* nothing */
+       } else if (gran == NSEC_PER_SEC) {
+               /* Whole-second granularity: drop the nanoseconds. */
+               t.tv_nsec = 0;
+       } else {
+               /* Generic case: round tv_nsec down to a multiple of gran. */
+               t.tv_nsec -= t.tv_nsec % gran;
+       }
+       return t;
+}
+EXPORT_SYMBOL(timespec_trunc);
+
+/* Converts Gregorian date to seconds since 1970-01-01 00:00:00.
+ * Assumes input in normal date format, i.e. 1980-12-31 23:59:59
+ * => year=1980, mon=12, day=31, hour=23, min=59, sec=59.
+ *
+ * [For the Julian calendar (which was used in Russia before 1917,
+ * Britain & colonies before 1752, anywhere else before 1582,
+ * and is still in use by some communities) leave out the
+ * -year/100+year/400 terms, and add 10.]
+ *
+ * This algorithm was first published by Gauss (I think).
+ *
+ * WARNING: this function will overflow on 2106-02-07 06:28:16 on
+ * machines where long is 32-bit! (However, as time_t is signed, we
+ * will already get problems at other places on 2038-01-19 03:14:08)
+ */
+unsigned long
+mktime(const unsigned int year0, const unsigned int mon0,
+       const unsigned int day, const unsigned int hour,
+       const unsigned int min, const unsigned int sec)
+{
+       unsigned int mon = mon0, year = year0;
+
+       /* 1..12 -> 11,12,1..10 */
+       if (0 >= (int) (mon -= 2)) {
+               mon += 12;      /* Puts Feb last since it has leap day */
+               year -= 1;
+       }
+
+       /* 719499 is presumably the day-count offset aligning the Gauss
+        * formula to the 1970 epoch - TODO verify against the reference. */
+       return ((((unsigned long)
+                 (year/4 - year/100 + year/400 + 367*mon/12 + day) +
+                 year*365 - 719499
+           )*24 + hour /* now have hours */
+         )*60 + min /* now have minutes */
+       )*60 + sec; /* finally seconds */
+}
+
+EXPORT_SYMBOL(mktime);
+
+/**
+ * set_normalized_timespec - set timespec sec and nsec parts and normalize
+ *
+ * @ts:                pointer to timespec variable to be set
+ * @sec:       seconds to set
+ * @nsec:      nanoseconds to set
+ *
+ * Set seconds and nanoseconds field of a timespec variable and
+ * normalize to the timespec storage format
+ *
+ * Note: The tv_nsec part is always in the range of
+ *     0 <= tv_nsec < NSEC_PER_SEC
+ * For negative values only the tv_sec field is negative !
+ */
+void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec)
+{
+       /* Loops iterate once per second of overshoot, carrying into sec. */
+       while (nsec >= NSEC_PER_SEC) {
+               /*
+                * The following asm() prevents the compiler from
+                * optimising this loop into a modulo operation. See
+                * also __iter_div_u64_rem() in include/linux/time.h
+                */
+               asm("" : "+rm"(nsec));
+               nsec -= NSEC_PER_SEC;
+               ++sec;
+       }
+       while (nsec < 0) {
+               asm("" : "+rm"(nsec));
+               nsec += NSEC_PER_SEC;
+               --sec;
+       }
+       ts->tv_sec = sec;
+       ts->tv_nsec = nsec;
+}
+EXPORT_SYMBOL(set_normalized_timespec);
+
+/**
+ * ns_to_timespec - Convert nanoseconds to timespec
+ * @nsec:       the nanoseconds value to be converted
+ *
+ * Returns the timespec representation of the nsec parameter.
+ */
+struct timespec ns_to_timespec(const s64 nsec)
+{
+       struct timespec ts;
+       s32 rem;
+
+       if (!nsec)
+               return (struct timespec) {0, 0};
+
+       ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem);
+       /* div_s64_rem truncates toward zero; floor it for negative input. */
+       if (unlikely(rem < 0)) {
+               ts.tv_sec--;
+               rem += NSEC_PER_SEC;
+       }
+       ts.tv_nsec = rem;
+
+       return ts;
+}
+EXPORT_SYMBOL(ns_to_timespec);
+
+/**
+ * ns_to_timeval - Convert nanoseconds to timeval
+ * @nsec:       the nanoseconds value to be converted
+ *
+ * Returns the timeval representation of the nsec parameter.
+ */
+struct timeval ns_to_timeval(const s64 nsec)
+{
+       struct timeval tv;
+       struct timespec ts;
+
+       /* Reuse the timespec conversion, then scale the ns part to us. */
+       ts = ns_to_timespec(nsec);
+       tv.tv_sec = ts.tv_sec;
+       tv.tv_usec = (suseconds_t) ts.tv_nsec / 1000;
+
+       return tv;
+}
+EXPORT_SYMBOL(ns_to_timeval);
+
+#if BITS_PER_LONG == 32
+/**
+ * set_normalized_timespec64 - set timespec64 sec and nsec parts and normalize
+ *
+ * @ts:                pointer to timespec64 variable to be set
+ * @sec:       seconds to set
+ * @nsec:      nanoseconds to set
+ *
+ * Set seconds and nanoseconds field of a timespec64 variable and
+ * normalize to the timespec64 storage format
+ *
+ * Note: The tv_nsec part is always in the range of
+ *     0 <= tv_nsec < NSEC_PER_SEC
+ * For negative values only the tv_sec field is negative !
+ */
+void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec)
+{
+       /* Mirrors set_normalized_timespec() above, with 64-bit seconds. */
+       while (nsec >= NSEC_PER_SEC) {
+               /*
+                * The following asm() prevents the compiler from
+                * optimising this loop into a modulo operation. See
+                * also __iter_div_u64_rem() in include/linux/time.h
+                */
+               asm("" : "+rm"(nsec));
+               nsec -= NSEC_PER_SEC;
+               ++sec;
+       }
+       while (nsec < 0) {
+               asm("" : "+rm"(nsec));
+               nsec += NSEC_PER_SEC;
+               --sec;
+       }
+       ts->tv_sec = sec;
+       ts->tv_nsec = nsec;
+}
+EXPORT_SYMBOL(set_normalized_timespec64);
+
+/**
+ * ns_to_timespec64 - Convert nanoseconds to timespec64
+ * @nsec:       the nanoseconds value to be converted
+ *
+ * Returns the timespec64 representation of the nsec parameter.
+ */
+struct timespec64 ns_to_timespec64(const s64 nsec)
+{
+       struct timespec64 ts;
+       s32 rem;
+
+       if (!nsec)
+               return (struct timespec64) {0, 0};
+
+       ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem);
+       /* div_s64_rem truncates toward zero; floor it for negative input. */
+       if (unlikely(rem < 0)) {
+               ts.tv_sec--;
+               rem += NSEC_PER_SEC;
+       }
+       ts.tv_nsec = rem;
+
+       return ts;
+}
+EXPORT_SYMBOL(ns_to_timespec64);
+#endif
+/*
+ * When we convert to jiffies then we interpret incoming values
+ * the following way:
+ *
+ * - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET)
+ *
+ * - 'too large' values [that would result in larger than
+ *   MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
+ *
+ * - all other values are converted to jiffies by either multiplying
+ *   the input value by a factor or dividing it with a factor
+ *
+ * We must also be careful about 32-bit overflows.
+ */
+unsigned long msecs_to_jiffies(const unsigned int m)
+{
+       /*
+        * Negative value, means infinite timeout:
+        */
+       if ((int)m < 0)
+               return MAX_JIFFY_OFFSET;
+
+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
+       /*
+        * HZ is equal to or smaller than 1000, and 1000 is a nice
+        * round multiple of HZ, divide with the factor between them,
+        * but round upwards:
+        */
+       return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
+       /*
+        * HZ is larger than 1000, and HZ is a nice round multiple of
+        * 1000 - simply multiply with the factor between them.
+        *
+        * But first make sure the multiplication result cannot
+        * overflow:
+        */
+       if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
+               return MAX_JIFFY_OFFSET;
+
+       return m * (HZ / MSEC_PER_SEC);
+#else
+       /*
+        * Generic case - multiply, round and divide. But first
+        * check that if we are doing a net multiplication, that
+        * we wouldn't overflow:
+        */
+       if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
+               return MAX_JIFFY_OFFSET;
+
+       /* Reciprocal-multiply constants generated by timeconst.bc. */
+       return (MSEC_TO_HZ_MUL32 * m + MSEC_TO_HZ_ADJ32)
+               >> MSEC_TO_HZ_SHR32;
+#endif
+}
+EXPORT_SYMBOL(msecs_to_jiffies);
+
+/* Same branch structure as msecs_to_jiffies(), for microseconds. */
+unsigned long usecs_to_jiffies(const unsigned int u)
+{
+       /* Clamp anything beyond the representable range. */
+       if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
+               return MAX_JIFFY_OFFSET;
+#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
+       return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ);
+#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
+       return u * (HZ / USEC_PER_SEC);
+#else
+       return (USEC_TO_HZ_MUL32 * u + USEC_TO_HZ_ADJ32)
+               >> USEC_TO_HZ_SHR32;
+#endif
+}
+EXPORT_SYMBOL(usecs_to_jiffies);
+
+/*
+ * The TICK_NSEC - 1 rounds up the value to the next resolution.  Note
+ * that a remainder subtract here would not do the right thing as the
+ * resolution values don't fall on second boundaries.  I.e. the line:
+ * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
+ *
+ * Rather, we just shift the bits off the right.
+ *
+ * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
+ * value to a scaled second value.
+ */
+unsigned long
+timespec_to_jiffies(const struct timespec *value)
+{
+       unsigned long sec = value->tv_sec;
+       /* TICK_NSEC - 1 rounds up to the next tick; see the note above. */
+       long nsec = value->tv_nsec + TICK_NSEC - 1;
+
+       /* Clamp to the maximum representable interval. */
+       if (sec >= MAX_SEC_IN_JIFFIES){
+               sec = MAX_SEC_IN_JIFFIES;
+               nsec = 0;
+       }
+       return (((u64)sec * SEC_CONVERSION) +
+               (((u64)nsec * NSEC_CONVERSION) >>
+                (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
+
+}
+EXPORT_SYMBOL(timespec_to_jiffies);
+
+/* Convert @jiffies into the timespec pointed to by @value. */
+void
+jiffies_to_timespec(const unsigned long jiffies, struct timespec *value)
+{
+       /*
+        * Convert jiffies to nanoseconds and separate with
+        * one divide.
+        */
+       u32 rem;
+       value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
+                                   NSEC_PER_SEC, &rem);
+       value->tv_nsec = rem;
+}
+EXPORT_SYMBOL(jiffies_to_timespec);
+
+/* Same for "timeval"
+ *
+ * Well, almost.  The problem here is that the real system resolution is
+ * in nanoseconds and the value being converted is in micro seconds.
+ * Also for some machines (those that use HZ = 1024, in particular),
+ * there is a LARGE error in the tick size in microseconds.
+
+ * The solution we use is to do the rounding AFTER we convert the
+ * microsecond part.  Thus the USEC_ROUND, the bits to be shifted off.
+ * Instruction wise, this should cost only an additional add with carry
+ * instruction above the way it was done above.
+ */
+unsigned long
+timeval_to_jiffies(const struct timeval *value)
+{
+       unsigned long sec = value->tv_sec;
+       long usec = value->tv_usec;
+
+       /* Clamp to the maximum representable interval. */
+       if (sec >= MAX_SEC_IN_JIFFIES){
+               sec = MAX_SEC_IN_JIFFIES;
+               usec = 0;
+       }
+       /* USEC_ROUND implements the post-conversion rounding; see above. */
+       return (((u64)sec * SEC_CONVERSION) +
+               (((u64)usec * USEC_CONVERSION + USEC_ROUND) >>
+                (USEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
+}
+EXPORT_SYMBOL(timeval_to_jiffies);
+
+/* Convert @jiffies into the timeval pointed to by @value. */
+void jiffies_to_timeval(const unsigned long jiffies, struct timeval *value)
+{
+       /*
+        * Convert jiffies to nanoseconds and separate with
+        * one divide.
+        */
+       u32 rem;
+
+       value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
+                                   NSEC_PER_SEC, &rem);
+       /* Truncate the sub-second remainder from ns down to us. */
+       value->tv_usec = rem / NSEC_PER_USEC;
+}
+EXPORT_SYMBOL(jiffies_to_timeval);
+
+/*
+ * Convert jiffies/jiffies_64 to clock_t and back.
+ */
+clock_t jiffies_to_clock_t(unsigned long x)
+{
+       /* clock_t ticks at USER_HZ, the rate userspace sees. */
+#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
+# if HZ < USER_HZ
+       return x * (USER_HZ / HZ);
+# else
+       return x / (HZ / USER_HZ);
+# endif
+#else
+       return div_u64((u64)x * TICK_NSEC, NSEC_PER_SEC / USER_HZ);
+#endif
+}
+EXPORT_SYMBOL(jiffies_to_clock_t);
+
+/* Inverse direction: USER_HZ-based clock_t back to jiffies, saturating. */
+unsigned long clock_t_to_jiffies(unsigned long x)
+{
+#if (HZ % USER_HZ)==0
+       if (x >= ~0UL / (HZ / USER_HZ))
+               return ~0UL;
+       return x * (HZ / USER_HZ);
+#else
+       /* Don't worry about loss of precision here .. */
+       if (x >= ~0UL / HZ * USER_HZ)
+               return ~0UL;
+
+       /* .. but do try to contain it here */
+       return div_u64((u64)x * HZ, USER_HZ);
+#endif
+}
+EXPORT_SYMBOL(clock_t_to_jiffies);
+
+/* 64-bit variant of jiffies_to_clock_t(). */
+u64 jiffies_64_to_clock_t(u64 x)
+{
+#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
+# if HZ < USER_HZ
+       x = div_u64(x * USER_HZ, HZ);
+# elif HZ > USER_HZ
+       x = div_u64(x, HZ / USER_HZ);
+# else
+       /* Nothing to do */
+# endif
+#else
+       /*
+        * There are better ways that don't overflow early,
+        * but even this doesn't overflow in hundreds of years
+        * in 64 bits, so..
+        */
+       x = div_u64(x * TICK_NSEC, (NSEC_PER_SEC / USER_HZ));
+#endif
+       return x;
+}
+EXPORT_SYMBOL(jiffies_64_to_clock_t);
+
+/* Convert nanoseconds to USER_HZ-based clock_t. */
+u64 nsec_to_clock_t(u64 x)
+{
+#if (NSEC_PER_SEC % USER_HZ) == 0
+       return div_u64(x, NSEC_PER_SEC / USER_HZ);
+#elif (USER_HZ % 512) == 0
+       return div_u64(x * USER_HZ / 512, NSEC_PER_SEC / 512);
+#else
+       /*
+        * max relative error 5.7e-8 (1.8s per year) for USER_HZ <= 1024,
+        * overflow after 64.99 years.
+        * exact for HZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ...
+        */
+       return div_u64(x * 9, (9ull * NSEC_PER_SEC + (USER_HZ / 2)) / USER_HZ);
+#endif
+}
+
+/**
+ * nsecs_to_jiffies64 - Convert nsecs in u64 to jiffies64
+ *
+ * @n: nsecs in u64
+ *
+ * Unlike {m,u}secs_to_jiffies, type of input is not unsigned int but u64.
+ * And this doesn't return MAX_JIFFY_OFFSET since this function is designed
+ * for scheduler, not for use in device drivers to calculate timeout value.
+ *
+ * note:
+ *   NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512)
+ *   ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years
+ */
+u64 nsecs_to_jiffies64(u64 n)
+{
+#if (NSEC_PER_SEC % HZ) == 0
+       /* Common case, HZ = 100, 128, 200, 250, 256, 500, 512, 1000 etc. */
+       return div_u64(n, NSEC_PER_SEC / HZ);
+#elif (HZ % 512) == 0
+       /* overflow after 292 years if HZ = 1024 */
+       return div_u64(n * HZ / 512, NSEC_PER_SEC / 512);
+#else
+       /*
+        * Generic case - optimized for cases where HZ is a multiple of 3.
+        * overflow after 64.99 years, exact for HZ = 60, 72, 90, 120 etc.
+        */
+       return div_u64(n * 9, (9ull * NSEC_PER_SEC + HZ / 2) / HZ);
+#endif
+}
+
+/**
+ * nsecs_to_jiffies - Convert nsecs in u64 to jiffies
+ *
+ * @n: nsecs in u64
+ *
+ * Unlike {m,u}secs_to_jiffies, type of input is not unsigned int but u64.
+ * And this doesn't return MAX_JIFFY_OFFSET since this function is designed
+ * for scheduler, not for use in device drivers to calculate timeout value.
+ *
+ * note:
+ *   NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512)
+ *   ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years
+ */
+unsigned long nsecs_to_jiffies(u64 n)
+{
+       /* Truncating wrapper around nsecs_to_jiffies64(). */
+       return (unsigned long)nsecs_to_jiffies64(n);
+}
+EXPORT_SYMBOL_GPL(nsecs_to_jiffies);
+
+/*
+ * Add two timespec values and do a safety check for overflow.
+ * It's assumed that both values are valid (>= 0)
+ */
+struct timespec timespec_add_safe(const struct timespec lhs,
+                                 const struct timespec rhs)
+{
+       struct timespec sum;
+
+       set_normalized_timespec(&sum, lhs.tv_sec + rhs.tv_sec,
+                               lhs.tv_nsec + rhs.tv_nsec);
+
+       /* A wrapped tv_sec is smaller than either input: clamp to max. */
+       if (sum.tv_sec < lhs.tv_sec || sum.tv_sec < rhs.tv_sec)
+               sum.tv_sec = TIME_T_MAX;
+
+       return sum;
+}
diff --git a/kernel/time/timeconst.bc b/kernel/time/timeconst.bc
new file mode 100644 (file)
index 0000000..511bdf2
--- /dev/null
@@ -0,0 +1,108 @@
+scale=0
+
+define gcd(a,b) {
+       auto t;
+       while (b) {
+               t = b;
+               b = a % b;
+               a = t;
+       }
+       return a;
+}
+
+/* Division by reciprocal multiplication. */
+define fmul(b,n,d) {
+       return (2^b*n+d-1)/d;
+}
+
+/* Adjustment factor when a ceiling value is used.  Use as:
+   (imul * n) + (fmulxx * n + fadjxx) >> xx) */
+define fadj(b,n,d) {
+       auto v;
+       d = d/gcd(n,d);
+       v = 2^b*(d-1)/d;
+       return v;
+}
+
+/* Compute the appropriate mul/adj values as well as a shift count,
+   which brings the mul value into the range 2^b-1 <= x < 2^b.  Such
+   a shift value will be correct in the signed integer range and off
+   by at most one in the upper half of the unsigned range. */
+define fmuls(b,n,d) {
+       auto s, m;
+       for (s = 0; 1; s++) {
+               m = fmul(s,n,d);
+               if (m >= 2^(b-1))
+                       return s;
+       }
+       return 0;
+}
+
+define timeconst(hz) {
+       print "/* Automatically generated by kernel/timeconst.bc */\n"
+       print "/* Time conversion constants for HZ == ", hz, " */\n"
+       print "\n"
+
+       print "#ifndef KERNEL_TIMECONST_H\n"
+       print "#define KERNEL_TIMECONST_H\n\n"
+
+       print "#include <linux/param.h>\n"
+       print "#include <linux/types.h>\n\n"
+
+       print "#if HZ != ", hz, "\n"
+       print "#error \qkernel/timeconst.h has the wrong HZ value!\q\n"
+       print "#endif\n\n"
+
+       if (hz < 2) {
+               print "#error Totally bogus HZ value!\n"
+       } else {
+               s=fmuls(32,1000,hz)
+               obase=16
+               print "#define HZ_TO_MSEC_MUL32\tU64_C(0x", fmul(s,1000,hz), ")\n"
+               print "#define HZ_TO_MSEC_ADJ32\tU64_C(0x", fadj(s,1000,hz), ")\n"
+               obase=10
+               print "#define HZ_TO_MSEC_SHR32\t", s, "\n"
+
+               s=fmuls(32,hz,1000)
+               obase=16
+               print "#define MSEC_TO_HZ_MUL32\tU64_C(0x", fmul(s,hz,1000), ")\n"
+               print "#define MSEC_TO_HZ_ADJ32\tU64_C(0x", fadj(s,hz,1000), ")\n"
+               obase=10
+               print "#define MSEC_TO_HZ_SHR32\t", s, "\n"
+
+               obase=10
+               cd=gcd(hz,1000)
+               print "#define HZ_TO_MSEC_NUM\t\t", 1000/cd, "\n"
+               print "#define HZ_TO_MSEC_DEN\t\t", hz/cd, "\n"
+               print "#define MSEC_TO_HZ_NUM\t\t", hz/cd, "\n"
+               print "#define MSEC_TO_HZ_DEN\t\t", 1000/cd, "\n"
+               print "\n"
+
+               s=fmuls(32,1000000,hz)
+               obase=16
+               print "#define HZ_TO_USEC_MUL32\tU64_C(0x", fmul(s,1000000,hz), ")\n"
+               print "#define HZ_TO_USEC_ADJ32\tU64_C(0x", fadj(s,1000000,hz), ")\n"
+               obase=10
+               print "#define HZ_TO_USEC_SHR32\t", s, "\n"
+
+               s=fmuls(32,hz,1000000)
+               obase=16
+               print "#define USEC_TO_HZ_MUL32\tU64_C(0x", fmul(s,hz,1000000), ")\n"
+               print "#define USEC_TO_HZ_ADJ32\tU64_C(0x", fadj(s,hz,1000000), ")\n"
+               obase=10
+               print "#define USEC_TO_HZ_SHR32\t", s, "\n"
+
+               obase=10
+               cd=gcd(hz,1000000)
+               print "#define HZ_TO_USEC_NUM\t\t", 1000000/cd, "\n"
+               print "#define HZ_TO_USEC_DEN\t\t", hz/cd, "\n"
+               print "#define USEC_TO_HZ_NUM\t\t", hz/cd, "\n"
+               print "#define USEC_TO_HZ_DEN\t\t", 1000000/cd, "\n"
+               print "\n"
+
+               print "#endif /* KERNEL_TIMECONST_H */\n"
+       }
+       halt
+}
+
+timeconst(hz)
index 32d8d6a..f36b028 100644 (file)
 #define TK_MIRROR              (1 << 1)
 #define TK_CLOCK_WAS_SET       (1 << 2)
 
-static struct timekeeper timekeeper;
+/*
+ * The most important data for readout fits into a single 64 byte
+ * cache line.
+ */
+static struct {
+       seqcount_t              seq;
+       struct timekeeper       timekeeper;
+} tk_core ____cacheline_aligned;
+
 static DEFINE_RAW_SPINLOCK(timekeeper_lock);
-static seqcount_t timekeeper_seq;
 static struct timekeeper shadow_timekeeper;
 
+/**
+ * struct tk_fast - NMI safe timekeeper
+ * @seq:       Sequence counter for protecting updates. The lowest bit
+ *             is the index for the tk_read_base array
+ * @base:      tk_read_base array. Access is indexed by the lowest bit of
+ *             @seq.
+ *
+ * See @update_fast_timekeeper() below.
+ */
+struct tk_fast {
+       seqcount_t              seq;
+       struct tk_read_base     base[2];
+};
+
+static struct tk_fast tk_fast_mono ____cacheline_aligned;
+
 /* flag for if timekeeping is suspended */
 int __read_mostly timekeeping_suspended;
 
@@ -45,49 +68,54 @@ bool __read_mostly persistent_clock_exist = false;
 
 static inline void tk_normalize_xtime(struct timekeeper *tk)
 {
-       while (tk->xtime_nsec >= ((u64)NSEC_PER_SEC << tk->shift)) {
-               tk->xtime_nsec -= (u64)NSEC_PER_SEC << tk->shift;
+       while (tk->tkr.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr.shift)) {
+               tk->tkr.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr.shift;
                tk->xtime_sec++;
        }
 }
 
-static void tk_set_xtime(struct timekeeper *tk, const struct timespec *ts)
+static inline struct timespec64 tk_xtime(struct timekeeper *tk)
+{
+       struct timespec64 ts;
+
+       ts.tv_sec = tk->xtime_sec;
+       ts.tv_nsec = (long)(tk->tkr.xtime_nsec >> tk->tkr.shift);
+       return ts;
+}
+
+static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
 {
        tk->xtime_sec = ts->tv_sec;
-       tk->xtime_nsec = (u64)ts->tv_nsec << tk->shift;
+       tk->tkr.xtime_nsec = (u64)ts->tv_nsec << tk->tkr.shift;
 }
 
-static void tk_xtime_add(struct timekeeper *tk, const struct timespec *ts)
+static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
 {
        tk->xtime_sec += ts->tv_sec;
-       tk->xtime_nsec += (u64)ts->tv_nsec << tk->shift;
+       tk->tkr.xtime_nsec += (u64)ts->tv_nsec << tk->tkr.shift;
        tk_normalize_xtime(tk);
 }
 
-static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec wtm)
+static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
 {
-       struct timespec tmp;
+       struct timespec64 tmp;
 
        /*
         * Verify consistency of: offset_real = -wall_to_monotonic
         * before modifying anything
         */
-       set_normalized_timespec(&tmp, -tk->wall_to_monotonic.tv_sec,
+       set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec,
                                        -tk->wall_to_monotonic.tv_nsec);
-       WARN_ON_ONCE(tk->offs_real.tv64 != timespec_to_ktime(tmp).tv64);
+       WARN_ON_ONCE(tk->offs_real.tv64 != timespec64_to_ktime(tmp).tv64);
        tk->wall_to_monotonic = wtm;
-       set_normalized_timespec(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
-       tk->offs_real = timespec_to_ktime(tmp);
+       set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
+       tk->offs_real = timespec64_to_ktime(tmp);
        tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
 }
 
-static void tk_set_sleep_time(struct timekeeper *tk, struct timespec t)
+static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
 {
-       /* Verify consistency before modifying */
-       WARN_ON_ONCE(tk->offs_boot.tv64 != timespec_to_ktime(tk->total_sleep_time).tv64);
-
-       tk->total_sleep_time    = t;
-       tk->offs_boot           = timespec_to_ktime(t);
+       tk->offs_boot = ktime_add(tk->offs_boot, delta);
 }
 
 /**
@@ -107,9 +135,11 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
        u64 tmp, ntpinterval;
        struct clocksource *old_clock;
 
-       old_clock = tk->clock;
-       tk->clock = clock;
-       tk->cycle_last = clock->cycle_last = clock->read(clock);
+       old_clock = tk->tkr.clock;
+       tk->tkr.clock = clock;
+       tk->tkr.read = clock->read;
+       tk->tkr.mask = clock->mask;
+       tk->tkr.cycle_last = tk->tkr.read(clock);
 
        /* Do the ns -> cycle conversion first, using original mult */
        tmp = NTP_INTERVAL_LENGTH;
@@ -133,78 +163,212 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
        if (old_clock) {
                int shift_change = clock->shift - old_clock->shift;
                if (shift_change < 0)
-                       tk->xtime_nsec >>= -shift_change;
+                       tk->tkr.xtime_nsec >>= -shift_change;
                else
-                       tk->xtime_nsec <<= shift_change;
+                       tk->tkr.xtime_nsec <<= shift_change;
        }
-       tk->shift = clock->shift;
+       tk->tkr.shift = clock->shift;
 
        tk->ntp_error = 0;
        tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
+       tk->ntp_tick = ntpinterval << tk->ntp_error_shift;
 
        /*
         * The timekeeper keeps its own mult values for the currently
         * active clocksource. These value will be adjusted via NTP
         * to counteract clock drifting.
         */
-       tk->mult = clock->mult;
+       tk->tkr.mult = clock->mult;
+       tk->ntp_err_mult = 0;
 }
 
 /* Timekeeper helper functions. */
 
 #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
-u32 (*arch_gettimeoffset)(void);
-
-u32 get_arch_timeoffset(void)
-{
-       if (likely(arch_gettimeoffset))
-               return arch_gettimeoffset();
-       return 0;
-}
+static u32 default_arch_gettimeoffset(void) { return 0; }
+u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
 #else
-static inline u32 get_arch_timeoffset(void) { return 0; }
+static inline u32 arch_gettimeoffset(void) { return 0; }
 #endif
 
-static inline s64 timekeeping_get_ns(struct timekeeper *tk)
+static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
 {
-       cycle_t cycle_now, cycle_delta;
-       struct clocksource *clock;
+       cycle_t cycle_now, delta;
        s64 nsec;
 
        /* read clocksource: */
-       clock = tk->clock;
-       cycle_now = clock->read(clock);
+       cycle_now = tkr->read(tkr->clock);
 
        /* calculate the delta since the last update_wall_time: */
-       cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
+       delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);
 
-       nsec = cycle_delta * tk->mult + tk->xtime_nsec;
-       nsec >>= tk->shift;
+       nsec = delta * tkr->mult + tkr->xtime_nsec;
+       nsec >>= tkr->shift;
 
        /* If arch requires, add in get_arch_timeoffset() */
-       return nsec + get_arch_timeoffset();
+       return nsec + arch_gettimeoffset();
 }
 
 static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
 {
-       cycle_t cycle_now, cycle_delta;
-       struct clocksource *clock;
+       struct clocksource *clock = tk->tkr.clock;
+       cycle_t cycle_now, delta;
        s64 nsec;
 
        /* read clocksource: */
-       clock = tk->clock;
-       cycle_now = clock->read(clock);
+       cycle_now = tk->tkr.read(clock);
 
        /* calculate the delta since the last update_wall_time: */
-       cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
+       delta = clocksource_delta(cycle_now, tk->tkr.cycle_last, tk->tkr.mask);
 
        /* convert delta to nanoseconds. */
-       nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
+       nsec = clocksource_cyc2ns(delta, clock->mult, clock->shift);
 
        /* If arch requires, add in get_arch_timeoffset() */
-       return nsec + get_arch_timeoffset();
+       return nsec + arch_gettimeoffset();
+}
+
+/**
+ * update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
+ * @tk:                The timekeeper from which we take the update
+ * @tkf:       The fast timekeeper to update
+ * @tbase:     The time base for the fast timekeeper (mono/raw)
+ *
+ * We want to use this from any context including NMI and tracing /
+ * instrumenting the timekeeping code itself.
+ *
+ * So we handle this differently than the other timekeeping accessor
+ * functions which retry when the sequence count has changed. The
+ * update side does:
+ *
+ * smp_wmb();  <- Ensure that the last base[1] update is visible
+ * tkf->seq++;
+ * smp_wmb();  <- Ensure that the seqcount update is visible
+ * update(tkf->base[0], tk);
+ * smp_wmb();  <- Ensure that the base[0] update is visible
+ * tkf->seq++;
+ * smp_wmb();  <- Ensure that the seqcount update is visible
+ * update(tkf->base[1], tk);
+ *
+ * The reader side does:
+ *
+ * do {
+ *     seq = tkf->seq;
+ *     smp_rmb();
+ *     idx = seq & 0x01;
+ *     now = now(tkf->base[idx]);
+ *     smp_rmb();
+ * } while (seq != tkf->seq)
+ *
+ * As long as we update base[0] readers are forced off to
+ * base[1]. Once base[0] is updated readers are redirected to base[0]
+ * and the base[1] update takes place.
+ *
+ * So if a NMI hits the update of base[0] then it will use base[1]
+ * which is still consistent. In the worst case this can result in a
+ * slightly wrong timestamp (a few nanoseconds). See
+ * @ktime_get_mono_fast_ns.
+ */
+static void update_fast_timekeeper(struct timekeeper *tk)
+{
+       struct tk_read_base *base = tk_fast_mono.base;
+
+       /* Force readers off to base[1] */
+       raw_write_seqcount_latch(&tk_fast_mono.seq);
+
+       /* Update base[0] */
+       memcpy(base, &tk->tkr, sizeof(*base));
+
+       /* Force readers back to base[0] */
+       raw_write_seqcount_latch(&tk_fast_mono.seq);
+
+       /* Update base[1] */
+       memcpy(base + 1, base, sizeof(*base));
 }
 
+/**
+ * ktime_get_mono_fast_ns - Fast NMI safe access to clock monotonic
+ *
+ * This timestamp is not guaranteed to be monotonic across an update.
+ * The timestamp is calculated by:
+ *
+ *     now = base_mono + clock_delta * slope
+ *
+ * So if the update lowers the slope, readers who are forced to the
+ * not yet updated second array are still using the old steeper slope.
+ *
+ * tmono
+ * ^
+ * |    o  n
+ * |   o n
+ * |  u
+ * | o
+ * |o
+ * |12345678---> reader order
+ *
+ * o = old slope
+ * u = update
+ * n = new slope
+ *
+ * So reader 6 will observe time going backwards versus reader 5.
+ *
+ * While other CPUs are likely to be able to observe that, the only way
+ * for a CPU local observation is when an NMI hits in the middle of
+ * the update. Timestamps taken from that NMI context might be ahead
+ * of the following timestamps. Callers need to be aware of that and
+ * deal with it.
+ */
+u64 notrace ktime_get_mono_fast_ns(void)
+{
+       struct tk_read_base *tkr;
+       unsigned int seq;
+       u64 now;
+
+       do {
+               seq = raw_read_seqcount(&tk_fast_mono.seq);
+               tkr = tk_fast_mono.base + (seq & 0x01);
+               now = ktime_to_ns(tkr->base_mono) + timekeeping_get_ns(tkr);
+
+       } while (read_seqcount_retry(&tk_fast_mono.seq, seq));
+       return now;
+}
+EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);
+
+#ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD
+
+static inline void update_vsyscall(struct timekeeper *tk)
+{
+       struct timespec xt;
+
+       xt = timespec64_to_timespec(tk_xtime(tk));
+       update_vsyscall_old(&xt, &tk->wall_to_monotonic, tk->tkr.clock, tk->tkr.mult,
+                           tk->tkr.cycle_last);
+}
+
+static inline void old_vsyscall_fixup(struct timekeeper *tk)
+{
+       s64 remainder;
+
+       /*
+       * Store only full nanoseconds into xtime_nsec after rounding
+       * it up and add the remainder to the error difference.
+       * XXX - This is necessary to avoid small 1ns inconsistencies caused
+       * by truncating the remainder in vsyscalls. However, it causes
+       * additional work to be done in timekeeping_adjust(). Once
+       * the vsyscall implementations are converted to use xtime_nsec
+       * (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
+       * users are removed, this can be killed.
+       */
+       remainder = tk->tkr.xtime_nsec & ((1ULL << tk->tkr.shift) - 1);
+       tk->tkr.xtime_nsec -= remainder;
+       tk->tkr.xtime_nsec += 1ULL << tk->tkr.shift;
+       tk->ntp_error += remainder << tk->ntp_error_shift;
+       tk->ntp_error -= (1ULL << tk->tkr.shift) << tk->ntp_error_shift;
+}
+#else
+#define old_vsyscall_fixup(tk)
+#endif
+
 static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);
 
 static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
@@ -217,7 +381,7 @@ static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
  */
 int pvclock_gtod_register_notifier(struct notifier_block *nb)
 {
-       struct timekeeper *tk = &timekeeper;
+       struct timekeeper *tk = &tk_core.timekeeper;
        unsigned long flags;
        int ret;
 
@@ -247,6 +411,29 @@ int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
 }
 EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);
 
+/*
+ * Update the ktime_t based scalar nsec members of the timekeeper
+ */
+static inline void tk_update_ktime_data(struct timekeeper *tk)
+{
+       s64 nsec;
+
+       /*
+        * The xtime based monotonic readout is:
+        *      nsec = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec + now();
+        * The ktime based monotonic readout is:
+        *      nsec = base_mono + now();
+        * ==> base_mono = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec
+        */
+       nsec = (s64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec);
+       nsec *= NSEC_PER_SEC;
+       nsec += tk->wall_to_monotonic.tv_nsec;
+       tk->tkr.base_mono = ns_to_ktime(nsec);
+
+       /* Update the monotonic raw base */
+       tk->base_raw = timespec64_to_ktime(tk->raw_time);
+}
+
 /* must hold timekeeper_lock */
 static void timekeeping_update(struct timekeeper *tk, unsigned int action)
 {
@@ -257,8 +444,13 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
        update_vsyscall(tk);
        update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);
 
+       tk_update_ktime_data(tk);
+
        if (action & TK_MIRROR)
-               memcpy(&shadow_timekeeper, &timekeeper, sizeof(timekeeper));
+               memcpy(&shadow_timekeeper, &tk_core.timekeeper,
+                      sizeof(tk_core.timekeeper));
+
+       update_fast_timekeeper(tk);
 }
 
 /**
@@ -270,49 +462,48 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
  */
 static void timekeeping_forward_now(struct timekeeper *tk)
 {
-       cycle_t cycle_now, cycle_delta;
-       struct clocksource *clock;
+       struct clocksource *clock = tk->tkr.clock;
+       cycle_t cycle_now, delta;
        s64 nsec;
 
-       clock = tk->clock;
-       cycle_now = clock->read(clock);
-       cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
-       tk->cycle_last = clock->cycle_last = cycle_now;
+       cycle_now = tk->tkr.read(clock);
+       delta = clocksource_delta(cycle_now, tk->tkr.cycle_last, tk->tkr.mask);
+       tk->tkr.cycle_last = cycle_now;
 
-       tk->xtime_nsec += cycle_delta * tk->mult;
+       tk->tkr.xtime_nsec += delta * tk->tkr.mult;
 
        /* If arch requires, add in get_arch_timeoffset() */
-       tk->xtime_nsec += (u64)get_arch_timeoffset() << tk->shift;
+       tk->tkr.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr.shift;
 
        tk_normalize_xtime(tk);
 
-       nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
-       timespec_add_ns(&tk->raw_time, nsec);
+       nsec = clocksource_cyc2ns(delta, clock->mult, clock->shift);
+       timespec64_add_ns(&tk->raw_time, nsec);
 }
 
 /**
- * __getnstimeofday - Returns the time of day in a timespec.
+ * __getnstimeofday64 - Returns the time of day in a timespec64.
  * @ts:                pointer to the timespec to be set
  *
  * Updates the time of day in the timespec.
  * Returns 0 on success, or -ve when suspended (timespec will be undefined).
  */
-int __getnstimeofday(struct timespec *ts)
+int __getnstimeofday64(struct timespec64 *ts)
 {
-       struct timekeeper *tk = &timekeeper;
+       struct timekeeper *tk = &tk_core.timekeeper;
        unsigned long seq;
        s64 nsecs = 0;
 
        do {
-               seq = read_seqcount_begin(&timekeeper_seq);
+               seq = read_seqcount_begin(&tk_core.seq);
 
                ts->tv_sec = tk->xtime_sec;
-               nsecs = timekeeping_get_ns(tk);
+               nsecs = timekeeping_get_ns(&tk->tkr);
 
-       } while (read_seqcount_retry(&timekeeper_seq, seq));
+       } while (read_seqcount_retry(&tk_core.seq, seq));
 
        ts->tv_nsec = 0;
-       timespec_add_ns(ts, nsecs);
+       timespec64_add_ns(ts, nsecs);
 
        /*
         * Do not bail out early, in case there were callers still using
@@ -322,116 +513,138 @@ int __getnstimeofday(struct timespec *ts)
                return -EAGAIN;
        return 0;
 }
-EXPORT_SYMBOL(__getnstimeofday);
+EXPORT_SYMBOL(__getnstimeofday64);
 
 /**
- * getnstimeofday - Returns the time of day in a timespec.
+ * getnstimeofday64 - Returns the time of day in a timespec64.
  * @ts:                pointer to the timespec to be set
  *
  * Returns the time of day in a timespec (WARN if suspended).
  */
-void getnstimeofday(struct timespec *ts)
+void getnstimeofday64(struct timespec64 *ts)
 {
-       WARN_ON(__getnstimeofday(ts));
+       WARN_ON(__getnstimeofday64(ts));
 }
-EXPORT_SYMBOL(getnstimeofday);
+EXPORT_SYMBOL(getnstimeofday64);
 
 ktime_t ktime_get(void)
 {
-       struct timekeeper *tk = &timekeeper;
+       struct timekeeper *tk = &tk_core.timekeeper;
        unsigned int seq;
-       s64 secs, nsecs;
+       ktime_t base;
+       s64 nsecs;
 
        WARN_ON(timekeeping_suspended);
 
        do {
-               seq = read_seqcount_begin(&timekeeper_seq);
-               secs = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
-               nsecs = timekeeping_get_ns(tk) + tk->wall_to_monotonic.tv_nsec;
+               seq = read_seqcount_begin(&tk_core.seq);
+               base = tk->tkr.base_mono;
+               nsecs = timekeeping_get_ns(&tk->tkr);
 
-       } while (read_seqcount_retry(&timekeeper_seq, seq));
-       /*
-        * Use ktime_set/ktime_add_ns to create a proper ktime on
-        * 32-bit architectures without CONFIG_KTIME_SCALAR.
-        */
-       return ktime_add_ns(ktime_set(secs, 0), nsecs);
+       } while (read_seqcount_retry(&tk_core.seq, seq));
+
+       return ktime_add_ns(base, nsecs);
 }
 EXPORT_SYMBOL_GPL(ktime_get);
 
-/**
- * ktime_get_ts - get the monotonic clock in timespec format
- * @ts:                pointer to timespec variable
- *
- * The function calculates the monotonic clock from the realtime
- * clock and the wall_to_monotonic offset and stores the result
- * in normalized timespec format in the variable pointed to by @ts.
- */
-void ktime_get_ts(struct timespec *ts)
+static ktime_t *offsets[TK_OFFS_MAX] = {
+       [TK_OFFS_REAL]  = &tk_core.timekeeper.offs_real,
+       [TK_OFFS_BOOT]  = &tk_core.timekeeper.offs_boot,
+       [TK_OFFS_TAI]   = &tk_core.timekeeper.offs_tai,
+};
+
+ktime_t ktime_get_with_offset(enum tk_offsets offs)
 {
-       struct timekeeper *tk = &timekeeper;
-       struct timespec tomono;
-       s64 nsec;
+       struct timekeeper *tk = &tk_core.timekeeper;
        unsigned int seq;
+       ktime_t base, *offset = offsets[offs];
+       s64 nsecs;
 
        WARN_ON(timekeeping_suspended);
 
        do {
-               seq = read_seqcount_begin(&timekeeper_seq);
-               ts->tv_sec = tk->xtime_sec;
-               nsec = timekeeping_get_ns(tk);
-               tomono = tk->wall_to_monotonic;
+               seq = read_seqcount_begin(&tk_core.seq);
+               base = ktime_add(tk->tkr.base_mono, *offset);
+               nsecs = timekeeping_get_ns(&tk->tkr);
 
-       } while (read_seqcount_retry(&timekeeper_seq, seq));
+       } while (read_seqcount_retry(&tk_core.seq, seq));
 
-       ts->tv_sec += tomono.tv_sec;
-       ts->tv_nsec = 0;
-       timespec_add_ns(ts, nsec + tomono.tv_nsec);
-}
-EXPORT_SYMBOL_GPL(ktime_get_ts);
+       return ktime_add_ns(base, nsecs);
 
+}
+EXPORT_SYMBOL_GPL(ktime_get_with_offset);
 
 /**
- * timekeeping_clocktai - Returns the TAI time of day in a timespec
- * @ts:                pointer to the timespec to be set
- *
- * Returns the time of day in a timespec.
+ * ktime_mono_to_any() - convert monotonic time to any other time
+ * @tmono:     time to convert.
+ * @offs:      which offset to use
  */
-void timekeeping_clocktai(struct timespec *ts)
+ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs)
 {
-       struct timekeeper *tk = &timekeeper;
+       ktime_t *offset = offsets[offs];
        unsigned long seq;
-       u64 nsecs;
-
-       WARN_ON(timekeeping_suspended);
+       ktime_t tconv;
 
        do {
-               seq = read_seqcount_begin(&timekeeper_seq);
+               seq = read_seqcount_begin(&tk_core.seq);
+               tconv = ktime_add(tmono, *offset);
+       } while (read_seqcount_retry(&tk_core.seq, seq));
 
-               ts->tv_sec = tk->xtime_sec + tk->tai_offset;
-               nsecs = timekeeping_get_ns(tk);
+       return tconv;
+}
+EXPORT_SYMBOL_GPL(ktime_mono_to_any);
 
-       } while (read_seqcount_retry(&timekeeper_seq, seq));
+/**
+ * ktime_get_raw - Returns the raw monotonic time in ktime_t format
+ */
+ktime_t ktime_get_raw(void)
+{
+       struct timekeeper *tk = &tk_core.timekeeper;
+       unsigned int seq;
+       ktime_t base;
+       s64 nsecs;
 
-       ts->tv_nsec = 0;
-       timespec_add_ns(ts, nsecs);
+       do {
+               seq = read_seqcount_begin(&tk_core.seq);
+               base = tk->base_raw;
+               nsecs = timekeeping_get_ns_raw(tk);
 
-}
-EXPORT_SYMBOL(timekeeping_clocktai);
+       } while (read_seqcount_retry(&tk_core.seq, seq));
 
+       return ktime_add_ns(base, nsecs);
+}
+EXPORT_SYMBOL_GPL(ktime_get_raw);
 
 /**
- * ktime_get_clocktai - Returns the TAI time of day in a ktime
+ * ktime_get_ts64 - get the monotonic clock in timespec64 format
+ * @ts:                pointer to timespec variable
  *
- * Returns the time of day in a ktime.
+ * The function calculates the monotonic clock from the realtime
+ * clock and the wall_to_monotonic offset and stores the result
+ * in normalized timespec format in the variable pointed to by @ts.
  */
-ktime_t ktime_get_clocktai(void)
+void ktime_get_ts64(struct timespec64 *ts)
 {
-       struct timespec ts;
+       struct timekeeper *tk = &tk_core.timekeeper;
+       struct timespec64 tomono;
+       s64 nsec;
+       unsigned int seq;
+
+       WARN_ON(timekeeping_suspended);
 
-       timekeeping_clocktai(&ts);
-       return timespec_to_ktime(ts);
+       do {
+               seq = read_seqcount_begin(&tk_core.seq);
+               ts->tv_sec = tk->xtime_sec;
+               nsec = timekeeping_get_ns(&tk->tkr);
+               tomono = tk->wall_to_monotonic;
+
+       } while (read_seqcount_retry(&tk_core.seq, seq));
+
+       ts->tv_sec += tomono.tv_sec;
+       ts->tv_nsec = 0;
+       timespec64_add_ns(ts, nsec + tomono.tv_nsec);
 }
-EXPORT_SYMBOL(ktime_get_clocktai);
+EXPORT_SYMBOL_GPL(ktime_get_ts64);
 
 #ifdef CONFIG_NTP_PPS
 
@@ -446,23 +659,23 @@ EXPORT_SYMBOL(ktime_get_clocktai);
  */
 void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
 {
-       struct timekeeper *tk = &timekeeper;
+       struct timekeeper *tk = &tk_core.timekeeper;
        unsigned long seq;
        s64 nsecs_raw, nsecs_real;
 
        WARN_ON_ONCE(timekeeping_suspended);
 
        do {
-               seq = read_seqcount_begin(&timekeeper_seq);
+               seq = read_seqcount_begin(&tk_core.seq);
 
-               *ts_raw = tk->raw_time;
+               *ts_raw = timespec64_to_timespec(tk->raw_time);
                ts_real->tv_sec = tk->xtime_sec;
                ts_real->tv_nsec = 0;
 
                nsecs_raw = timekeeping_get_ns_raw(tk);
-               nsecs_real = timekeeping_get_ns(tk);
+               nsecs_real = timekeeping_get_ns(&tk->tkr);
 
-       } while (read_seqcount_retry(&timekeeper_seq, seq));
+       } while (read_seqcount_retry(&tk_core.seq, seq));
 
        timespec_add_ns(ts_raw, nsecs_raw);
        timespec_add_ns(ts_real, nsecs_real);
@@ -479,9 +692,9 @@ EXPORT_SYMBOL(getnstime_raw_and_real);
  */
 void do_gettimeofday(struct timeval *tv)
 {
-       struct timespec now;
+       struct timespec64 now;
 
-       getnstimeofday(&now);
+       getnstimeofday64(&now);
        tv->tv_sec = now.tv_sec;
        tv->tv_usec = now.tv_nsec/1000;
 }
@@ -495,15 +708,15 @@ EXPORT_SYMBOL(do_gettimeofday);
  */
 int do_settimeofday(const struct timespec *tv)
 {
-       struct timekeeper *tk = &timekeeper;
-       struct timespec ts_delta, xt;
+       struct timekeeper *tk = &tk_core.timekeeper;
+       struct timespec64 ts_delta, xt, tmp;
        unsigned long flags;
 
        if (!timespec_valid_strict(tv))
                return -EINVAL;
 
        raw_spin_lock_irqsave(&timekeeper_lock, flags);
-       write_seqcount_begin(&timekeeper_seq);
+       write_seqcount_begin(&tk_core.seq);
 
        timekeeping_forward_now(tk);
 
@@ -511,13 +724,14 @@ int do_settimeofday(const struct timespec *tv)
        ts_delta.tv_sec = tv->tv_sec - xt.tv_sec;
        ts_delta.tv_nsec = tv->tv_nsec - xt.tv_nsec;
 
-       tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, ts_delta));
+       tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts_delta));
 
-       tk_set_xtime(tk, tv);
+       tmp = timespec_to_timespec64(*tv);
+       tk_set_xtime(tk, &tmp);
 
        timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
 
-       write_seqcount_end(&timekeeper_seq);
+       write_seqcount_end(&tk_core.seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 
        /* signal hrtimers about time change */
@@ -535,33 +749,35 @@ EXPORT_SYMBOL(do_settimeofday);
  */
 int timekeeping_inject_offset(struct timespec *ts)
 {
-       struct timekeeper *tk = &timekeeper;
+       struct timekeeper *tk = &tk_core.timekeeper;
        unsigned long flags;
-       struct timespec tmp;
+       struct timespec64 ts64, tmp;
        int ret = 0;
 
        if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
                return -EINVAL;
 
+       ts64 = timespec_to_timespec64(*ts);
+
        raw_spin_lock_irqsave(&timekeeper_lock, flags);
-       write_seqcount_begin(&timekeeper_seq);
+       write_seqcount_begin(&tk_core.seq);
 
        timekeeping_forward_now(tk);
 
        /* Make sure the proposed value is valid */
-       tmp = timespec_add(tk_xtime(tk),  *ts);
-       if (!timespec_valid_strict(&tmp)) {
+       tmp = timespec64_add(tk_xtime(tk),  ts64);
+       if (!timespec64_valid_strict(&tmp)) {
                ret = -EINVAL;
                goto error;
        }
 
-       tk_xtime_add(tk, ts);
-       tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *ts));
+       tk_xtime_add(tk, &ts64);
+       tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts64));
 
 error: /* even if we error out, we forwarded the time, so call update */
        timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
 
-       write_seqcount_end(&timekeeper_seq);
+       write_seqcount_end(&tk_core.seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 
        /* signal hrtimers about time change */
@@ -578,14 +794,14 @@ EXPORT_SYMBOL(timekeeping_inject_offset);
  */
 s32 timekeeping_get_tai_offset(void)
 {
-       struct timekeeper *tk = &timekeeper;
+       struct timekeeper *tk = &tk_core.timekeeper;
        unsigned int seq;
        s32 ret;
 
        do {
-               seq = read_seqcount_begin(&timekeeper_seq);
+               seq = read_seqcount_begin(&tk_core.seq);
                ret = tk->tai_offset;
-       } while (read_seqcount_retry(&timekeeper_seq, seq));
+       } while (read_seqcount_retry(&tk_core.seq, seq));
 
        return ret;
 }
@@ -606,14 +822,14 @@ static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
  */
 void timekeeping_set_tai_offset(s32 tai_offset)
 {
-       struct timekeeper *tk = &timekeeper;
+       struct timekeeper *tk = &tk_core.timekeeper;
        unsigned long flags;
 
        raw_spin_lock_irqsave(&timekeeper_lock, flags);
-       write_seqcount_begin(&timekeeper_seq);
+       write_seqcount_begin(&tk_core.seq);
        __timekeeping_set_tai_offset(tk, tai_offset);
        timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
-       write_seqcount_end(&timekeeper_seq);
+       write_seqcount_end(&tk_core.seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
        clock_was_set();
 }
@@ -625,14 +841,14 @@ void timekeeping_set_tai_offset(s32 tai_offset)
  */
 static int change_clocksource(void *data)
 {
-       struct timekeeper *tk = &timekeeper;
+       struct timekeeper *tk = &tk_core.timekeeper;
        struct clocksource *new, *old;
        unsigned long flags;
 
        new = (struct clocksource *) data;
 
        raw_spin_lock_irqsave(&timekeeper_lock, flags);
-       write_seqcount_begin(&timekeeper_seq);
+       write_seqcount_begin(&tk_core.seq);
 
        timekeeping_forward_now(tk);
        /*
@@ -641,7 +857,7 @@ static int change_clocksource(void *data)
         */
        if (try_module_get(new->owner)) {
                if (!new->enable || new->enable(new) == 0) {
-                       old = tk->clock;
+                       old = tk->tkr.clock;
                        tk_setup_internals(tk, new);
                        if (old->disable)
                                old->disable(old);
@@ -652,7 +868,7 @@ static int change_clocksource(void *data)
        }
        timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
 
-       write_seqcount_end(&timekeeper_seq);
+       write_seqcount_end(&tk_core.seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 
        return 0;
@@ -667,29 +883,14 @@ static int change_clocksource(void *data)
  */
 int timekeeping_notify(struct clocksource *clock)
 {
-       struct timekeeper *tk = &timekeeper;
+       struct timekeeper *tk = &tk_core.timekeeper;
 
-       if (tk->clock == clock)
+       if (tk->tkr.clock == clock)
                return 0;
        stop_machine(change_clocksource, clock, NULL);
        tick_clock_notify();
-       return tk->clock == clock ? 0 : -1;
-}
-
-/**
- * ktime_get_real - get the real (wall-) time in ktime_t format
- *
- * returns the time in ktime_t format
- */
-ktime_t ktime_get_real(void)
-{
-       struct timespec now;
-
-       getnstimeofday(&now);
-
-       return timespec_to_ktime(now);
+       return tk->tkr.clock == clock ? 0 : -1;
 }
-EXPORT_SYMBOL_GPL(ktime_get_real);
 
 /**
  * getrawmonotonic - Returns the raw monotonic time in a timespec
@@ -699,18 +900,20 @@ EXPORT_SYMBOL_GPL(ktime_get_real);
  */
 void getrawmonotonic(struct timespec *ts)
 {
-       struct timekeeper *tk = &timekeeper;
+       struct timekeeper *tk = &tk_core.timekeeper;
+       struct timespec64 ts64;
        unsigned long seq;
        s64 nsecs;
 
        do {
-               seq = read_seqcount_begin(&timekeeper_seq);
+               seq = read_seqcount_begin(&tk_core.seq);
                nsecs = timekeeping_get_ns_raw(tk);
-               *ts = tk->raw_time;
+               ts64 = tk->raw_time;
 
-       } while (read_seqcount_retry(&timekeeper_seq, seq));
+       } while (read_seqcount_retry(&tk_core.seq, seq));
 
-       timespec_add_ns(ts, nsecs);
+       timespec64_add_ns(&ts64, nsecs);
+       *ts = timespec64_to_timespec(ts64);
 }
 EXPORT_SYMBOL(getrawmonotonic);
 
@@ -719,16 +922,16 @@ EXPORT_SYMBOL(getrawmonotonic);
  */
 int timekeeping_valid_for_hres(void)
 {
-       struct timekeeper *tk = &timekeeper;
+       struct timekeeper *tk = &tk_core.timekeeper;
        unsigned long seq;
        int ret;
 
        do {
-               seq = read_seqcount_begin(&timekeeper_seq);
+               seq = read_seqcount_begin(&tk_core.seq);
 
-               ret = tk->clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
+               ret = tk->tkr.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
 
-       } while (read_seqcount_retry(&timekeeper_seq, seq));
+       } while (read_seqcount_retry(&tk_core.seq, seq));
 
        return ret;
 }
@@ -738,16 +941,16 @@ int timekeeping_valid_for_hres(void)
  */
 u64 timekeeping_max_deferment(void)
 {
-       struct timekeeper *tk = &timekeeper;
+       struct timekeeper *tk = &tk_core.timekeeper;
        unsigned long seq;
        u64 ret;
 
        do {
-               seq = read_seqcount_begin(&timekeeper_seq);
+               seq = read_seqcount_begin(&tk_core.seq);
 
-               ret = tk->clock->max_idle_ns;
+               ret = tk->tkr.clock->max_idle_ns;
 
-       } while (read_seqcount_retry(&timekeeper_seq, seq));
+       } while (read_seqcount_retry(&tk_core.seq, seq));
 
        return ret;
 }
@@ -787,14 +990,15 @@ void __weak read_boot_clock(struct timespec *ts)
  */
 void __init timekeeping_init(void)
 {
-       struct timekeeper *tk = &timekeeper;
+       struct timekeeper *tk = &tk_core.timekeeper;
        struct clocksource *clock;
        unsigned long flags;
-       struct timespec now, boot, tmp;
-
-       read_persistent_clock(&now);
+       struct timespec64 now, boot, tmp;
+       struct timespec ts;
 
-       if (!timespec_valid_strict(&now)) {
+       read_persistent_clock(&ts);
+       now = timespec_to_timespec64(ts);
+       if (!timespec64_valid_strict(&now)) {
                pr_warn("WARNING: Persistent clock returned invalid value!\n"
                        "         Check your CMOS/BIOS settings.\n");
                now.tv_sec = 0;
@@ -802,8 +1006,9 @@ void __init timekeeping_init(void)
        } else if (now.tv_sec || now.tv_nsec)
                persistent_clock_exist = true;
 
-       read_boot_clock(&boot);
-       if (!timespec_valid_strict(&boot)) {
+       read_boot_clock(&ts);
+       boot = timespec_to_timespec64(ts);
+       if (!timespec64_valid_strict(&boot)) {
                pr_warn("WARNING: Boot clock returned invalid value!\n"
                        "         Check your CMOS/BIOS settings.\n");
                boot.tv_sec = 0;
@@ -811,7 +1016,7 @@ void __init timekeeping_init(void)
        }
 
        raw_spin_lock_irqsave(&timekeeper_lock, flags);
-       write_seqcount_begin(&timekeeper_seq);
+       write_seqcount_begin(&tk_core.seq);
        ntp_init();
 
        clock = clocksource_default_clock();
@@ -822,24 +1027,21 @@ void __init timekeeping_init(void)
        tk_set_xtime(tk, &now);
        tk->raw_time.tv_sec = 0;
        tk->raw_time.tv_nsec = 0;
+       tk->base_raw.tv64 = 0;
        if (boot.tv_sec == 0 && boot.tv_nsec == 0)
                boot = tk_xtime(tk);
 
-       set_normalized_timespec(&tmp, -boot.tv_sec, -boot.tv_nsec);
+       set_normalized_timespec64(&tmp, -boot.tv_sec, -boot.tv_nsec);
        tk_set_wall_to_mono(tk, tmp);
 
-       tmp.tv_sec = 0;
-       tmp.tv_nsec = 0;
-       tk_set_sleep_time(tk, tmp);
-
-       memcpy(&shadow_timekeeper, &timekeeper, sizeof(timekeeper));
+       timekeeping_update(tk, TK_MIRROR);
 
-       write_seqcount_end(&timekeeper_seq);
+       write_seqcount_end(&tk_core.seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 }
 
 /* time in seconds when suspend began */
-static struct timespec timekeeping_suspend_time;
+static struct timespec64 timekeeping_suspend_time;
 
 /**
  * __timekeeping_inject_sleeptime - Internal function to add sleep interval
@@ -849,17 +1051,17 @@ static struct timespec timekeeping_suspend_time;
  * adds the sleep offset to the timekeeping variables.
  */
 static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
-                                                       struct timespec *delta)
+                                          struct timespec64 *delta)
 {
-       if (!timespec_valid_strict(delta)) {
+       if (!timespec64_valid_strict(delta)) {
                printk_deferred(KERN_WARNING
                                "__timekeeping_inject_sleeptime: Invalid "
                                "sleep delta value!\n");
                return;
        }
        tk_xtime_add(tk, delta);
-       tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *delta));
-       tk_set_sleep_time(tk, timespec_add(tk->total_sleep_time, *delta));
+       tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta));
+       tk_update_sleep_time(tk, timespec64_to_ktime(*delta));
        tk_debug_account_sleep_time(delta);
 }
 
@@ -875,7 +1077,8 @@ static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
  */
 void timekeeping_inject_sleeptime(struct timespec *delta)
 {
-       struct timekeeper *tk = &timekeeper;
+       struct timekeeper *tk = &tk_core.timekeeper;
+       struct timespec64 tmp;
        unsigned long flags;
 
        /*
@@ -886,15 +1089,16 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
                return;
 
        raw_spin_lock_irqsave(&timekeeper_lock, flags);
-       write_seqcount_begin(&timekeeper_seq);
+       write_seqcount_begin(&tk_core.seq);
 
        timekeeping_forward_now(tk);
 
-       __timekeeping_inject_sleeptime(tk, delta);
+       tmp = timespec_to_timespec64(*delta);
+       __timekeeping_inject_sleeptime(tk, &tmp);
 
        timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
 
-       write_seqcount_end(&timekeeper_seq);
+       write_seqcount_end(&tk_core.seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 
        /* signal hrtimers about time change */
@@ -910,20 +1114,22 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
  */
 static void timekeeping_resume(void)
 {
-       struct timekeeper *tk = &timekeeper;
-       struct clocksource *clock = tk->clock;
+       struct timekeeper *tk = &tk_core.timekeeper;
+       struct clocksource *clock = tk->tkr.clock;
        unsigned long flags;
-       struct timespec ts_new, ts_delta;
+       struct timespec64 ts_new, ts_delta;
+       struct timespec tmp;
        cycle_t cycle_now, cycle_delta;
        bool suspendtime_found = false;
 
-       read_persistent_clock(&ts_new);
+       read_persistent_clock(&tmp);
+       ts_new = timespec_to_timespec64(tmp);
 
        clockevents_resume();
        clocksource_resume();
 
        raw_spin_lock_irqsave(&timekeeper_lock, flags);
-       write_seqcount_begin(&timekeeper_seq);
+       write_seqcount_begin(&tk_core.seq);
 
        /*
         * After system resumes, we need to calculate the suspended time and
@@ -937,15 +1143,16 @@ static void timekeeping_resume(void)
         * The less preferred source will only be tried if there is no better
         * usable source. The rtc part is handled separately in rtc core code.
         */
-       cycle_now = clock->read(clock);
+       cycle_now = tk->tkr.read(clock);
        if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
-               cycle_now > clock->cycle_last) {
+               cycle_now > tk->tkr.cycle_last) {
                u64 num, max = ULLONG_MAX;
                u32 mult = clock->mult;
                u32 shift = clock->shift;
                s64 nsec = 0;
 
-               cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
+               cycle_delta = clocksource_delta(cycle_now, tk->tkr.cycle_last,
+                                               tk->tkr.mask);
 
                /*
                 * "cycle_delta * mutl" may cause 64 bits overflow, if the
@@ -960,10 +1167,10 @@ static void timekeeping_resume(void)
                }
                nsec += ((u64) cycle_delta * mult) >> shift;
 
-               ts_delta = ns_to_timespec(nsec);
+               ts_delta = ns_to_timespec64(nsec);
                suspendtime_found = true;
-       } else if (timespec_compare(&ts_new, &timekeeping_suspend_time) > 0) {
-               ts_delta = timespec_sub(ts_new, timekeeping_suspend_time);
+       } else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) {
+               ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time);
                suspendtime_found = true;
        }
 
@@ -971,11 +1178,11 @@ static void timekeeping_resume(void)
                __timekeeping_inject_sleeptime(tk, &ts_delta);
 
        /* Re-base the last cycle value */
-       tk->cycle_last = clock->cycle_last = cycle_now;
+       tk->tkr.cycle_last = cycle_now;
        tk->ntp_error = 0;
        timekeeping_suspended = 0;
        timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
-       write_seqcount_end(&timekeeper_seq);
+       write_seqcount_end(&tk_core.seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 
        touch_softlockup_watchdog();
@@ -988,12 +1195,14 @@ static void timekeeping_resume(void)
 
 static int timekeeping_suspend(void)
 {
-       struct timekeeper *tk = &timekeeper;
+       struct timekeeper *tk = &tk_core.timekeeper;
        unsigned long flags;
-       struct timespec         delta, delta_delta;
-       static struct timespec  old_delta;
+       struct timespec64               delta, delta_delta;
+       static struct timespec64        old_delta;
+       struct timespec tmp;
 
-       read_persistent_clock(&timekeeping_suspend_time);
+       read_persistent_clock(&tmp);
+       timekeeping_suspend_time = timespec_to_timespec64(tmp);
 
        /*
         * On some systems the persistent_clock can not be detected at
@@ -1004,7 +1213,7 @@ static int timekeeping_suspend(void)
                persistent_clock_exist = true;
 
        raw_spin_lock_irqsave(&timekeeper_lock, flags);
-       write_seqcount_begin(&timekeeper_seq);
+       write_seqcount_begin(&tk_core.seq);
        timekeeping_forward_now(tk);
        timekeeping_suspended = 1;
 
@@ -1014,8 +1223,8 @@ static int timekeeping_suspend(void)
         * try to compensate so the difference in system time
         * and persistent_clock time stays close to constant.
         */
-       delta = timespec_sub(tk_xtime(tk), timekeeping_suspend_time);
-       delta_delta = timespec_sub(delta, old_delta);
+       delta = timespec64_sub(tk_xtime(tk), timekeeping_suspend_time);
+       delta_delta = timespec64_sub(delta, old_delta);
        if (abs(delta_delta.tv_sec)  >= 2) {
                /*
                 * if delta_delta is too large, assume time correction
@@ -1025,11 +1234,11 @@ static int timekeeping_suspend(void)
        } else {
                /* Otherwise try to adjust old_system to compensate */
                timekeeping_suspend_time =
-                       timespec_add(timekeeping_suspend_time, delta_delta);
+                       timespec64_add(timekeeping_suspend_time, delta_delta);
        }
 
        timekeeping_update(tk, TK_MIRROR);
-       write_seqcount_end(&timekeeper_seq);
+       write_seqcount_end(&tk_core.seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 
        clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
@@ -1050,125 +1259,34 @@ static int __init timekeeping_init_ops(void)
        register_syscore_ops(&timekeeping_syscore_ops);
        return 0;
 }
-
 device_initcall(timekeeping_init_ops);
 
 /*
- * If the error is already larger, we look ahead even further
- * to compensate for late or lost adjustments.
+ * Apply a multiplier adjustment to the timekeeper
  */
-static __always_inline int timekeeping_bigadjust(struct timekeeper *tk,
-                                                s64 error, s64 *interval,
-                                                s64 *offset)
+static __always_inline void timekeeping_apply_adjustment(struct timekeeper *tk,
+                                                        s64 offset,
+                                                        bool negative,
+                                                        int adj_scale)
 {
-       s64 tick_error, i;
-       u32 look_ahead, adj;
-       s32 error2, mult;
-
-       /*
-        * Use the current error value to determine how much to look ahead.
-        * The larger the error the slower we adjust for it to avoid problems
-        * with losing too many ticks, otherwise we would overadjust and
-        * produce an even larger error.  The smaller the adjustment the
-        * faster we try to adjust for it, as lost ticks can do less harm
-        * here.  This is tuned so that an error of about 1 msec is adjusted
-        * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
-        */
-       error2 = tk->ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
-       error2 = abs(error2);
-       for (look_ahead = 0; error2 > 0; look_ahead++)
-               error2 >>= 2;
+       s64 interval = tk->cycle_interval;
+       s32 mult_adj = 1;
 
-       /*
-        * Now calculate the error in (1 << look_ahead) ticks, but first
-        * remove the single look ahead already included in the error.
-        */
-       tick_error = ntp_tick_length() >> (tk->ntp_error_shift + 1);
-       tick_error -= tk->xtime_interval >> 1;
-       error = ((error - tick_error) >> look_ahead) + tick_error;
-
-       /* Finally calculate the adjustment shift value.  */
-       i = *interval;
-       mult = 1;
-       if (error < 0) {
-               error = -error;
-               *interval = -*interval;
-               *offset = -*offset;
-               mult = -1;
+       if (negative) {
+               mult_adj = -mult_adj;
+               interval = -interval;
+               offset  = -offset;
        }
-       for (adj = 0; error > i; adj++)
-               error >>= 1;
-
-       *interval <<= adj;
-       *offset <<= adj;
-       return mult << adj;
-}
-
-/*
- * Adjust the multiplier to reduce the error value,
- * this is optimized for the most common adjustments of -1,0,1,
- * for other values we can do a bit more work.
- */
-static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
-{
-       s64 error, interval = tk->cycle_interval;
-       int adj;
+       mult_adj <<= adj_scale;
+       interval <<= adj_scale;
+       offset <<= adj_scale;
 
-       /*
-        * The point of this is to check if the error is greater than half
-        * an interval.
-        *
-        * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs.
-        *
-        * Note we subtract one in the shift, so that error is really error*2.
-        * This "saves" dividing(shifting) interval twice, but keeps the
-        * (error > interval) comparison as still measuring if error is
-        * larger than half an interval.
-        *
-        * Note: It does not "save" on aggravation when reading the code.
-        */
-       error = tk->ntp_error >> (tk->ntp_error_shift - 1);
-       if (error > interval) {
-               /*
-                * We now divide error by 4(via shift), which checks if
-                * the error is greater than twice the interval.
-                * If it is greater, we need a bigadjust, if its smaller,
-                * we can adjust by 1.
-                */
-               error >>= 2;
-               if (likely(error <= interval))
-                       adj = 1;
-               else
-                       adj = timekeeping_bigadjust(tk, error, &interval, &offset);
-       } else {
-               if (error < -interval) {
-                       /* See comment above, this is just switched for the negative */
-                       error >>= 2;
-                       if (likely(error >= -interval)) {
-                               adj = -1;
-                               interval = -interval;
-                               offset = -offset;
-                       } else {
-                               adj = timekeeping_bigadjust(tk, error, &interval, &offset);
-                       }
-               } else {
-                       goto out_adjust;
-               }
-       }
-
-       if (unlikely(tk->clock->maxadj &&
-               (tk->mult + adj > tk->clock->mult + tk->clock->maxadj))) {
-               printk_deferred_once(KERN_WARNING
-                       "Adjusting %s more than 11%% (%ld vs %ld)\n",
-                       tk->clock->name, (long)tk->mult + adj,
-                       (long)tk->clock->mult + tk->clock->maxadj);
-       }
        /*
         * So the following can be confusing.
         *
-        * To keep things simple, lets assume adj == 1 for now.
+        * To keep things simple, lets assume mult_adj == 1 for now.
         *
-        * When adj != 1, remember that the interval and offset values
+        * When mult_adj != 1, remember that the interval and offset values
         * have been appropriately scaled so the math is the same.
         *
         * The basic idea here is that we're increasing the multiplier
@@ -1212,12 +1330,78 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
         *
         * XXX - TODO: Doc ntp_error calculation.
         */
-       tk->mult += adj;
+       tk->tkr.mult += mult_adj;
        tk->xtime_interval += interval;
-       tk->xtime_nsec -= offset;
+       tk->tkr.xtime_nsec -= offset;
        tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;
+}
+
+/*
+ * Calculate the multiplier adjustment needed to match the frequency
+ * specified by NTP
+ */
+static __always_inline void timekeeping_freqadjust(struct timekeeper *tk,
+                                                       s64 offset)
+{
+       s64 interval = tk->cycle_interval;
+       s64 xinterval = tk->xtime_interval;
+       s64 tick_error;
+       bool negative;
+       u32 adj;
+
+       /* Remove any current error adj from freq calculation */
+       if (tk->ntp_err_mult)
+               xinterval -= tk->cycle_interval;
+
+       tk->ntp_tick = ntp_tick_length();
+
+       /* Calculate current error per tick */
+       tick_error = ntp_tick_length() >> tk->ntp_error_shift;
+       tick_error -= (xinterval + tk->xtime_remainder);
+
+       /* Don't worry about correcting it if its small */
+       if (likely((tick_error >= 0) && (tick_error <= interval)))
+               return;
+
+       /* preserve the direction of correction */
+       negative = (tick_error < 0);
+
+       /* Sort out the magnitude of the correction */
+       tick_error = abs(tick_error);
+       for (adj = 0; tick_error > interval; adj++)
+               tick_error >>= 1;
+
+       /* scale the corrections */
+       timekeeping_apply_adjustment(tk, offset, negative, adj);
+}
+
+/*
+ * Adjust the timekeeper's multiplier to the correct frequency
+ * and also to reduce the accumulated error value.
+ */
+static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
+{
+       /* Correct for the current frequency error */
+       timekeeping_freqadjust(tk, offset);
+
+       /* Next make a small adjustment to fix any cumulative error */
+       if (!tk->ntp_err_mult && (tk->ntp_error > 0)) {
+               tk->ntp_err_mult = 1;
+               timekeeping_apply_adjustment(tk, offset, 0, 0);
+       } else if (tk->ntp_err_mult && (tk->ntp_error <= 0)) {
+               /* Undo any existing error adjustment */
+               timekeeping_apply_adjustment(tk, offset, 1, 0);
+               tk->ntp_err_mult = 0;
+       }
+
+       if (unlikely(tk->tkr.clock->maxadj &&
+               (tk->tkr.mult > tk->tkr.clock->mult + tk->tkr.clock->maxadj))) {
+               printk_once(KERN_WARNING
+                       "Adjusting %s more than 11%% (%ld vs %ld)\n",
+                       tk->tkr.clock->name, (long)tk->tkr.mult,
+                       (long)tk->tkr.clock->mult + tk->tkr.clock->maxadj);
+       }
 
-out_adjust:
        /*
         * It may be possible that when we entered this function, xtime_nsec
         * was very small.  Further, if we're slightly speeding the clocksource
@@ -1232,12 +1416,11 @@ out_adjust:
         * We'll correct this error next time through this function, when
         * xtime_nsec is not as small.
         */
-       if (unlikely((s64)tk->xtime_nsec < 0)) {
-               s64 neg = -(s64)tk->xtime_nsec;
-               tk->xtime_nsec = 0;
+       if (unlikely((s64)tk->tkr.xtime_nsec < 0)) {
+               s64 neg = -(s64)tk->tkr.xtime_nsec;
+               tk->tkr.xtime_nsec = 0;
                tk->ntp_error += neg << tk->ntp_error_shift;
        }
-
 }
 
 /**
@@ -1250,26 +1433,26 @@ out_adjust:
  */
 static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
 {
-       u64 nsecps = (u64)NSEC_PER_SEC << tk->shift;
+       u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr.shift;
        unsigned int clock_set = 0;
 
-       while (tk->xtime_nsec >= nsecps) {
+       while (tk->tkr.xtime_nsec >= nsecps) {
                int leap;
 
-               tk->xtime_nsec -= nsecps;
+               tk->tkr.xtime_nsec -= nsecps;
                tk->xtime_sec++;
 
                /* Figure out if its a leap sec and apply if needed */
                leap = second_overflow(tk->xtime_sec);
                if (unlikely(leap)) {
-                       struct timespec ts;
+                       struct timespec64 ts;
 
                        tk->xtime_sec += leap;
 
                        ts.tv_sec = leap;
                        ts.tv_nsec = 0;
                        tk_set_wall_to_mono(tk,
-                               timespec_sub(tk->wall_to_monotonic, ts));
+                               timespec64_sub(tk->wall_to_monotonic, ts));
 
                        __timekeeping_set_tai_offset(tk, tk->tai_offset - leap);
 
@@ -1301,9 +1484,9 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
 
        /* Accumulate one shifted interval */
        offset -= interval;
-       tk->cycle_last += interval;
+       tk->tkr.cycle_last += interval;
 
-       tk->xtime_nsec += tk->xtime_interval << shift;
+       tk->tkr.xtime_nsec += tk->xtime_interval << shift;
        *clock_set |= accumulate_nsecs_to_secs(tk);
 
        /* Accumulate raw time */
@@ -1317,48 +1500,20 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
        tk->raw_time.tv_nsec = raw_nsecs;
 
        /* Accumulate error between NTP and clock interval */
-       tk->ntp_error += ntp_tick_length() << shift;
+       tk->ntp_error += tk->ntp_tick << shift;
        tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
                                                (tk->ntp_error_shift + shift);
 
        return offset;
 }
 
-#ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD
-static inline void old_vsyscall_fixup(struct timekeeper *tk)
-{
-       s64 remainder;
-
-       /*
-       * Store only full nanoseconds into xtime_nsec after rounding
-       * it up and add the remainder to the error difference.
-       * XXX - This is necessary to avoid small 1ns inconsistnecies caused
-       * by truncating the remainder in vsyscalls. However, it causes
-       * additional work to be done in timekeeping_adjust(). Once
-       * the vsyscall implementations are converted to use xtime_nsec
-       * (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
-       * users are removed, this can be killed.
-       */
-       remainder = tk->xtime_nsec & ((1ULL << tk->shift) - 1);
-       tk->xtime_nsec -= remainder;
-       tk->xtime_nsec += 1ULL << tk->shift;
-       tk->ntp_error += remainder << tk->ntp_error_shift;
-       tk->ntp_error -= (1ULL << tk->shift) << tk->ntp_error_shift;
-}
-#else
-#define old_vsyscall_fixup(tk)
-#endif
-
-
-
 /**
  * update_wall_time - Uses the current clocksource to increment the wall time
  *
  */
 void update_wall_time(void)
 {
-       struct clocksource *clock;
-       struct timekeeper *real_tk = &timekeeper;
+       struct timekeeper *real_tk = &tk_core.timekeeper;
        struct timekeeper *tk = &shadow_timekeeper;
        cycle_t offset;
        int shift = 0, maxshift;
@@ -1371,12 +1526,11 @@ void update_wall_time(void)
        if (unlikely(timekeeping_suspended))
                goto out;
 
-       clock = real_tk->clock;
-
 #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
        offset = real_tk->cycle_interval;
 #else
-       offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
+       offset = clocksource_delta(tk->tkr.read(tk->tkr.clock),
+                                  tk->tkr.cycle_last, tk->tkr.mask);
 #endif
 
        /* Check if there's really nothing to do */
@@ -1418,9 +1572,7 @@ void update_wall_time(void)
         */
        clock_set |= accumulate_nsecs_to_secs(tk);
 
-       write_seqcount_begin(&timekeeper_seq);
-       /* Update clock->cycle_last with the new value */
-       clock->cycle_last = tk->cycle_last;
+       write_seqcount_begin(&tk_core.seq);
        /*
         * Update the real timekeeper.
         *
@@ -1428,12 +1580,12 @@ void update_wall_time(void)
         * requires changes to all other timekeeper usage sites as
         * well, i.e. move the timekeeper pointer getter into the
         * spinlocked/seqcount protected sections. And we trade this
-        * memcpy under the timekeeper_seq against one before we start
+        * memcpy under the tk_core.seq against one before we start
         * updating.
         */
        memcpy(real_tk, tk, sizeof(*tk));
        timekeeping_update(real_tk, clock_set);
-       write_seqcount_end(&timekeeper_seq);
+       write_seqcount_end(&tk_core.seq);
 out:
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
        if (clock_set)
@@ -1454,83 +1606,16 @@ out:
  */
 void getboottime(struct timespec *ts)
 {
-       struct timekeeper *tk = &timekeeper;
-       struct timespec boottime = {
-               .tv_sec = tk->wall_to_monotonic.tv_sec +
-                               tk->total_sleep_time.tv_sec,
-               .tv_nsec = tk->wall_to_monotonic.tv_nsec +
-                               tk->total_sleep_time.tv_nsec
-       };
-
-       set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
-}
-EXPORT_SYMBOL_GPL(getboottime);
-
-/**
- * get_monotonic_boottime - Returns monotonic time since boot
- * @ts:                pointer to the timespec to be set
- *
- * Returns the monotonic time since boot in a timespec.
- *
- * This is similar to CLOCK_MONTONIC/ktime_get_ts, but also
- * includes the time spent in suspend.
- */
-void get_monotonic_boottime(struct timespec *ts)
-{
-       struct timekeeper *tk = &timekeeper;
-       struct timespec tomono, sleep;
-       s64 nsec;
-       unsigned int seq;
-
-       WARN_ON(timekeeping_suspended);
-
-       do {
-               seq = read_seqcount_begin(&timekeeper_seq);
-               ts->tv_sec = tk->xtime_sec;
-               nsec = timekeeping_get_ns(tk);
-               tomono = tk->wall_to_monotonic;
-               sleep = tk->total_sleep_time;
-
-       } while (read_seqcount_retry(&timekeeper_seq, seq));
-
-       ts->tv_sec += tomono.tv_sec + sleep.tv_sec;
-       ts->tv_nsec = 0;
-       timespec_add_ns(ts, nsec + tomono.tv_nsec + sleep.tv_nsec);
-}
-EXPORT_SYMBOL_GPL(get_monotonic_boottime);
-
-/**
- * ktime_get_boottime - Returns monotonic time since boot in a ktime
- *
- * Returns the monotonic time since boot in a ktime
- *
- * This is similar to CLOCK_MONTONIC/ktime_get, but also
- * includes the time spent in suspend.
- */
-ktime_t ktime_get_boottime(void)
-{
-       struct timespec ts;
-
-       get_monotonic_boottime(&ts);
-       return timespec_to_ktime(ts);
-}
-EXPORT_SYMBOL_GPL(ktime_get_boottime);
-
-/**
- * monotonic_to_bootbased - Convert the monotonic time to boot based.
- * @ts:                pointer to the timespec to be converted
- */
-void monotonic_to_bootbased(struct timespec *ts)
-{
-       struct timekeeper *tk = &timekeeper;
+       struct timekeeper *tk = &tk_core.timekeeper;
+       ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot);
 
-       *ts = timespec_add(*ts, tk->total_sleep_time);
+       *ts = ktime_to_timespec(t);
 }
-EXPORT_SYMBOL_GPL(monotonic_to_bootbased);
+EXPORT_SYMBOL_GPL(getboottime);
 
 unsigned long get_seconds(void)
 {
-       struct timekeeper *tk = &timekeeper;
+       struct timekeeper *tk = &tk_core.timekeeper;
 
        return tk->xtime_sec;
 }
@@ -1538,43 +1623,44 @@ EXPORT_SYMBOL(get_seconds);
 
 struct timespec __current_kernel_time(void)
 {
-       struct timekeeper *tk = &timekeeper;
+       struct timekeeper *tk = &tk_core.timekeeper;
 
-       return tk_xtime(tk);
+       return timespec64_to_timespec(tk_xtime(tk));
 }
 
 struct timespec current_kernel_time(void)
 {
-       struct timekeeper *tk = &timekeeper;
-       struct timespec now;
+       struct timekeeper *tk = &tk_core.timekeeper;
+       struct timespec64 now;
        unsigned long seq;
 
        do {
-               seq = read_seqcount_begin(&timekeeper_seq);
+               seq = read_seqcount_begin(&tk_core.seq);
 
                now = tk_xtime(tk);
-       } while (read_seqcount_retry(&timekeeper_seq, seq));
+       } while (read_seqcount_retry(&tk_core.seq, seq));
 
-       return now;
+       return timespec64_to_timespec(now);
 }
 EXPORT_SYMBOL(current_kernel_time);
 
 struct timespec get_monotonic_coarse(void)
 {
-       struct timekeeper *tk = &timekeeper;
-       struct timespec now, mono;
+       struct timekeeper *tk = &tk_core.timekeeper;
+       struct timespec64 now, mono;
        unsigned long seq;
 
        do {
-               seq = read_seqcount_begin(&timekeeper_seq);
+               seq = read_seqcount_begin(&tk_core.seq);
 
                now = tk_xtime(tk);
                mono = tk->wall_to_monotonic;
-       } while (read_seqcount_retry(&timekeeper_seq, seq));
+       } while (read_seqcount_retry(&tk_core.seq, seq));
 
-       set_normalized_timespec(&now, now.tv_sec + mono.tv_sec,
+       set_normalized_timespec64(&now, now.tv_sec + mono.tv_sec,
                                now.tv_nsec + mono.tv_nsec);
-       return now;
+
+       return timespec64_to_timespec(now);
 }
 
 /*
@@ -1587,29 +1673,38 @@ void do_timer(unsigned long ticks)
 }
 
 /**
- * get_xtime_and_monotonic_and_sleep_offset() - get xtime, wall_to_monotonic,
- *    and sleep offsets.
- * @xtim:      pointer to timespec to be set with xtime
- * @wtom:      pointer to timespec to be set with wall_to_monotonic
- * @sleep:     pointer to timespec to be set with time in suspend
+ * ktime_get_update_offsets_tick - hrtimer helper
+ * @offs_real: pointer to storage for monotonic -> realtime offset
+ * @offs_boot: pointer to storage for monotonic -> boottime offset
+ * @offs_tai:  pointer to storage for monotonic -> clock tai offset
+ *
+ * Returns monotonic time at last tick and various offsets
  */
-void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
-                               struct timespec *wtom, struct timespec *sleep)
+ktime_t ktime_get_update_offsets_tick(ktime_t *offs_real, ktime_t *offs_boot,
+                                                       ktime_t *offs_tai)
 {
-       struct timekeeper *tk = &timekeeper;
-       unsigned long seq;
+       struct timekeeper *tk = &tk_core.timekeeper;
+       unsigned int seq;
+       ktime_t base;
+       u64 nsecs;
 
        do {
-               seq = read_seqcount_begin(&timekeeper_seq);
-               *xtim = tk_xtime(tk);
-               *wtom = tk->wall_to_monotonic;
-               *sleep = tk->total_sleep_time;
-       } while (read_seqcount_retry(&timekeeper_seq, seq));
+               seq = read_seqcount_begin(&tk_core.seq);
+
+               base = tk->tkr.base_mono;
+               nsecs = tk->tkr.xtime_nsec >> tk->tkr.shift;
+
+               *offs_real = tk->offs_real;
+               *offs_boot = tk->offs_boot;
+               *offs_tai = tk->offs_tai;
+       } while (read_seqcount_retry(&tk_core.seq, seq));
+
+       return ktime_add_ns(base, nsecs);
 }
 
 #ifdef CONFIG_HIGH_RES_TIMERS
 /**
- * ktime_get_update_offsets - hrtimer helper
+ * ktime_get_update_offsets_now - hrtimer helper
  * @offs_real: pointer to storage for monotonic -> realtime offset
  * @offs_boot: pointer to storage for monotonic -> boottime offset
  * @offs_tai:  pointer to storage for monotonic -> clock tai offset
@@ -1617,57 +1712,37 @@ void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
  * Returns current monotonic time and updates the offsets
  * Called from hrtimer_interrupt() or retrigger_next_event()
  */
-ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot,
+ktime_t ktime_get_update_offsets_now(ktime_t *offs_real, ktime_t *offs_boot,
                                                        ktime_t *offs_tai)
 {
-       struct timekeeper *tk = &timekeeper;
-       ktime_t now;
+       struct timekeeper *tk = &tk_core.timekeeper;
        unsigned int seq;
-       u64 secs, nsecs;
+       ktime_t base;
+       u64 nsecs;
 
        do {
-               seq = read_seqcount_begin(&timekeeper_seq);
+               seq = read_seqcount_begin(&tk_core.seq);
 
-               secs = tk->xtime_sec;
-               nsecs = timekeeping_get_ns(tk);
+               base = tk->tkr.base_mono;
+               nsecs = timekeeping_get_ns(&tk->tkr);
 
                *offs_real = tk->offs_real;
                *offs_boot = tk->offs_boot;
                *offs_tai = tk->offs_tai;
-       } while (read_seqcount_retry(&timekeeper_seq, seq));
+       } while (read_seqcount_retry(&tk_core.seq, seq));
 
-       now = ktime_add_ns(ktime_set(secs, 0), nsecs);
-       now = ktime_sub(now, *offs_real);
-       return now;
+       return ktime_add_ns(base, nsecs);
 }
 #endif
 
-/**
- * ktime_get_monotonic_offset() - get wall_to_monotonic in ktime_t format
- */
-ktime_t ktime_get_monotonic_offset(void)
-{
-       struct timekeeper *tk = &timekeeper;
-       unsigned long seq;
-       struct timespec wtom;
-
-       do {
-               seq = read_seqcount_begin(&timekeeper_seq);
-               wtom = tk->wall_to_monotonic;
-       } while (read_seqcount_retry(&timekeeper_seq, seq));
-
-       return timespec_to_ktime(wtom);
-}
-EXPORT_SYMBOL_GPL(ktime_get_monotonic_offset);
-
 /**
  * do_adjtimex() - Accessor function to NTP __do_adjtimex function
  */
 int do_adjtimex(struct timex *txc)
 {
-       struct timekeeper *tk = &timekeeper;
+       struct timekeeper *tk = &tk_core.timekeeper;
        unsigned long flags;
-       struct timespec ts;
+       struct timespec64 ts;
        s32 orig_tai, tai;
        int ret;
 
@@ -1687,10 +1762,10 @@ int do_adjtimex(struct timex *txc)
                        return ret;
        }
 
-       getnstimeofday(&ts);
+       getnstimeofday64(&ts);
 
        raw_spin_lock_irqsave(&timekeeper_lock, flags);
-       write_seqcount_begin(&timekeeper_seq);
+       write_seqcount_begin(&tk_core.seq);
 
        orig_tai = tai = tk->tai_offset;
        ret = __do_adjtimex(txc, &ts, &tai);
@@ -1699,7 +1774,7 @@ int do_adjtimex(struct timex *txc)
                __timekeeping_set_tai_offset(tk, tai);
                timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
        }
-       write_seqcount_end(&timekeeper_seq);
+       write_seqcount_end(&tk_core.seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 
        if (tai != orig_tai)
@@ -1719,11 +1794,11 @@ void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
        unsigned long flags;
 
        raw_spin_lock_irqsave(&timekeeper_lock, flags);
-       write_seqcount_begin(&timekeeper_seq);
+       write_seqcount_begin(&tk_core.seq);
 
        __hardpps(phase_ts, raw_ts);
 
-       write_seqcount_end(&timekeeper_seq);
+       write_seqcount_end(&tk_core.seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 }
 EXPORT_SYMBOL(hardpps);
diff --git a/kernel/time/timekeeping.h b/kernel/time/timekeeping.h
new file mode 100644 (file)
index 0000000..adc1fc9
--- /dev/null
@@ -0,0 +1,20 @@
+#ifndef _KERNEL_TIME_TIMEKEEPING_H
+#define _KERNEL_TIME_TIMEKEEPING_H
+/*
+ * Internal interfaces for kernel/time/
+ */
+extern ktime_t ktime_get_update_offsets_tick(ktime_t *offs_real,
+                                               ktime_t *offs_boot,
+                                               ktime_t *offs_tai);
+extern ktime_t ktime_get_update_offsets_now(ktime_t *offs_real,
+                                               ktime_t *offs_boot,
+                                               ktime_t *offs_tai);
+
+extern int timekeeping_valid_for_hres(void);
+extern u64 timekeeping_max_deferment(void);
+extern int timekeeping_inject_offset(struct timespec *ts);
+extern s32 timekeeping_get_tai_offset(void);
+extern void timekeeping_set_tai_offset(s32 tai_offset);
+extern void timekeeping_clocktai(struct timespec *ts);
+
+#endif
index 4d54f97..f6bd652 100644 (file)
@@ -67,7 +67,7 @@ static int __init tk_debug_sleep_time_init(void)
 }
 late_initcall(tk_debug_sleep_time_init);
 
-void tk_debug_account_sleep_time(struct timespec *t)
+void tk_debug_account_sleep_time(struct timespec64 *t)
 {
        sleep_time_bin[fls(t->tv_sec)]++;
 }
index 13323ea..4ea005a 100644 (file)
@@ -3,12 +3,27 @@
 /*
  * timekeeping debug functions
  */
+#include <linux/clocksource.h>
 #include <linux/time.h>
 
 #ifdef CONFIG_DEBUG_FS
-extern void tk_debug_account_sleep_time(struct timespec *t);
+extern void tk_debug_account_sleep_time(struct timespec64 *t);
 #else
 #define tk_debug_account_sleep_time(x)
 #endif
 
+#ifdef CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE
+static inline cycle_t clocksource_delta(cycle_t now, cycle_t last, cycle_t mask)
+{
+       cycle_t ret = (now - last) & mask;
+
+       return (s64) ret > 0 ? ret : 0;
+}
+#else
+static inline cycle_t clocksource_delta(cycle_t now, cycle_t last, cycle_t mask)
+{
+       return (now - last) & mask;
+}
+#endif
+
 #endif /* _TIMEKEEPING_INTERNAL_H */
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
new file mode 100644 (file)
index 0000000..aca5dfe
--- /dev/null
@@ -0,0 +1,1736 @@
+/*
+ *  linux/kernel/timer.c
+ *
+ *  Kernel internal timers
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
+ *
+ *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
+ *              "A Kernel Model for Precision Timekeeping" by Dave Mills
+ *  1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
+ *              serialize accesses to xtime/lost_ticks).
+ *                              Copyright (C) 1998  Andrea Arcangeli
+ *  1999-03-10  Improved NTP compatibility by Ulrich Windl
+ *  2002-05-31 Move sys_sysinfo here and make its locking sane, Robert Love
+ *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
+ *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
+ *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
+ */
+
+#include <linux/kernel_stat.h>
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/pid_namespace.h>
+#include <linux/notifier.h>
+#include <linux/thread_info.h>
+#include <linux/time.h>
+#include <linux/jiffies.h>
+#include <linux/posix-timers.h>
+#include <linux/cpu.h>
+#include <linux/syscalls.h>
+#include <linux/delay.h>
+#include <linux/tick.h>
+#include <linux/kallsyms.h>
+#include <linux/irq_work.h>
+#include <linux/sched.h>
+#include <linux/sched/sysctl.h>
+#include <linux/slab.h>
+#include <linux/compat.h>
+
+#include <asm/uaccess.h>
+#include <asm/unistd.h>
+#include <asm/div64.h>
+#include <asm/timex.h>
+#include <asm/io.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/timer.h>
+
+__visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
+
+EXPORT_SYMBOL(jiffies_64);
+
+/*
+ * per-CPU timer vector definitions:
+ */
+#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
+#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
+#define TVN_SIZE (1 << TVN_BITS)
+#define TVR_SIZE (1 << TVR_BITS)
+#define TVN_MASK (TVN_SIZE - 1)
+#define TVR_MASK (TVR_SIZE - 1)
+#define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1))
+
+struct tvec {
+       struct list_head vec[TVN_SIZE];
+};
+
+struct tvec_root {
+       struct list_head vec[TVR_SIZE];
+};
+
+struct tvec_base {
+       spinlock_t lock;
+       struct timer_list *running_timer;
+       unsigned long timer_jiffies;
+       unsigned long next_timer;
+       unsigned long active_timers;
+       unsigned long all_timers;
+       int cpu;
+       struct tvec_root tv1;
+       struct tvec tv2;
+       struct tvec tv3;
+       struct tvec tv4;
+       struct tvec tv5;
+} ____cacheline_aligned;
+
+struct tvec_base boot_tvec_bases;
+EXPORT_SYMBOL(boot_tvec_bases);
+static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;
+
+/* Functions below help us manage 'deferrable' flag */
+static inline unsigned int tbase_get_deferrable(struct tvec_base *base)
+{
+       return ((unsigned int)(unsigned long)base & TIMER_DEFERRABLE);
+}
+
+static inline unsigned int tbase_get_irqsafe(struct tvec_base *base)
+{
+       return ((unsigned int)(unsigned long)base & TIMER_IRQSAFE);
+}
+
+static inline struct tvec_base *tbase_get_base(struct tvec_base *base)
+{
+       return ((struct tvec_base *)((unsigned long)base & ~TIMER_FLAG_MASK));
+}
+
+static inline void
+timer_set_base(struct timer_list *timer, struct tvec_base *new_base)
+{
+       unsigned long flags = (unsigned long)timer->base & TIMER_FLAG_MASK;
+
+       timer->base = (struct tvec_base *)((unsigned long)(new_base) | flags);
+}
+
+static unsigned long round_jiffies_common(unsigned long j, int cpu,
+               bool force_up)
+{
+       int rem;
+       unsigned long original = j;
+
+       /*
+        * We don't want all cpus firing their timers at once hitting the
+        * same lock or cachelines, so we skew each extra cpu with an extra
+        * 3 jiffies. This 3 jiffies came originally from the mm/ code which
+        * already did this.
+        * The skew is done by adding 3*cpunr, then round, then subtract this
+        * extra offset again.
+        */
+       j += cpu * 3;
+
+       rem = j % HZ;
+
+       /*
+        * If the target jiffy is just after a whole second (which can happen
+        * due to delays of the timer irq, long irq off times etc etc) then
+        * we should round down to the whole second, not up. Use 1/4th second
+        * as cutoff for this rounding as an extreme upper bound for this.
+        * But never round down if @force_up is set.
+        */
+       if (rem < HZ/4 && !force_up) /* round down */
+               j = j - rem;
+       else /* round up */
+               j = j - rem + HZ;
+
+       /* now that we have rounded, subtract the extra skew again */
+       j -= cpu * 3;
+
+       /*
+        * Make sure j is still in the future. Otherwise return the
+        * unmodified value.
+        */
+       return time_is_after_jiffies(j) ? j : original;
+}
+
+/**
+ * __round_jiffies - function to round jiffies to a full second
+ * @j: the time in (absolute) jiffies that should be rounded
+ * @cpu: the processor number on which the timeout will happen
+ *
+ * __round_jiffies() rounds an absolute time in the future (in jiffies)
+ * up or down to (approximately) full seconds. This is useful for timers
+ * for which the exact time they fire does not matter too much, as long as
+ * they fire approximately every X seconds.
+ *
+ * By rounding these timers to whole seconds, all such timers will fire
+ * at the same time, rather than at various times spread out. The goal
+ * of this is to have the CPU wake up less, which saves power.
+ *
+ * The exact rounding is skewed for each processor to avoid all
+ * processors firing at the exact same time, which could lead
+ * to lock contention or spurious cache line bouncing.
+ *
+ * The return value is the rounded version of the @j parameter.
+ */
+unsigned long __round_jiffies(unsigned long j, int cpu)
+{
+       return round_jiffies_common(j, cpu, false);
+}
+EXPORT_SYMBOL_GPL(__round_jiffies);
+
+/**
+ * __round_jiffies_relative - function to round jiffies to a full second
+ * @j: the time in (relative) jiffies that should be rounded
+ * @cpu: the processor number on which the timeout will happen
+ *
+ * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
+ * up or down to (approximately) full seconds. This is useful for timers
+ * for which the exact time they fire does not matter too much, as long as
+ * they fire approximately every X seconds.
+ *
+ * By rounding these timers to whole seconds, all such timers will fire
+ * at the same time, rather than at various times spread out. The goal
+ * of this is to have the CPU wake up less, which saves power.
+ *
+ * The exact rounding is skewed for each processor to avoid all
+ * processors firing at the exact same time, which could lead
+ * to lock contention or spurious cache line bouncing.
+ *
+ * The return value is the rounded version of the @j parameter.
+ */
+unsigned long __round_jiffies_relative(unsigned long j, int cpu)
+{
+       unsigned long j0 = jiffies;
+
+       /* Use j0 because jiffies might change while we run */
+       return round_jiffies_common(j + j0, cpu, false) - j0;
+}
+EXPORT_SYMBOL_GPL(__round_jiffies_relative);
+
+/**
+ * round_jiffies - function to round jiffies to a full second
+ * @j: the time in (absolute) jiffies that should be rounded
+ *
+ * round_jiffies() rounds an absolute time in the future (in jiffies)
+ * up or down to (approximately) full seconds. This is useful for timers
+ * for which the exact time they fire does not matter too much, as long as
+ * they fire approximately every X seconds.
+ *
+ * By rounding these timers to whole seconds, all such timers will fire
+ * at the same time, rather than at various times spread out. The goal
+ * of this is to have the CPU wake up less, which saves power.
+ *
+ * The return value is the rounded version of the @j parameter.
+ */
+unsigned long round_jiffies(unsigned long j)
+{
+       return round_jiffies_common(j, raw_smp_processor_id(), false);
+}
+EXPORT_SYMBOL_GPL(round_jiffies);
+
+/**
+ * round_jiffies_relative - function to round jiffies to a full second
+ * @j: the time in (relative) jiffies that should be rounded
+ *
+ * round_jiffies_relative() rounds a time delta in the future (in jiffies)
+ * up or down to (approximately) full seconds. This is useful for timers
+ * for which the exact time they fire does not matter too much, as long as
+ * they fire approximately every X seconds.
+ *
+ * By rounding these timers to whole seconds, all such timers will fire
+ * at the same time, rather than at various times spread out. The goal
+ * of this is to have the CPU wake up less, which saves power.
+ *
+ * The return value is the rounded version of the @j parameter.
+ */
+unsigned long round_jiffies_relative(unsigned long j)
+{
+       return __round_jiffies_relative(j, raw_smp_processor_id());
+}
+EXPORT_SYMBOL_GPL(round_jiffies_relative);
+
+/**
+ * __round_jiffies_up - function to round jiffies up to a full second
+ * @j: the time in (absolute) jiffies that should be rounded
+ * @cpu: the processor number on which the timeout will happen
+ *
+ * This is the same as __round_jiffies() except that it will never
+ * round down.  This is useful for timeouts for which the exact time
+ * of firing does not matter too much, as long as they don't fire too
+ * early.
+ */
+unsigned long __round_jiffies_up(unsigned long j, int cpu)
+{
+       return round_jiffies_common(j, cpu, true);
+}
+EXPORT_SYMBOL_GPL(__round_jiffies_up);
+
+/**
+ * __round_jiffies_up_relative - function to round jiffies up to a full second
+ * @j: the time in (relative) jiffies that should be rounded
+ * @cpu: the processor number on which the timeout will happen
+ *
+ * This is the same as __round_jiffies_relative() except that it will never
+ * round down.  This is useful for timeouts for which the exact time
+ * of firing does not matter too much, as long as they don't fire too
+ * early.
+ */
+unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
+{
+       unsigned long j0 = jiffies;
+
+       /* Use j0 because jiffies might change while we run */
+       return round_jiffies_common(j + j0, cpu, true) - j0;
+}
+EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);
+
+/**
+ * round_jiffies_up - function to round jiffies up to a full second
+ * @j: the time in (absolute) jiffies that should be rounded
+ *
+ * This is the same as round_jiffies() except that it will never
+ * round down.  This is useful for timeouts for which the exact time
+ * of firing does not matter too much, as long as they don't fire too
+ * early.
+ */
+unsigned long round_jiffies_up(unsigned long j)
+{
+       return round_jiffies_common(j, raw_smp_processor_id(), true);
+}
+EXPORT_SYMBOL_GPL(round_jiffies_up);
+
+/**
+ * round_jiffies_up_relative - function to round jiffies up to a full second
+ * @j: the time in (relative) jiffies that should be rounded
+ *
+ * This is the same as round_jiffies_relative() except that it will never
+ * round down.  This is useful for timeouts for which the exact time
+ * of firing does not matter too much, as long as they don't fire too
+ * early.
+ */
+unsigned long round_jiffies_up_relative(unsigned long j)
+{
+       return __round_jiffies_up_relative(j, raw_smp_processor_id());
+}
+EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
+
+/**
+ * set_timer_slack - set the allowed slack for a timer
+ * @timer: the timer to be modified
+ * @slack_hz: the amount of time (in jiffies) allowed for rounding
+ *
+ * Set the amount of time, in jiffies, that a certain timer has
+ * in terms of slack. By setting this value, the timer subsystem
+ * will schedule the actual timer somewhere between
+ * the time mod_timer() asks for, and that time plus the slack.
+ *
+ * By setting the slack to -1, a percentage of the delay is used
+ * instead.
+ */
+void set_timer_slack(struct timer_list *timer, int slack_hz)
+{
+       timer->slack = slack_hz;
+}
+EXPORT_SYMBOL_GPL(set_timer_slack);
+
+/*
+ * If the list is empty, catch up ->timer_jiffies to the current time.
+ * The caller must hold the tvec_base lock.  Returns true if the list
+ * was empty and therefore ->timer_jiffies was updated.
+ */
+static bool catchup_timer_jiffies(struct tvec_base *base)
+{
+       if (!base->all_timers) {
+               base->timer_jiffies = jiffies;
+               return true;
+       }
+       return false;
+}
+
+static void
+__internal_add_timer(struct tvec_base *base, struct timer_list *timer)
+{
+       unsigned long expires = timer->expires;
+       unsigned long idx = expires - base->timer_jiffies;
+       struct list_head *vec;
+
+       if (idx < TVR_SIZE) {
+               int i = expires & TVR_MASK;
+               vec = base->tv1.vec + i;
+       } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
+               int i = (expires >> TVR_BITS) & TVN_MASK;
+               vec = base->tv2.vec + i;
+       } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
+               int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
+               vec = base->tv3.vec + i;
+       } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
+               int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
+               vec = base->tv4.vec + i;
+       } else if ((signed long) idx < 0) {
+               /*
+                * Can happen if you add a timer with expires == jiffies,
+                * or you set a timer to go off in the past
+                */
+               vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
+       } else {
+               int i;
+               /* If the timeout is larger than MAX_TVAL (on 64-bit
+                * architectures or with CONFIG_BASE_SMALL=1) then we
+                * use the maximum timeout.
+                */
+               if (idx > MAX_TVAL) {
+                       idx = MAX_TVAL;
+                       expires = idx + base->timer_jiffies;
+               }
+               i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
+               vec = base->tv5.vec + i;
+       }
+       /*
+        * Timers are FIFO:
+        */
+       list_add_tail(&timer->entry, vec);
+}
+
+static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
+{
+       (void)catchup_timer_jiffies(base);
+       __internal_add_timer(base, timer);
+       /*
+        * Update base->active_timers and base->next_timer
+        */
+       if (!tbase_get_deferrable(timer->base)) {
+               if (!base->active_timers++ ||
+                   time_before(timer->expires, base->next_timer))
+                       base->next_timer = timer->expires;
+       }
+       base->all_timers++;
+
+       /*
+        * Check whether the other CPU is in dynticks mode and needs
+        * to be triggered to reevaluate the timer wheel.
+        * We are protected against the other CPU fiddling
+        * with the timer by holding the timer base lock. This also
+        * makes sure that a CPU on the way to stop its tick can not
+        * evaluate the timer wheel.
+        *
+        * Spare the IPI for deferrable timers on idle targets though.
+        * The next busy ticks will take care of it. Except full dynticks
+        * require special care against races with idle_cpu(), let's deal
+        * with that later.
+        */
+       if (!tbase_get_deferrable(base) || tick_nohz_full_cpu(base->cpu))
+               wake_up_nohz_cpu(base->cpu);
+}
+
+#ifdef CONFIG_TIMER_STATS
+void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
+{
+       if (timer->start_site)
+               return;
+
+       timer->start_site = addr;
+       memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
+       timer->start_pid = current->pid;
+}
+
+static void timer_stats_account_timer(struct timer_list *timer)
+{
+       unsigned int flag = 0;
+
+       if (likely(!timer->start_site))
+               return;
+       if (unlikely(tbase_get_deferrable(timer->base)))
+               flag |= TIMER_STATS_FLAG_DEFERRABLE;
+
+       timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
+                                timer->function, timer->start_comm, flag);
+}
+
+#else
+static void timer_stats_account_timer(struct timer_list *timer) {}
+#endif
+
+#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
+
+static struct debug_obj_descr timer_debug_descr;
+
+static void *timer_debug_hint(void *addr)
+{
+       return ((struct timer_list *) addr)->function;
+}
+
+/*
+ * fixup_init is called when:
+ * - an active object is initialized
+ */
+static int timer_fixup_init(void *addr, enum debug_obj_state state)
+{
+       struct timer_list *timer = addr;
+
+       switch (state) {
+       case ODEBUG_STATE_ACTIVE:
+               del_timer_sync(timer);
+               debug_object_init(timer, &timer_debug_descr);
+               return 1;
+       default:
+               return 0;
+       }
+}
+
+/* Stub timer callback for improperly used timers. */
+static void stub_timer(unsigned long data)
+{
+       WARN_ON(1);
+}
+
+/*
+ * fixup_activate is called when:
+ * - an active object is activated
+ * - an unknown object is activated (might be a statically initialized object)
+ */
+static int timer_fixup_activate(void *addr, enum debug_obj_state state)
+{
+       struct timer_list *timer = addr;
+
+       switch (state) {
+
+       case ODEBUG_STATE_NOTAVAILABLE:
+               /*
+                * This is not really a fixup. The timer was
+                * statically initialized. We just make sure that it
+                * is tracked in the object tracker.
+                */
+               if (timer->entry.next == NULL &&
+                   timer->entry.prev == TIMER_ENTRY_STATIC) {
+                       debug_object_init(timer, &timer_debug_descr);
+                       debug_object_activate(timer, &timer_debug_descr);
+                       return 0;
+               } else {
+                       setup_timer(timer, stub_timer, 0);
+                       return 1;
+               }
+               return 0;
+
+       case ODEBUG_STATE_ACTIVE:
+               WARN_ON(1);
+
+       default:
+               return 0;
+       }
+}
+
+/*
+ * fixup_free is called when:
+ * - an active object is freed
+ */
+static int timer_fixup_free(void *addr, enum debug_obj_state state)
+{
+       struct timer_list *timer = addr;
+
+       switch (state) {
+       case ODEBUG_STATE_ACTIVE:
+               del_timer_sync(timer);
+               debug_object_free(timer, &timer_debug_descr);
+               return 1;
+       default:
+               return 0;
+       }
+}
+
+/*
+ * fixup_assert_init is called when:
+ * - an untracked/uninit-ed object is found
+ */
+static int timer_fixup_assert_init(void *addr, enum debug_obj_state state)
+{
+       struct timer_list *timer = addr;
+
+       switch (state) {
+       case ODEBUG_STATE_NOTAVAILABLE:
+               if (timer->entry.prev == TIMER_ENTRY_STATIC) {
+                       /*
+                        * This is not really a fixup. The timer was
+                        * statically initialized. We just make sure that it
+                        * is tracked in the object tracker.
+                        */
+                       debug_object_init(timer, &timer_debug_descr);
+                       return 0;
+               } else {
+                       setup_timer(timer, stub_timer, 0);
+                       return 1;
+               }
+       default:
+               return 0;
+       }
+}
+
+static struct debug_obj_descr timer_debug_descr = {
+       .name                   = "timer_list",
+       .debug_hint             = timer_debug_hint,
+       .fixup_init             = timer_fixup_init,
+       .fixup_activate         = timer_fixup_activate,
+       .fixup_free             = timer_fixup_free,
+       .fixup_assert_init      = timer_fixup_assert_init,
+};
+
+static inline void debug_timer_init(struct timer_list *timer)
+{
+       debug_object_init(timer, &timer_debug_descr);
+}
+
+static inline void debug_timer_activate(struct timer_list *timer)
+{
+       debug_object_activate(timer, &timer_debug_descr);
+}
+
+static inline void debug_timer_deactivate(struct timer_list *timer)
+{
+       debug_object_deactivate(timer, &timer_debug_descr);
+}
+
+static inline void debug_timer_free(struct timer_list *timer)
+{
+       debug_object_free(timer, &timer_debug_descr);
+}
+
+static inline void debug_timer_assert_init(struct timer_list *timer)
+{
+       debug_object_assert_init(timer, &timer_debug_descr);
+}
+
+static void do_init_timer(struct timer_list *timer, unsigned int flags,
+                         const char *name, struct lock_class_key *key);
+
+void init_timer_on_stack_key(struct timer_list *timer, unsigned int flags,
+                            const char *name, struct lock_class_key *key)
+{
+       debug_object_init_on_stack(timer, &timer_debug_descr);
+       do_init_timer(timer, flags, name, key);
+}
+EXPORT_SYMBOL_GPL(init_timer_on_stack_key);
+
+void destroy_timer_on_stack(struct timer_list *timer)
+{
+       debug_object_free(timer, &timer_debug_descr);
+}
+EXPORT_SYMBOL_GPL(destroy_timer_on_stack);
+
+#else
+static inline void debug_timer_init(struct timer_list *timer) { }
+static inline void debug_timer_activate(struct timer_list *timer) { }
+static inline void debug_timer_deactivate(struct timer_list *timer) { }
+static inline void debug_timer_assert_init(struct timer_list *timer) { }
+#endif
+
+static inline void debug_init(struct timer_list *timer)
+{
+       debug_timer_init(timer);
+       trace_timer_init(timer);
+}
+
+static inline void
+debug_activate(struct timer_list *timer, unsigned long expires)
+{
+       debug_timer_activate(timer);
+       trace_timer_start(timer, expires);
+}
+
+static inline void debug_deactivate(struct timer_list *timer)
+{
+       debug_timer_deactivate(timer);
+       trace_timer_cancel(timer);
+}
+
+static inline void debug_assert_init(struct timer_list *timer)
+{
+       debug_timer_assert_init(timer);
+}
+
+/*
+ * Common initialization: bind the timer to the current CPU's base and
+ * stash @flags in the low bits of the base pointer (tbase_get_*() below
+ * recover them from there).
+ */
+static void do_init_timer(struct timer_list *timer, unsigned int flags,
+                         const char *name, struct lock_class_key *key)
+{
+       struct tvec_base *base = __raw_get_cpu_var(tvec_bases);
+
+       /* A NULL entry.next marks the timer as not pending. */
+       timer->entry.next = NULL;
+       timer->base = (void *)((unsigned long)base | flags);
+       /* slack < 0 selects the default heuristic in apply_slack(). */
+       timer->slack = -1;
+#ifdef CONFIG_TIMER_STATS
+       timer->start_site = NULL;
+       timer->start_pid = -1;
+       memset(timer->start_comm, 0, TASK_COMM_LEN);
+#endif
+       lockdep_init_map(&timer->lockdep_map, name, key, 0);
+}
+
+/**
+ * init_timer_key - initialize a timer
+ * @timer: the timer to be initialized
+ * @flags: timer flags
+ * @name: name of the timer
+ * @key: lockdep class key of the fake lock used for tracking timer
+ *       sync lock dependencies
+ *
+ * init_timer_key() must be done to a timer prior calling *any* of the
+ * other timer functions.
+ */
+void init_timer_key(struct timer_list *timer, unsigned int flags,
+                   const char *name, struct lock_class_key *key)
+{
+       debug_init(timer);
+       do_init_timer(timer, flags, name, key);
+}
+EXPORT_SYMBOL(init_timer_key);
+
+/*
+ * Unlink @timer from its wheel bucket.  When @clear_pending is true the
+ * timer afterwards reads as not pending (entry.next == NULL); entry.prev
+ * is always poisoned to catch list use after detach.
+ */
+static inline void detach_timer(struct timer_list *timer, bool clear_pending)
+{
+       struct list_head *entry = &timer->entry;
+
+       debug_deactivate(timer);
+
+       __list_del(entry->prev, entry->next);
+       if (clear_pending)
+               entry->next = NULL;
+       entry->prev = LIST_POISON2;
+}
+
+/* Detach a timer that is about to expire and fix up the base counters. */
+static inline void
+detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
+{
+       detach_timer(timer, true);
+       /* Deferrable timers are not counted in active_timers. */
+       if (!tbase_get_deferrable(timer->base))
+               base->active_timers--;
+       base->all_timers--;
+       (void)catchup_timer_jiffies(base);
+}
+
+/*
+ * Detach @timer if it is pending.  Returns 1 when it was detached,
+ * 0 when it was not pending.  The base lock must be held by the caller
+ * (see lock_timer_base() call sites).
+ */
+static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
+                            bool clear_pending)
+{
+       if (!timer_pending(timer))
+               return 0;
+
+       detach_timer(timer, clear_pending);
+       if (!tbase_get_deferrable(timer->base)) {
+               base->active_timers--;
+               /* Invalidate the cached next-expiry if we just removed it. */
+               if (timer->expires == base->next_timer)
+                       base->next_timer = base->timer_jiffies;
+       }
+       base->all_timers--;
+       (void)catchup_timer_jiffies(base);
+       return 1;
+}
+
+/*
+ * We are using hashed locking: holding per_cpu(tvec_bases).lock
+ * means that all timers which are tied to this base via timer->base are
+ * locked, and the base itself is locked too.
+ *
+ * So __run_timers/migrate_timers can safely modify all timers which could
+ * be found on ->tvX lists.
+ *
+ * When the timer's base is locked, and the timer removed from list, it is
+ * possible to set timer->base = NULL and drop the lock: the timer remains
+ * locked.
+ */
+/*
+ * Lock the base the timer is currently attached to, re-checking after
+ * acquisition because the timer may migrate concurrently.  A NULL base
+ * means a migration is in flight (see __mod_timer()), so spin until the
+ * new base becomes visible.
+ */
+static struct tvec_base *lock_timer_base(struct timer_list *timer,
+                                       unsigned long *flags)
+       __acquires(timer->base->lock)
+{
+       struct tvec_base *base;
+
+       for (;;) {
+               struct tvec_base *prelock_base = timer->base;
+               base = tbase_get_base(prelock_base);
+               if (likely(base != NULL)) {
+                       spin_lock_irqsave(&base->lock, *flags);
+                       if (likely(prelock_base == timer->base))
+                               return base;
+                       /* The timer has migrated to another CPU */
+                       spin_unlock_irqrestore(&base->lock, *flags);
+               }
+               cpu_relax();
+       }
+}
+
+/*
+ * Common worker for mod_timer() and friends.  Detaches the timer if it
+ * is pending, then (re)queues it with the new @expires, possibly on a
+ * different CPU's base.  Returns 1 if a pending timer was modified,
+ * 0 otherwise.  @pending_only makes it a no-op for non-pending timers.
+ */
+static inline int
+__mod_timer(struct timer_list *timer, unsigned long expires,
+                                               bool pending_only, int pinned)
+{
+       struct tvec_base *base, *new_base;
+       unsigned long flags;
+       int ret = 0 , cpu;
+
+       timer_stats_timer_set_start_info(timer);
+       BUG_ON(!timer->function);
+
+       base = lock_timer_base(timer, &flags);
+
+       ret = detach_if_pending(timer, base, false);
+       if (!ret && pending_only)
+               goto out_unlock;
+
+       debug_activate(timer, expires);
+
+       /* NOHZ may pick a different target CPU unless @pinned is set. */
+       cpu = get_nohz_timer_target(pinned);
+       new_base = per_cpu(tvec_bases, cpu);
+
+       if (base != new_base) {
+               /*
+                * We are trying to schedule the timer on the local CPU.
+                * However we can't change timer's base while it is running,
+                * otherwise del_timer_sync() can't detect that the timer's
+                * handler yet has not finished. This also guarantees that
+                * the timer is serialized wrt itself.
+                */
+               if (likely(base->running_timer != timer)) {
+                       /* See the comment in lock_timer_base() */
+                       timer_set_base(timer, NULL);
+                       spin_unlock(&base->lock);
+                       base = new_base;
+                       spin_lock(&base->lock);
+                       timer_set_base(timer, base);
+               }
+       }
+
+       timer->expires = expires;
+       internal_add_timer(base, timer);
+
+out_unlock:
+       spin_unlock_irqrestore(&base->lock, flags);
+
+       return ret;
+}
+
+/**
+ * mod_timer_pending - modify a pending timer's timeout
+ * @timer: the pending timer to be modified
+ * @expires: new timeout in jiffies
+ *
+ * mod_timer_pending() is the same for pending timers as mod_timer(),
+ * but will not re-activate and modify already deleted timers.
+ *
+ * It is useful for unserialized use of timers.
+ *
+ * Returns 1 if the timer was pending and got modified, 0 otherwise.
+ */
+int mod_timer_pending(struct timer_list *timer, unsigned long expires)
+{
+       return __mod_timer(timer, expires, true, TIMER_NOT_PINNED);
+}
+EXPORT_SYMBOL(mod_timer_pending);
+
+/*
+ * Decide where to put the timer while taking the slack into account
+ *
+ * Algorithm:
+ *   1) calculate the maximum (absolute) time
+ *   2) calculate the highest bit where the expires and new max are different
+ *   3) use this bit to make a mask
+ *   4) use the bitmask to round down the maximum time, so that all last
+ *      bits are zeros
+ */
+static inline
+unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
+{
+       unsigned long expires_limit, mask;
+       int bit;
+
+       if (timer->slack >= 0) {
+               /* Owner requested an explicit slack amount. */
+               expires_limit = expires + timer->slack;
+       } else {
+               long delta = expires - jiffies;
+
+               /* Short timeouts (< 256 jiffies) get no implicit slack. */
+               if (delta < 256)
+                       return expires;
+
+               /* Default heuristic: allow ~0.4% (1/256) of the delay. */
+               expires_limit = expires + delta / 256;
+       }
+       mask = expires ^ expires_limit;
+       /* No differing bits: rounding cannot change anything. */
+       if (mask == 0)
+               return expires;
+
+       bit = find_last_bit(&mask, BITS_PER_LONG);
+
+       mask = (1UL << bit) - 1;
+
+       /* Zero everything below the highest differing bit (step 4 above). */
+       expires_limit = expires_limit & ~(mask);
+
+       return expires_limit;
+}
+
+/**
+ * mod_timer - modify a timer's timeout
+ * @timer: the timer to be modified
+ * @expires: new timeout in jiffies
+ *
+ * mod_timer() is a more efficient way to update the expire field of an
+ * active timer (if the timer is inactive it will be activated)
+ *
+ * mod_timer(timer, expires) is equivalent to:
+ *
+ *     del_timer(timer); timer->expires = expires; add_timer(timer);
+ *
+ * Note that if there are multiple unserialized concurrent users of the
+ * same timer, then mod_timer() is the only safe way to modify the timeout,
+ * since add_timer() cannot modify an already running timer.
+ *
+ * The function returns whether it has modified a pending timer or not.
+ * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
+ * active timer returns 1.)
+ */
+int mod_timer(struct timer_list *timer, unsigned long expires)
+{
+       /* Round the expiry per the timer's slack to batch wakeups. */
+       expires = apply_slack(timer, expires);
+
+       /*
+        * This is a common optimization triggered by the
+        * networking code - if the timer is re-modified
+        * to be the same thing then just return:
+        */
+       if (timer_pending(timer) && timer->expires == expires)
+               return 1;
+
+       return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
+}
+EXPORT_SYMBOL(mod_timer);
+
+/**
+ * mod_timer_pinned - modify a timer's timeout
+ * @timer: the timer to be modified
+ * @expires: new timeout in jiffies
+ *
+ * mod_timer_pinned() is a way to update the expire field of an
+ * active timer (if the timer is inactive it will be activated)
+ * and to ensure that the timer is scheduled on the current CPU.
+ *
+ * Note that this does not prevent the timer from being migrated
+ * when the current CPU goes offline.  If this is a problem for
+ * you, use CPU-hotplug notifiers to handle it correctly, for
+ * example, cancelling the timer when the corresponding CPU goes
+ * offline.
+ *
+ * mod_timer_pinned(timer, expires) is equivalent to:
+ *
+ *     del_timer(timer); timer->expires = expires; add_timer(timer);
+ */
+int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
+{
+       /* Same no-op shortcut as mod_timer(); note: no slack applied here. */
+       if (timer->expires == expires && timer_pending(timer))
+               return 1;
+
+       return __mod_timer(timer, expires, false, TIMER_PINNED);
+}
+EXPORT_SYMBOL(mod_timer_pinned);
+
+/**
+ * add_timer - start a timer
+ * @timer: the timer to be added
+ *
+ * The kernel will do a ->function(->data) callback from the
+ * timer interrupt at the ->expires point in the future. The
+ * current time is 'jiffies'.
+ *
+ * The timer's ->expires, ->function (and if the handler uses it, ->data)
+ * fields must be set prior calling this function.
+ *
+ * Timers with an ->expires field in the past will be executed in the next
+ * timer tick.
+ */
+void add_timer(struct timer_list *timer)
+{
+       /* Double add is a bug; use mod_timer() to re-arm a pending timer. */
+       BUG_ON(timer_pending(timer));
+       mod_timer(timer, timer->expires);
+}
+EXPORT_SYMBOL(add_timer);
+
+/**
+ * add_timer_on - start a timer on a particular CPU
+ * @timer: the timer to be added
+ * @cpu: the CPU to start it on
+ *
+ * This is not very scalable on SMP. Double adds are not possible.
+ */
+void add_timer_on(struct timer_list *timer, int cpu)
+{
+       struct tvec_base *base = per_cpu(tvec_bases, cpu);
+       unsigned long flags;
+
+       timer_stats_timer_set_start_info(timer);
+       BUG_ON(timer_pending(timer) || !timer->function);
+       spin_lock_irqsave(&base->lock, flags);
+       /* Rebind the timer to the target CPU's base before queueing it. */
+       timer_set_base(timer, base);
+       debug_activate(timer, timer->expires);
+       internal_add_timer(base, timer);
+       spin_unlock_irqrestore(&base->lock, flags);
+}
+EXPORT_SYMBOL_GPL(add_timer_on);
+
+/**
+ * del_timer - deactivate a timer.
+ * @timer: the timer to be deactivated
+ *
+ * del_timer() deactivates a timer - this works on both active and inactive
+ * timers.
+ *
+ * The function returns whether it has deactivated a pending timer or not.
+ * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
+ * active timer returns 1.)
+ */
+int del_timer(struct timer_list *timer)
+{
+       struct tvec_base *base;
+       unsigned long flags;
+       int ret = 0;
+
+       debug_assert_init(timer);
+
+       timer_stats_timer_clear_start_info(timer);
+       /* Unlocked check is fine: detach_if_pending() re-checks under lock. */
+       if (timer_pending(timer)) {
+               base = lock_timer_base(timer, &flags);
+               ret = detach_if_pending(timer, base, true);
+               spin_unlock_irqrestore(&base->lock, flags);
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL(del_timer);
+
+/**
+ * try_to_del_timer_sync - Try to deactivate a timer
+ * @timer: the timer to deactivate
+ *
+ * This function tries to deactivate a timer. Upon successful (ret >= 0)
+ * exit the timer is not queued and the handler is not running on any CPU.
+ *
+ * Returns -1 when the timer's handler is currently running (caller may
+ * retry), otherwise the detach_if_pending() result (1/0).
+ */
+int try_to_del_timer_sync(struct timer_list *timer)
+{
+       struct tvec_base *base;
+       unsigned long flags;
+       int ret = -1;
+
+       debug_assert_init(timer);
+
+       base = lock_timer_base(timer, &flags);
+
+       if (base->running_timer != timer) {
+               timer_stats_timer_clear_start_info(timer);
+               ret = detach_if_pending(timer, base, true);
+       }
+       spin_unlock_irqrestore(&base->lock, flags);
+
+       return ret;
+}
+EXPORT_SYMBOL(try_to_del_timer_sync);
+
+#ifdef CONFIG_SMP
+/**
+ * del_timer_sync - deactivate a timer and wait for the handler to finish.
+ * @timer: the timer to be deactivated
+ *
+ * This function only differs from del_timer() on SMP: besides deactivating
+ * the timer it also makes sure the handler has finished executing on other
+ * CPUs.
+ *
+ * Synchronization rules: Callers must prevent restarting of the timer,
+ * otherwise this function is meaningless. It must not be called from
+ * interrupt contexts unless the timer is an irqsafe one. The caller must
+ * not hold locks which would prevent completion of the timer's
+ * handler. The timer's handler must not call add_timer_on(). Upon exit the
+ * timer is not queued and the handler is not running on any CPU.
+ *
+ * Note: For !irqsafe timers, you must not hold locks that are held in
+ *   interrupt context while calling this function. Even if the lock has
+ *   nothing to do with the timer in question.  Here's why:
+ *
+ *    CPU0                             CPU1
+ *    ----                             ----
+ *                                   <SOFTIRQ>
+ *                                   call_timer_fn();
+ *                                     base->running_timer = mytimer;
+ *  spin_lock_irq(somelock);
+ *                                     <IRQ>
+ *                                        spin_lock(somelock);
+ *  del_timer_sync(mytimer);
+ *   while (base->running_timer == mytimer);
+ *
+ * Now del_timer_sync() will never return and never release somelock.
+ * The interrupt on the other CPU is waiting to grab somelock but
+ * it has interrupted the softirq that CPU0 is waiting to finish.
+ *
+ * The function returns whether it has deactivated a pending timer or not.
+ */
+int del_timer_sync(struct timer_list *timer)
+{
+#ifdef CONFIG_LOCKDEP
+       unsigned long flags;
+
+       /*
+        * If lockdep gives a backtrace here, please reference
+        * the synchronization rules above.
+        */
+       local_irq_save(flags);
+       lock_map_acquire(&timer->lockdep_map);
+       lock_map_release(&timer->lockdep_map);
+       local_irq_restore(flags);
+#endif
+       /*
+        * don't use it in hardirq context, because it
+        * could lead to deadlock.
+        */
+       WARN_ON(in_irq() && !tbase_get_irqsafe(timer->base));
+       /*
+        * try_to_del_timer_sync() returns -1 while the handler is
+        * running on another CPU; spin until it has finished.
+        */
+       for (;;) {
+               int ret = try_to_del_timer_sync(timer);
+               if (ret >= 0)
+                       return ret;
+               cpu_relax();
+       }
+}
+EXPORT_SYMBOL(del_timer_sync);
+#endif
+
+/*
+ * Move all timers from bucket @index of vector @tv back into the wheel
+ * (one level down).  Returns @index so callers can chain cascades:
+ * a zero index means the whole vector wrapped and the next level must
+ * cascade too (see __run_timers()).
+ */
+static int cascade(struct tvec_base *base, struct tvec *tv, int index)
+{
+       /* cascade all the timers from tv up one level */
+       struct timer_list *timer, *tmp;
+       struct list_head tv_list;
+
+       list_replace_init(tv->vec + index, &tv_list);
+
+       /*
+        * We are removing _all_ timers from the list, so we
+        * don't have to detach them individually.
+        */
+       list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
+               BUG_ON(tbase_get_base(timer->base) != base);
+               /* No accounting, while moving them */
+               __internal_add_timer(base, timer);
+       }
+
+       return index;
+}
+
+/*
+ * Invoke one expired timer callback, coupling it with del_timer_sync()
+ * via lockdep and sanity-checking that the callback did not leak a
+ * preempt count.
+ */
+static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
+                         unsigned long data)
+{
+       int count = preempt_count();
+
+#ifdef CONFIG_LOCKDEP
+       /*
+        * It is permissible to free the timer from inside the
+        * function that is called from it, this we need to take into
+        * account for lockdep too. To avoid bogus "held lock freed"
+        * warnings as well as problems when looking into
+        * timer->lockdep_map, make a copy and use that here.
+        */
+       struct lockdep_map lockdep_map;
+
+       lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
+#endif
+       /*
+        * Couple the lock chain with the lock chain at
+        * del_timer_sync() by acquiring the lock_map around the fn()
+        * call here and in del_timer_sync().
+        */
+       lock_map_acquire(&lockdep_map);
+
+       trace_timer_expire_entry(timer);
+       fn(data);
+       trace_timer_expire_exit(timer);
+
+       lock_map_release(&lockdep_map);
+
+       if (count != preempt_count()) {
+               WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
+                         fn, count, preempt_count());
+               /*
+                * Restore the preempt count. That gives us a decent
+                * chance to survive and extract information. If the
+                * callback kept a lock held, bad luck, but not worse
+                * than the BUG() we had.
+                */
+               preempt_count_set(count);
+       }
+}
+
+#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
+
+/**
+ * __run_timers - run all expired timers (if any) on this CPU.
+ * @base: the timer vector to be processed.
+ *
+ * This function cascades all vectors and executes all expired timer
+ * vectors.
+ */
+static inline void __run_timers(struct tvec_base *base)
+{
+       struct timer_list *timer;
+
+       spin_lock_irq(&base->lock);
+       /* Nothing queued: just fast-forward timer_jiffies and bail out. */
+       if (catchup_timer_jiffies(base)) {
+               spin_unlock_irq(&base->lock);
+               return;
+       }
+       /* Process every jiffy the wheel has fallen behind. */
+       while (time_after_eq(jiffies, base->timer_jiffies)) {
+               struct list_head work_list;
+               struct list_head *head = &work_list;
+               int index = base->timer_jiffies & TVR_MASK;
+
+               /*
+                * Cascade timers:
+                */
+               if (!index &&
+                       (!cascade(base, &base->tv2, INDEX(0))) &&
+                               (!cascade(base, &base->tv3, INDEX(1))) &&
+                                       !cascade(base, &base->tv4, INDEX(2)))
+                       cascade(base, &base->tv5, INDEX(3));
+               ++base->timer_jiffies;
+               list_replace_init(base->tv1.vec + index, head);
+               while (!list_empty(head)) {
+                       void (*fn)(unsigned long);
+                       unsigned long data;
+                       bool irqsafe;
+
+                       timer = list_first_entry(head, struct timer_list,entry);
+                       /* Snapshot fields: the callback may free the timer. */
+                       fn = timer->function;
+                       data = timer->data;
+                       irqsafe = tbase_get_irqsafe(timer->base);
+
+                       timer_stats_account_timer(timer);
+
+                       base->running_timer = timer;
+                       detach_expired_timer(timer, base);
+
+                       /* irqsafe timers run with interrupts still off. */
+                       if (irqsafe) {
+                               spin_unlock(&base->lock);
+                               call_timer_fn(timer, fn, data);
+                               spin_lock(&base->lock);
+                       } else {
+                               spin_unlock_irq(&base->lock);
+                               call_timer_fn(timer, fn, data);
+                               spin_lock_irq(&base->lock);
+                       }
+               }
+       }
+       base->running_timer = NULL;
+       spin_unlock_irq(&base->lock);
+}
+
+#ifdef CONFIG_NO_HZ_COMMON
+/*
+ * Find out when the next timer event is due to happen. This
+ * is used on S/390 to stop all activity when a CPU is idle.
+ * This function needs to be called with interrupts disabled.
+ */
+static unsigned long __next_timer_interrupt(struct tvec_base *base)
+{
+       unsigned long timer_jiffies = base->timer_jiffies;
+       unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
+       int index, slot, array, found = 0;
+       struct timer_list *nte;
+       struct tvec *varray[4];
+
+       /* Look for timer events in tv1. */
+       index = slot = timer_jiffies & TVR_MASK;
+       do {
+               list_for_each_entry(nte, base->tv1.vec + slot, entry) {
+                       /* Deferrable timers don't force a wakeup. */
+                       if (tbase_get_deferrable(nte->base))
+                               continue;
+
+                       found = 1;
+                       expires = nte->expires;
+                       /* Look at the cascade bucket(s)? */
+                       if (!index || slot < index)
+                               goto cascade;
+                       return expires;
+               }
+               slot = (slot + 1) & TVR_MASK;
+       } while (slot != index);
+
+cascade:
+       /* Calculate the next cascade event */
+       if (index)
+               timer_jiffies += TVR_SIZE - index;
+       timer_jiffies >>= TVR_BITS;
+
+       /* Check tv2-tv5. */
+       varray[0] = &base->tv2;
+       varray[1] = &base->tv3;
+       varray[2] = &base->tv4;
+       varray[3] = &base->tv5;
+
+       for (array = 0; array < 4; array++) {
+               struct tvec *varp = varray[array];
+
+               index = slot = timer_jiffies & TVN_MASK;
+               do {
+                       list_for_each_entry(nte, varp->vec + slot, entry) {
+                               if (tbase_get_deferrable(nte->base))
+                                       continue;
+
+                               found = 1;
+                               /* Upper-level buckets are coarse: keep the min. */
+                               if (time_before(nte->expires, expires))
+                                       expires = nte->expires;
+                       }
+                       /*
+                        * Do we still search for the first timer or are
+                        * we looking up the cascade buckets ?
+                        */
+                       if (found) {
+                               /* Look at the cascade bucket(s)? */
+                               if (!index || slot < index)
+                                       break;
+                               return expires;
+                       }
+                       slot = (slot + 1) & TVN_MASK;
+               } while (slot != index);
+
+               if (index)
+                       timer_jiffies += TVN_SIZE - index;
+               timer_jiffies >>= TVN_BITS;
+       }
+       return expires;
+}
+
+/*
+ * Check, if the next hrtimer event is before the next timer wheel
+ * event:
+ */
+static unsigned long cmp_next_hrtimer_event(unsigned long now,
+                                           unsigned long expires)
+{
+       ktime_t hr_delta = hrtimer_get_next_event();
+       struct timespec tsdelta;
+       unsigned long delta;
+
+       /* No hrtimer queued: the wheel expiry stands. */
+       if (hr_delta.tv64 == KTIME_MAX)
+               return expires;
+
+       /*
+        * Expired timer available, let it expire in the next tick
+        */
+       if (hr_delta.tv64 <= 0)
+               return now + 1;
+
+       tsdelta = ktime_to_timespec(hr_delta);
+       delta = timespec_to_jiffies(&tsdelta);
+
+       /*
+        * Limit the delta to the max value, which is checked in
+        * tick_nohz_stop_sched_tick():
+        */
+       if (delta > NEXT_TIMER_MAX_DELTA)
+               delta = NEXT_TIMER_MAX_DELTA;
+
+       /*
+        * Take rounding errors in to account and make sure, that it
+        * expires in the next tick. Otherwise we go into an endless
+        * ping pong due to tick_nohz_stop_sched_tick() retriggering
+        * the timer softirq
+        */
+       if (delta < 1)
+               delta = 1;
+       now += delta;
+       /* Return whichever of the two events comes first. */
+       if (time_before(now, expires))
+               return now;
+       return expires;
+}
+
+/**
+ * get_next_timer_interrupt - return the jiffy of the next pending timer
+ * @now: current time (in jiffies)
+ */
+unsigned long get_next_timer_interrupt(unsigned long now)
+{
+       struct tvec_base *base = __this_cpu_read(tvec_bases);
+       unsigned long expires = now + NEXT_TIMER_MAX_DELTA;
+
+       /*
+        * Pretend that there is no timer pending if the cpu is offline.
+        * Possible pending timers will be migrated later to an active cpu.
+        */
+       if (cpu_is_offline(smp_processor_id()))
+               return expires;
+
+       /* Plain spin_lock: per the comment above __next_timer_interrupt(),
+        * callers invoke this with interrupts already disabled. */
+       spin_lock(&base->lock);
+       if (base->active_timers) {
+               /* Recompute the cached next expiry only when it is stale. */
+               if (time_before_eq(base->next_timer, base->timer_jiffies))
+                       base->next_timer = __next_timer_interrupt(base);
+               expires = base->next_timer;
+       }
+       spin_unlock(&base->lock);
+
+       if (time_before_eq(expires, now))
+               return now;
+
+       /* An earlier hrtimer event may shorten the idle period. */
+       return cmp_next_hrtimer_event(now, expires);
+}
+#endif
+
+/*
+ * Called from the timer interrupt handler to charge one tick to the current
+ * process.  user_tick is 1 if the tick is user time, 0 for system.
+ */
+void update_process_times(int user_tick)
+{
+       struct task_struct *p = current;
+       int cpu = smp_processor_id();
+
+       /* Note: this timer irq context must be accounted for as well. */
+       account_process_tick(p, user_tick);
+       /* Raises TIMER_SOFTIRQ to run expired wheel timers. */
+       run_local_timers();
+       rcu_check_callbacks(cpu, user_tick);
+#ifdef CONFIG_IRQ_WORK
+       if (in_irq())
+               irq_work_run();
+#endif
+       scheduler_tick();
+       run_posix_cpu_timers(p);
+}
+
+/*
+ * This function runs timers and the timer-tq in bottom half context.
+ */
+/*
+ * TIMER_SOFTIRQ handler: runs expired wheel timers in bottom half
+ * context (registered in init_timers()).
+ */
+static void run_timer_softirq(struct softirq_action *h)
+{
+       struct tvec_base *base = __this_cpu_read(tvec_bases);
+
+       hrtimer_run_pending();
+
+       /* Only take the base lock path when something may have expired. */
+       if (time_after_eq(jiffies, base->timer_jiffies))
+               __run_timers(base);
+}
+
+/*
+ * Called by the local, per-CPU timer interrupt on SMP.
+ */
+void run_local_timers(void)
+{
+       hrtimer_run_queues();
+       raise_softirq(TIMER_SOFTIRQ);
+}
+
+#ifdef __ARCH_WANT_SYS_ALARM
+
+/*
+ * For backwards compatibility?  This can be done in libc so Alpha
+ * and all newer ports shouldn't need it.
+ */
+SYSCALL_DEFINE1(alarm, unsigned int, seconds)
+{
+       return alarm_setitimer(seconds);
+}
+
+#endif
+
+/* Timer callback for schedule_timeout(): wake the sleeping task. */
+static void process_timeout(unsigned long __data)
+{
+       wake_up_process((struct task_struct *)__data);
+}
+
+/**
+ * schedule_timeout - sleep until timeout
+ * @timeout: timeout value in jiffies
+ *
+ * Make the current task sleep until @timeout jiffies have
+ * elapsed. The routine will return immediately unless
+ * the current task state has been set (see set_current_state()).
+ *
+ * You can set the task state as follows -
+ *
+ * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
+ * pass before the routine returns. The routine will return 0
+ *
+ * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
+ * delivered to the current task. In this case the remaining time
+ * in jiffies will be returned, or 0 if the timer expired in time
+ *
+ * The current task state is guaranteed to be TASK_RUNNING when this
+ * routine returns.
+ *
+ * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
+ * the CPU away without a bound on the timeout. In this case the return
+ * value will be %MAX_SCHEDULE_TIMEOUT.
+ *
+ * In all cases the return value is guaranteed to be non-negative.
+ */
+signed long __sched schedule_timeout(signed long timeout)
+{
+       struct timer_list timer;
+       unsigned long expire;
+
+       switch (timeout)
+       {
+       case MAX_SCHEDULE_TIMEOUT:
+               /*
+                * These two special cases are useful to be comfortable
+                * in the caller. Nothing more. We could take
+                * MAX_SCHEDULE_TIMEOUT from one of the negative value
+                * but I' d like to return a valid offset (>=0) to allow
+                * the caller to do everything it want with the retval.
+                */
+               schedule();
+               goto out;
+       default:
+               /*
+                * Another bit of PARANOID. Note that the retval will be
+                * 0 since no piece of kernel is supposed to do a check
+                * for a negative retval of schedule_timeout() (since it
+                * should never happens anyway). You just have the printk()
+                * that will tell you if something is gone wrong and where.
+                */
+               if (timeout < 0) {
+                       printk(KERN_ERR "schedule_timeout: wrong timeout "
+                               "value %lx\n", timeout);
+                       dump_stack();
+                       current->state = TASK_RUNNING;
+                       goto out;
+               }
+       }
+
+       expire = timeout + jiffies;
+
+       /* Arm a one-shot on-stack timer that wakes us, then sleep. */
+       setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
+       __mod_timer(&timer, expire, false, TIMER_NOT_PINNED);
+       schedule();
+       /* Cancel in case we woke early (signal etc.) before expiry. */
+       del_singleshot_timer_sync(&timer);
+
+       /* Remove the timer from the object tracker */
+       destroy_timer_on_stack(&timer);
+
+       timeout = expire - jiffies;
+
+ out:
+       /* Clamp to 0: a late wakeup must not yield a negative remainder. */
+       return timeout < 0 ? 0 : timeout;
+}
+EXPORT_SYMBOL(schedule_timeout);
+
+/*
+ * We can use __set_current_state() here because schedule_timeout() calls
+ * schedule() unconditionally.
+ */
+/* Sleep up to @timeout jiffies; wakes early on a signal. */
+signed long __sched schedule_timeout_interruptible(signed long timeout)
+{
+       __set_current_state(TASK_INTERRUPTIBLE);
+       return schedule_timeout(timeout);
+}
+EXPORT_SYMBOL(schedule_timeout_interruptible);
+
+/* Sleep up to @timeout jiffies; wakes early only on fatal signals. */
+signed long __sched schedule_timeout_killable(signed long timeout)
+{
+       __set_current_state(TASK_KILLABLE);
+       return schedule_timeout(timeout);
+}
+EXPORT_SYMBOL(schedule_timeout_killable);
+
+/* Sleep the full @timeout jiffies; signals do not interrupt it. */
+signed long __sched schedule_timeout_uninterruptible(signed long timeout)
+{
+       __set_current_state(TASK_UNINTERRUPTIBLE);
+       return schedule_timeout(timeout);
+}
+EXPORT_SYMBOL(schedule_timeout_uninterruptible);
+
+/*
+ * Allocate (or reuse) and initialize the tvec_base for @cpu.  The boot
+ * CPU uses the statically allocated boot_tvec_bases; secondary CPUs get
+ * a node-local kzalloc'd base.  Returns 0 or -ENOMEM.
+ */
+static int init_timers_cpu(int cpu)
+{
+       int j;
+       struct tvec_base *base;
+       static char tvec_base_done[NR_CPUS];
+
+       if (!tvec_base_done[cpu]) {
+               static char boot_done;
+
+               if (boot_done) {
+                       /*
+                        * The APs use this path later in boot
+                        */
+                       base = kzalloc_node(sizeof(*base), GFP_KERNEL,
+                                           cpu_to_node(cpu));
+                       if (!base)
+                               return -ENOMEM;
+
+                       /* Make sure tvec_base has TIMER_FLAG_MASK bits free */
+                       if (WARN_ON(base != tbase_get_base(base))) {
+                               kfree(base);
+                               return -ENOMEM;
+                       }
+                       per_cpu(tvec_bases, cpu) = base;
+               } else {
+                       /*
+                        * This is for the boot CPU - we use compile-time
+                        * static initialisation because per-cpu memory isn't
+                        * ready yet and because the memory allocators are not
+                        * initialised either.
+                        */
+                       boot_done = 1;
+                       base = &boot_tvec_bases;
+               }
+               spin_lock_init(&base->lock);
+               tvec_base_done[cpu] = 1;
+               base->cpu = cpu;
+       } else {
+               /* CPU came back online: its base already exists. */
+               base = per_cpu(tvec_bases, cpu);
+       }
+
+
+       /* Reset all wheel buckets and counters to an empty state. */
+       for (j = 0; j < TVN_SIZE; j++) {
+               INIT_LIST_HEAD(base->tv5.vec + j);
+               INIT_LIST_HEAD(base->tv4.vec + j);
+               INIT_LIST_HEAD(base->tv3.vec + j);
+               INIT_LIST_HEAD(base->tv2.vec + j);
+       }
+       for (j = 0; j < TVR_SIZE; j++)
+               INIT_LIST_HEAD(base->tv1.vec + j);
+
+       base->timer_jiffies = jiffies;
+       base->next_timer = base->timer_jiffies;
+       base->active_timers = 0;
+       base->all_timers = 0;
+       return 0;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+/* Move every timer on @head over to @new_base (dying-CPU path). */
+static void migrate_timer_list(struct tvec_base *new_base, struct list_head *head)
+{
+       struct timer_list *timer;
+
+       while (!list_empty(head)) {
+               timer = list_first_entry(head, struct timer_list, entry);
+               /* We ignore the accounting on the dying cpu */
+               detach_timer(timer, false);
+               timer_set_base(timer, new_base);
+               internal_add_timer(new_base, timer);
+       }
+}
+
+/* Move all timers off the (offline) @cpu onto the current CPU's base. */
+static void migrate_timers(int cpu)
+{
+       struct tvec_base *old_base;
+       struct tvec_base *new_base;
+       int i;
+
+       BUG_ON(cpu_online(cpu));
+       old_base = per_cpu(tvec_bases, cpu);
+       new_base = get_cpu_var(tvec_bases);
+       /*
+        * The caller is globally serialized and nobody else
+        * takes two locks at once, deadlock is not possible.
+        */
+       spin_lock_irq(&new_base->lock);
+       spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
+
+       /* An offline CPU cannot be executing a timer callback. */
+       BUG_ON(old_base->running_timer);
+
+       for (i = 0; i < TVR_SIZE; i++)
+               migrate_timer_list(new_base, old_base->tv1.vec + i);
+       for (i = 0; i < TVN_SIZE; i++) {
+               migrate_timer_list(new_base, old_base->tv2.vec + i);
+               migrate_timer_list(new_base, old_base->tv3.vec + i);
+               migrate_timer_list(new_base, old_base->tv4.vec + i);
+               migrate_timer_list(new_base, old_base->tv5.vec + i);
+       }
+
+       spin_unlock(&old_base->lock);
+       spin_unlock_irq(&new_base->lock);
+       put_cpu_var(tvec_bases);
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
+/*
+ * CPU hotplug callback: set up the tvec_base when a CPU comes up and
+ * migrate its timers away when it dies.
+ */
+static int timer_cpu_notify(struct notifier_block *self,
+                               unsigned long action, void *hcpu)
+{
+       long cpu = (long)hcpu;
+       int err;
+
+       switch(action) {
+       case CPU_UP_PREPARE:
+       case CPU_UP_PREPARE_FROZEN:
+               err = init_timers_cpu(cpu);
+               if (err < 0)
+                       return notifier_from_errno(err);
+               break;
+#ifdef CONFIG_HOTPLUG_CPU
+       case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
+               migrate_timers(cpu);
+               break;
+#endif
+       default:
+               break;
+       }
+       return NOTIFY_OK;
+}
+
+static struct notifier_block timers_nb = {
+       .notifier_call  = timer_cpu_notify,
+};
+
+
+void __init init_timers(void)
+{
+       int err;
+
+       /* ensure there are enough low bits for flags in timer->base pointer */
+       BUILD_BUG_ON(__alignof__(struct tvec_base) & TIMER_FLAG_MASK);
+
+       /* Initialize the boot CPU's base directly via the notifier. */
+       err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
+                              (void *)(long)smp_processor_id());
+       BUG_ON(err != NOTIFY_OK);
+
+       init_timer_stats();
+       register_cpu_notifier(&timers_nb);
+       open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
+}
+
+/**
+ * msleep - sleep safely even with waitqueue interruptions
+ * @msecs: Time in milliseconds to sleep for
+ */
+void msleep(unsigned int msecs)
+{
+       /* +1 jiffy guarantees at least @msecs despite tick granularity. */
+       unsigned long timeout = msecs_to_jiffies(msecs) + 1;
+
+       /* schedule_timeout_uninterruptible() returns the remainder. */
+       while (timeout)
+               timeout = schedule_timeout_uninterruptible(timeout);
+}
+
+EXPORT_SYMBOL(msleep);
+
+/**
+ * msleep_interruptible - sleep waiting for signals
+ * @msecs: Time in milliseconds to sleep for
+ *
+ * Returns the remaining time in milliseconds when interrupted early.
+ */
+unsigned long msleep_interruptible(unsigned int msecs)
+{
+       unsigned long timeout = msecs_to_jiffies(msecs) + 1;
+
+       while (timeout && !signal_pending(current))
+               timeout = schedule_timeout_interruptible(timeout);
+       return jiffies_to_msecs(timeout);
+}
+
+EXPORT_SYMBOL(msleep_interruptible);
+
+/* Sleep between @min and @max usecs using a ranged hrtimer. */
+static int __sched do_usleep_range(unsigned long min, unsigned long max)
+{
+       ktime_t kmin;
+       unsigned long delta;
+
+       kmin = ktime_set(0, min * NSEC_PER_USEC);
+       /* The range width lets the hrtimer coalesce with other wakeups. */
+       delta = (max - min) * NSEC_PER_USEC;
+       return schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
+}
+
+/**
+ * usleep_range - Drop in replacement for udelay where wakeup is flexible
+ * @min: Minimum time in usecs to sleep
+ * @max: Maximum time in usecs to sleep
+ */
+void usleep_range(unsigned long min, unsigned long max)
+{
+       __set_current_state(TASK_UNINTERRUPTIBLE);
+       do_usleep_range(min, max);
+}
+EXPORT_SYMBOL(usleep_range);
diff --git a/kernel/time/udelay_test.c b/kernel/time/udelay_test.c
new file mode 100644 (file)
index 0000000..e622ba3
--- /dev/null
@@ -0,0 +1,168 @@
+/*
+ * udelay() test kernel module
+ *
+ * Test is executed by writing to and reading from /sys/kernel/debug/udelay_test
+ * Tests are configured by writing: USECS ITERATIONS
+ * Tests are executed by reading from the same file.
+ * Specifying usecs of 0 or negative values will run multiple tests.
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/ktime.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+#define DEFAULT_ITERATIONS 100
+
+#define DEBUGFS_FILENAME "udelay_test"
+
+static DEFINE_MUTEX(udelay_test_lock);
+static struct dentry *udelay_test_debugfs_file;
+static int udelay_test_usecs;
+static int udelay_test_iterations = DEFAULT_ITERATIONS;
+
+static int udelay_test_single(struct seq_file *s, int usecs, uint32_t iters)
+{
+       int min = 0, max = 0, fail_count = 0;
+       uint64_t sum = 0;
+       uint64_t avg;
+       int i;
+       /* Allow udelay to be up to 0.5% fast */
+       int allowed_error_ns = usecs * 5;
+
+       for (i = 0; i < iters; ++i) {
+               struct timespec ts1, ts2;
+               int time_passed;
+
+               ktime_get_ts(&ts1);
+               udelay(usecs);
+               ktime_get_ts(&ts2);
+               time_passed = timespec_to_ns(&ts2) - timespec_to_ns(&ts1);
+
+               if (i == 0 || time_passed < min)
+                       min = time_passed;
+               if (i == 0 || time_passed > max)
+                       max = time_passed;
+               if ((time_passed + allowed_error_ns) / 1000 < usecs)
+                       ++fail_count;
+               WARN_ON(time_passed < 0);
+               sum += time_passed;
+       }
+
+       avg = sum;
+       do_div(avg, iters);
+       seq_printf(s, "%d usecs x %d: exp=%d allowed=%d min=%d avg=%lld max=%d",
+                       usecs, iters, usecs * 1000,
+                       (usecs * 1000) - allowed_error_ns, min, avg, max);
+       if (fail_count)
+               seq_printf(s, " FAIL=%d", fail_count);
+       seq_puts(s, "\n");
+
+       return 0;
+}
+
+static int udelay_test_show(struct seq_file *s, void *v)
+{
+       int usecs;
+       int iters;
+       int ret = 0;
+
+       mutex_lock(&udelay_test_lock);
+       usecs = udelay_test_usecs;
+       iters = udelay_test_iterations;
+       mutex_unlock(&udelay_test_lock);
+
+       if (usecs > 0 && iters > 0) {
+               return udelay_test_single(s, usecs, iters);
+       } else if (usecs == 0) {
+               struct timespec ts;
+
+               ktime_get_ts(&ts);
+               seq_printf(s, "udelay() test (lpj=%ld kt=%ld.%09ld)\n",
+                               loops_per_jiffy, ts.tv_sec, ts.tv_nsec);
+               seq_puts(s, "usage:\n");
+               seq_puts(s, "echo USECS [ITERS] > " DEBUGFS_FILENAME "\n");
+               seq_puts(s, "cat " DEBUGFS_FILENAME "\n");
+       }
+
+       return ret;
+}
+
+static int udelay_test_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, udelay_test_show, inode->i_private);
+}
+
+static ssize_t udelay_test_write(struct file *file, const char __user *buf,
+               size_t count, loff_t *pos)
+{
+       char lbuf[32];
+       int ret;
+       int usecs;
+       int iters;
+
+       if (count >= sizeof(lbuf))
+               return -EINVAL;
+
+       if (copy_from_user(lbuf, buf, count))
+               return -EFAULT;
+       lbuf[count] = '\0';
+
+       ret = sscanf(lbuf, "%d %d", &usecs, &iters);
+       if (ret < 1)
+               return -EINVAL;
+       else if (ret < 2)
+               iters = DEFAULT_ITERATIONS;
+
+       mutex_lock(&udelay_test_lock);
+       udelay_test_usecs = usecs;
+       udelay_test_iterations = iters;
+       mutex_unlock(&udelay_test_lock);
+
+       return count;
+}
+
+static const struct file_operations udelay_test_debugfs_ops = {
+       .owner = THIS_MODULE,
+       .open = udelay_test_open,
+       .read = seq_read,
+       .write = udelay_test_write,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
+static int __init udelay_test_init(void)
+{
+       mutex_lock(&udelay_test_lock);
+       udelay_test_debugfs_file = debugfs_create_file(DEBUGFS_FILENAME,
+                       S_IRUSR, NULL, NULL, &udelay_test_debugfs_ops);
+       mutex_unlock(&udelay_test_lock);
+
+       return 0;
+}
+
+module_init(udelay_test_init);
+
+static void __exit udelay_test_exit(void)
+{
+       mutex_lock(&udelay_test_lock);
+       debugfs_remove(udelay_test_debugfs_file);
+       mutex_unlock(&udelay_test_lock);
+}
+
+module_exit(udelay_test_exit);
+
+MODULE_AUTHOR("David Riley <davidriley@chromium.org>");
+MODULE_LICENSE("GPL");
diff --git a/kernel/timeconst.bc b/kernel/timeconst.bc
deleted file mode 100644 (file)
index 511bdf2..0000000
+++ /dev/null
@@ -1,108 +0,0 @@
-scale=0
-
-define gcd(a,b) {
-       auto t;
-       while (b) {
-               t = b;
-               b = a % b;
-               a = t;
-       }
-       return a;
-}
-
-/* Division by reciprocal multiplication. */
-define fmul(b,n,d) {
-       return (2^b*n+d-1)/d;
-}
-
-/* Adjustment factor when a ceiling value is used.  Use as:
-   (imul * n) + (fmulxx * n + fadjxx) >> xx) */
-define fadj(b,n,d) {
-       auto v;
-       d = d/gcd(n,d);
-       v = 2^b*(d-1)/d;
-       return v;
-}
-
-/* Compute the appropriate mul/adj values as well as a shift count,
-   which brings the mul value into the range 2^b-1 <= x < 2^b.  Such
-   a shift value will be correct in the signed integer range and off
-   by at most one in the upper half of the unsigned range. */
-define fmuls(b,n,d) {
-       auto s, m;
-       for (s = 0; 1; s++) {
-               m = fmul(s,n,d);
-               if (m >= 2^(b-1))
-                       return s;
-       }
-       return 0;
-}
-
-define timeconst(hz) {
-       print "/* Automatically generated by kernel/timeconst.bc */\n"
-       print "/* Time conversion constants for HZ == ", hz, " */\n"
-       print "\n"
-
-       print "#ifndef KERNEL_TIMECONST_H\n"
-       print "#define KERNEL_TIMECONST_H\n\n"
-
-       print "#include <linux/param.h>\n"
-       print "#include <linux/types.h>\n\n"
-
-       print "#if HZ != ", hz, "\n"
-       print "#error \qkernel/timeconst.h has the wrong HZ value!\q\n"
-       print "#endif\n\n"
-
-       if (hz < 2) {
-               print "#error Totally bogus HZ value!\n"
-       } else {
-               s=fmuls(32,1000,hz)
-               obase=16
-               print "#define HZ_TO_MSEC_MUL32\tU64_C(0x", fmul(s,1000,hz), ")\n"
-               print "#define HZ_TO_MSEC_ADJ32\tU64_C(0x", fadj(s,1000,hz), ")\n"
-               obase=10
-               print "#define HZ_TO_MSEC_SHR32\t", s, "\n"
-
-               s=fmuls(32,hz,1000)
-               obase=16
-               print "#define MSEC_TO_HZ_MUL32\tU64_C(0x", fmul(s,hz,1000), ")\n"
-               print "#define MSEC_TO_HZ_ADJ32\tU64_C(0x", fadj(s,hz,1000), ")\n"
-               obase=10
-               print "#define MSEC_TO_HZ_SHR32\t", s, "\n"
-
-               obase=10
-               cd=gcd(hz,1000)
-               print "#define HZ_TO_MSEC_NUM\t\t", 1000/cd, "\n"
-               print "#define HZ_TO_MSEC_DEN\t\t", hz/cd, "\n"
-               print "#define MSEC_TO_HZ_NUM\t\t", hz/cd, "\n"
-               print "#define MSEC_TO_HZ_DEN\t\t", 1000/cd, "\n"
-               print "\n"
-
-               s=fmuls(32,1000000,hz)
-               obase=16
-               print "#define HZ_TO_USEC_MUL32\tU64_C(0x", fmul(s,1000000,hz), ")\n"
-               print "#define HZ_TO_USEC_ADJ32\tU64_C(0x", fadj(s,1000000,hz), ")\n"
-               obase=10
-               print "#define HZ_TO_USEC_SHR32\t", s, "\n"
-
-               s=fmuls(32,hz,1000000)
-               obase=16
-               print "#define USEC_TO_HZ_MUL32\tU64_C(0x", fmul(s,hz,1000000), ")\n"
-               print "#define USEC_TO_HZ_ADJ32\tU64_C(0x", fadj(s,hz,1000000), ")\n"
-               obase=10
-               print "#define USEC_TO_HZ_SHR32\t", s, "\n"
-
-               obase=10
-               cd=gcd(hz,1000000)
-               print "#define HZ_TO_USEC_NUM\t\t", 1000000/cd, "\n"
-               print "#define HZ_TO_USEC_DEN\t\t", hz/cd, "\n"
-               print "#define USEC_TO_HZ_NUM\t\t", hz/cd, "\n"
-               print "#define USEC_TO_HZ_DEN\t\t", 1000000/cd, "\n"
-               print "\n"
-
-               print "#endif /* KERNEL_TIMECONST_H */\n"
-       }
-       halt
-}
-
-timeconst(hz)
diff --git a/kernel/timer.c b/kernel/timer.c
deleted file mode 100644 (file)
index 3bb01a3..0000000
+++ /dev/null
@@ -1,1734 +0,0 @@
-/*
- *  linux/kernel/timer.c
- *
- *  Kernel internal timers
- *
- *  Copyright (C) 1991, 1992  Linus Torvalds
- *
- *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
- *
- *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
- *              "A Kernel Model for Precision Timekeeping" by Dave Mills
- *  1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
- *              serialize accesses to xtime/lost_ticks).
- *                              Copyright (C) 1998  Andrea Arcangeli
- *  1999-03-10  Improved NTP compatibility by Ulrich Windl
- *  2002-05-31 Move sys_sysinfo here and make its locking sane, Robert Love
- *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
- *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
- *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
- */
-
-#include <linux/kernel_stat.h>
-#include <linux/export.h>
-#include <linux/interrupt.h>
-#include <linux/percpu.h>
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
-#include <linux/pid_namespace.h>
-#include <linux/notifier.h>
-#include <linux/thread_info.h>
-#include <linux/time.h>
-#include <linux/jiffies.h>
-#include <linux/posix-timers.h>
-#include <linux/cpu.h>
-#include <linux/syscalls.h>
-#include <linux/delay.h>
-#include <linux/tick.h>
-#include <linux/kallsyms.h>
-#include <linux/irq_work.h>
-#include <linux/sched.h>
-#include <linux/sched/sysctl.h>
-#include <linux/slab.h>
-#include <linux/compat.h>
-
-#include <asm/uaccess.h>
-#include <asm/unistd.h>
-#include <asm/div64.h>
-#include <asm/timex.h>
-#include <asm/io.h>
-
-#define CREATE_TRACE_POINTS
-#include <trace/events/timer.h>
-
-__visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
-
-EXPORT_SYMBOL(jiffies_64);
-
-/*
- * per-CPU timer vector definitions:
- */
-#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
-#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
-#define TVN_SIZE (1 << TVN_BITS)
-#define TVR_SIZE (1 << TVR_BITS)
-#define TVN_MASK (TVN_SIZE - 1)
-#define TVR_MASK (TVR_SIZE - 1)
-#define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1))
-
-struct tvec {
-       struct list_head vec[TVN_SIZE];
-};
-
-struct tvec_root {
-       struct list_head vec[TVR_SIZE];
-};
-
-struct tvec_base {
-       spinlock_t lock;
-       struct timer_list *running_timer;
-       unsigned long timer_jiffies;
-       unsigned long next_timer;
-       unsigned long active_timers;
-       unsigned long all_timers;
-       struct tvec_root tv1;
-       struct tvec tv2;
-       struct tvec tv3;
-       struct tvec tv4;
-       struct tvec tv5;
-} ____cacheline_aligned;
-
-struct tvec_base boot_tvec_bases;
-EXPORT_SYMBOL(boot_tvec_bases);
-static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;
-
-/* Functions below help us manage 'deferrable' flag */
-static inline unsigned int tbase_get_deferrable(struct tvec_base *base)
-{
-       return ((unsigned int)(unsigned long)base & TIMER_DEFERRABLE);
-}
-
-static inline unsigned int tbase_get_irqsafe(struct tvec_base *base)
-{
-       return ((unsigned int)(unsigned long)base & TIMER_IRQSAFE);
-}
-
-static inline struct tvec_base *tbase_get_base(struct tvec_base *base)
-{
-       return ((struct tvec_base *)((unsigned long)base & ~TIMER_FLAG_MASK));
-}
-
-static inline void
-timer_set_base(struct timer_list *timer, struct tvec_base *new_base)
-{
-       unsigned long flags = (unsigned long)timer->base & TIMER_FLAG_MASK;
-
-       timer->base = (struct tvec_base *)((unsigned long)(new_base) | flags);
-}
-
-static unsigned long round_jiffies_common(unsigned long j, int cpu,
-               bool force_up)
-{
-       int rem;
-       unsigned long original = j;
-
-       /*
-        * We don't want all cpus firing their timers at once hitting the
-        * same lock or cachelines, so we skew each extra cpu with an extra
-        * 3 jiffies. This 3 jiffies came originally from the mm/ code which
-        * already did this.
-        * The skew is done by adding 3*cpunr, then round, then subtract this
-        * extra offset again.
-        */
-       j += cpu * 3;
-
-       rem = j % HZ;
-
-       /*
-        * If the target jiffie is just after a whole second (which can happen
-        * due to delays of the timer irq, long irq off times etc etc) then
-        * we should round down to the whole second, not up. Use 1/4th second
-        * as cutoff for this rounding as an extreme upper bound for this.
-        * But never round down if @force_up is set.
-        */
-       if (rem < HZ/4 && !force_up) /* round down */
-               j = j - rem;
-       else /* round up */
-               j = j - rem + HZ;
-
-       /* now that we have rounded, subtract the extra skew again */
-       j -= cpu * 3;
-
-       /*
-        * Make sure j is still in the future. Otherwise return the
-        * unmodified value.
-        */
-       return time_is_after_jiffies(j) ? j : original;
-}
-
-/**
- * __round_jiffies - function to round jiffies to a full second
- * @j: the time in (absolute) jiffies that should be rounded
- * @cpu: the processor number on which the timeout will happen
- *
- * __round_jiffies() rounds an absolute time in the future (in jiffies)
- * up or down to (approximately) full seconds. This is useful for timers
- * for which the exact time they fire does not matter too much, as long as
- * they fire approximately every X seconds.
- *
- * By rounding these timers to whole seconds, all such timers will fire
- * at the same time, rather than at various times spread out. The goal
- * of this is to have the CPU wake up less, which saves power.
- *
- * The exact rounding is skewed for each processor to avoid all
- * processors firing at the exact same time, which could lead
- * to lock contention or spurious cache line bouncing.
- *
- * The return value is the rounded version of the @j parameter.
- */
-unsigned long __round_jiffies(unsigned long j, int cpu)
-{
-       return round_jiffies_common(j, cpu, false);
-}
-EXPORT_SYMBOL_GPL(__round_jiffies);
-
-/**
- * __round_jiffies_relative - function to round jiffies to a full second
- * @j: the time in (relative) jiffies that should be rounded
- * @cpu: the processor number on which the timeout will happen
- *
- * __round_jiffies_relative() rounds a time delta  in the future (in jiffies)
- * up or down to (approximately) full seconds. This is useful for timers
- * for which the exact time they fire does not matter too much, as long as
- * they fire approximately every X seconds.
- *
- * By rounding these timers to whole seconds, all such timers will fire
- * at the same time, rather than at various times spread out. The goal
- * of this is to have the CPU wake up less, which saves power.
- *
- * The exact rounding is skewed for each processor to avoid all
- * processors firing at the exact same time, which could lead
- * to lock contention or spurious cache line bouncing.
- *
- * The return value is the rounded version of the @j parameter.
- */
-unsigned long __round_jiffies_relative(unsigned long j, int cpu)
-{
-       unsigned long j0 = jiffies;
-
-       /* Use j0 because jiffies might change while we run */
-       return round_jiffies_common(j + j0, cpu, false) - j0;
-}
-EXPORT_SYMBOL_GPL(__round_jiffies_relative);
-
-/**
- * round_jiffies - function to round jiffies to a full second
- * @j: the time in (absolute) jiffies that should be rounded
- *
- * round_jiffies() rounds an absolute time in the future (in jiffies)
- * up or down to (approximately) full seconds. This is useful for timers
- * for which the exact time they fire does not matter too much, as long as
- * they fire approximately every X seconds.
- *
- * By rounding these timers to whole seconds, all such timers will fire
- * at the same time, rather than at various times spread out. The goal
- * of this is to have the CPU wake up less, which saves power.
- *
- * The return value is the rounded version of the @j parameter.
- */
-unsigned long round_jiffies(unsigned long j)
-{
-       return round_jiffies_common(j, raw_smp_processor_id(), false);
-}
-EXPORT_SYMBOL_GPL(round_jiffies);
-
-/**
- * round_jiffies_relative - function to round jiffies to a full second
- * @j: the time in (relative) jiffies that should be rounded
- *
- * round_jiffies_relative() rounds a time delta  in the future (in jiffies)
- * up or down to (approximately) full seconds. This is useful for timers
- * for which the exact time they fire does not matter too much, as long as
- * they fire approximately every X seconds.
- *
- * By rounding these timers to whole seconds, all such timers will fire
- * at the same time, rather than at various times spread out. The goal
- * of this is to have the CPU wake up less, which saves power.
- *
- * The return value is the rounded version of the @j parameter.
- */
-unsigned long round_jiffies_relative(unsigned long j)
-{
-       return __round_jiffies_relative(j, raw_smp_processor_id());
-}
-EXPORT_SYMBOL_GPL(round_jiffies_relative);
-
-/**
- * __round_jiffies_up - function to round jiffies up to a full second
- * @j: the time in (absolute) jiffies that should be rounded
- * @cpu: the processor number on which the timeout will happen
- *
- * This is the same as __round_jiffies() except that it will never
- * round down.  This is useful for timeouts for which the exact time
- * of firing does not matter too much, as long as they don't fire too
- * early.
- */
-unsigned long __round_jiffies_up(unsigned long j, int cpu)
-{
-       return round_jiffies_common(j, cpu, true);
-}
-EXPORT_SYMBOL_GPL(__round_jiffies_up);
-
-/**
- * __round_jiffies_up_relative - function to round jiffies up to a full second
- * @j: the time in (relative) jiffies that should be rounded
- * @cpu: the processor number on which the timeout will happen
- *
- * This is the same as __round_jiffies_relative() except that it will never
- * round down.  This is useful for timeouts for which the exact time
- * of firing does not matter too much, as long as they don't fire too
- * early.
- */
-unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
-{
-       unsigned long j0 = jiffies;
-
-       /* Use j0 because jiffies might change while we run */
-       return round_jiffies_common(j + j0, cpu, true) - j0;
-}
-EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);
-
-/**
- * round_jiffies_up - function to round jiffies up to a full second
- * @j: the time in (absolute) jiffies that should be rounded
- *
- * This is the same as round_jiffies() except that it will never
- * round down.  This is useful for timeouts for which the exact time
- * of firing does not matter too much, as long as they don't fire too
- * early.
- */
-unsigned long round_jiffies_up(unsigned long j)
-{
-       return round_jiffies_common(j, raw_smp_processor_id(), true);
-}
-EXPORT_SYMBOL_GPL(round_jiffies_up);
-
-/**
- * round_jiffies_up_relative - function to round jiffies up to a full second
- * @j: the time in (relative) jiffies that should be rounded
- *
- * This is the same as round_jiffies_relative() except that it will never
- * round down.  This is useful for timeouts for which the exact time
- * of firing does not matter too much, as long as they don't fire too
- * early.
- */
-unsigned long round_jiffies_up_relative(unsigned long j)
-{
-       return __round_jiffies_up_relative(j, raw_smp_processor_id());
-}
-EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
-
-/**
- * set_timer_slack - set the allowed slack for a timer
- * @timer: the timer to be modified
- * @slack_hz: the amount of time (in jiffies) allowed for rounding
- *
- * Set the amount of time, in jiffies, that a certain timer has
- * in terms of slack. By setting this value, the timer subsystem
- * will schedule the actual timer somewhere between
- * the time mod_timer() asks for, and that time plus the slack.
- *
- * By setting the slack to -1, a percentage of the delay is used
- * instead.
- */
-void set_timer_slack(struct timer_list *timer, int slack_hz)
-{
-       timer->slack = slack_hz;
-}
-EXPORT_SYMBOL_GPL(set_timer_slack);
-
-/*
- * If the list is empty, catch up ->timer_jiffies to the current time.
- * The caller must hold the tvec_base lock.  Returns true if the list
- * was empty and therefore ->timer_jiffies was updated.
- */
-static bool catchup_timer_jiffies(struct tvec_base *base)
-{
-       if (!base->all_timers) {
-               base->timer_jiffies = jiffies;
-               return true;
-       }
-       return false;
-}
-
-static void
-__internal_add_timer(struct tvec_base *base, struct timer_list *timer)
-{
-       unsigned long expires = timer->expires;
-       unsigned long idx = expires - base->timer_jiffies;
-       struct list_head *vec;
-
-       if (idx < TVR_SIZE) {
-               int i = expires & TVR_MASK;
-               vec = base->tv1.vec + i;
-       } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
-               int i = (expires >> TVR_BITS) & TVN_MASK;
-               vec = base->tv2.vec + i;
-       } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
-               int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
-               vec = base->tv3.vec + i;
-       } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
-               int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
-               vec = base->tv4.vec + i;
-       } else if ((signed long) idx < 0) {
-               /*
-                * Can happen if you add a timer with expires == jiffies,
-                * or you set a timer to go off in the past
-                */
-               vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
-       } else {
-               int i;
-               /* If the timeout is larger than MAX_TVAL (on 64-bit
-                * architectures or with CONFIG_BASE_SMALL=1) then we
-                * use the maximum timeout.
-                */
-               if (idx > MAX_TVAL) {
-                       idx = MAX_TVAL;
-                       expires = idx + base->timer_jiffies;
-               }
-               i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
-               vec = base->tv5.vec + i;
-       }
-       /*
-        * Timers are FIFO:
-        */
-       list_add_tail(&timer->entry, vec);
-}
-
-static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
-{
-       (void)catchup_timer_jiffies(base);
-       __internal_add_timer(base, timer);
-       /*
-        * Update base->active_timers and base->next_timer
-        */
-       if (!tbase_get_deferrable(timer->base)) {
-               if (!base->active_timers++ ||
-                   time_before(timer->expires, base->next_timer))
-                       base->next_timer = timer->expires;
-       }
-       base->all_timers++;
-}
-
-#ifdef CONFIG_TIMER_STATS
-void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
-{
-       if (timer->start_site)
-               return;
-
-       timer->start_site = addr;
-       memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
-       timer->start_pid = current->pid;
-}
-
-static void timer_stats_account_timer(struct timer_list *timer)
-{
-       unsigned int flag = 0;
-
-       if (likely(!timer->start_site))
-               return;
-       if (unlikely(tbase_get_deferrable(timer->base)))
-               flag |= TIMER_STATS_FLAG_DEFERRABLE;
-
-       timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
-                                timer->function, timer->start_comm, flag);
-}
-
-#else
-static void timer_stats_account_timer(struct timer_list *timer) {}
-#endif
-
-#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
-
-static struct debug_obj_descr timer_debug_descr;
-
-static void *timer_debug_hint(void *addr)
-{
-       return ((struct timer_list *) addr)->function;
-}
-
-/*
- * fixup_init is called when:
- * - an active object is initialized
- */
-static int timer_fixup_init(void *addr, enum debug_obj_state state)
-{
-       struct timer_list *timer = addr;
-
-       switch (state) {
-       case ODEBUG_STATE_ACTIVE:
-               del_timer_sync(timer);
-               debug_object_init(timer, &timer_debug_descr);
-               return 1;
-       default:
-               return 0;
-       }
-}
-
-/* Stub timer callback for improperly used timers. */
-static void stub_timer(unsigned long data)
-{
-       WARN_ON(1);
-}
-
-/*
- * fixup_activate is called when:
- * - an active object is activated
- * - an unknown object is activated (might be a statically initialized object)
- */
-static int timer_fixup_activate(void *addr, enum debug_obj_state state)
-{
-       struct timer_list *timer = addr;
-
-       switch (state) {
-
-       case ODEBUG_STATE_NOTAVAILABLE:
-               /*
-                * This is not really a fixup. The timer was
-                * statically initialized. We just make sure that it
-                * is tracked in the object tracker.
-                */
-               if (timer->entry.next == NULL &&
-                   timer->entry.prev == TIMER_ENTRY_STATIC) {
-                       debug_object_init(timer, &timer_debug_descr);
-                       debug_object_activate(timer, &timer_debug_descr);
-                       return 0;
-               } else {
-                       setup_timer(timer, stub_timer, 0);
-                       return 1;
-               }
-               return 0;
-
-       case ODEBUG_STATE_ACTIVE:
-               WARN_ON(1);
-
-       default:
-               return 0;
-       }
-}
-
-/*
- * fixup_free is called when:
- * - an active object is freed
- */
-static int timer_fixup_free(void *addr, enum debug_obj_state state)
-{
-       struct timer_list *timer = addr;
-
-       switch (state) {
-       case ODEBUG_STATE_ACTIVE:
-               del_timer_sync(timer);
-               debug_object_free(timer, &timer_debug_descr);
-               return 1;
-       default:
-               return 0;
-       }
-}
-
-/*
- * fixup_assert_init is called when:
- * - an untracked/uninit-ed object is found
- */
-static int timer_fixup_assert_init(void *addr, enum debug_obj_state state)
-{
-       struct timer_list *timer = addr;
-
-       switch (state) {
-       case ODEBUG_STATE_NOTAVAILABLE:
-               if (timer->entry.prev == TIMER_ENTRY_STATIC) {
-                       /*
-                        * This is not really a fixup. The timer was
-                        * statically initialized. We just make sure that it
-                        * is tracked in the object tracker.
-                        */
-                       debug_object_init(timer, &timer_debug_descr);
-                       return 0;
-               } else {
-                       setup_timer(timer, stub_timer, 0);
-                       return 1;
-               }
-       default:
-               return 0;
-       }
-}
-
-static struct debug_obj_descr timer_debug_descr = {
-       .name                   = "timer_list",
-       .debug_hint             = timer_debug_hint,
-       .fixup_init             = timer_fixup_init,
-       .fixup_activate         = timer_fixup_activate,
-       .fixup_free             = timer_fixup_free,
-       .fixup_assert_init      = timer_fixup_assert_init,
-};
-
-static inline void debug_timer_init(struct timer_list *timer)
-{
-       debug_object_init(timer, &timer_debug_descr);
-}
-
-static inline void debug_timer_activate(struct timer_list *timer)
-{
-       debug_object_activate(timer, &timer_debug_descr);
-}
-
-static inline void debug_timer_deactivate(struct timer_list *timer)
-{
-       debug_object_deactivate(timer, &timer_debug_descr);
-}
-
-static inline void debug_timer_free(struct timer_list *timer)
-{
-       debug_object_free(timer, &timer_debug_descr);
-}
-
-static inline void debug_timer_assert_init(struct timer_list *timer)
-{
-       debug_object_assert_init(timer, &timer_debug_descr);
-}
-
-static void do_init_timer(struct timer_list *timer, unsigned int flags,
-                         const char *name, struct lock_class_key *key);
-
-void init_timer_on_stack_key(struct timer_list *timer, unsigned int flags,
-                            const char *name, struct lock_class_key *key)
-{
-       debug_object_init_on_stack(timer, &timer_debug_descr);
-       do_init_timer(timer, flags, name, key);
-}
-EXPORT_SYMBOL_GPL(init_timer_on_stack_key);
-
-void destroy_timer_on_stack(struct timer_list *timer)
-{
-       debug_object_free(timer, &timer_debug_descr);
-}
-EXPORT_SYMBOL_GPL(destroy_timer_on_stack);
-
-#else
-static inline void debug_timer_init(struct timer_list *timer) { }
-static inline void debug_timer_activate(struct timer_list *timer) { }
-static inline void debug_timer_deactivate(struct timer_list *timer) { }
-static inline void debug_timer_assert_init(struct timer_list *timer) { }
-#endif
-
-static inline void debug_init(struct timer_list *timer)
-{
-       debug_timer_init(timer);
-       trace_timer_init(timer);
-}
-
-static inline void
-debug_activate(struct timer_list *timer, unsigned long expires)
-{
-       debug_timer_activate(timer);
-       trace_timer_start(timer, expires);
-}
-
-static inline void debug_deactivate(struct timer_list *timer)
-{
-       debug_timer_deactivate(timer);
-       trace_timer_cancel(timer);
-}
-
-static inline void debug_assert_init(struct timer_list *timer)
-{
-       debug_timer_assert_init(timer);
-}
-
-static void do_init_timer(struct timer_list *timer, unsigned int flags,
-                         const char *name, struct lock_class_key *key)
-{
-       struct tvec_base *base = __raw_get_cpu_var(tvec_bases);
-
-       timer->entry.next = NULL;
-       timer->base = (void *)((unsigned long)base | flags);
-       timer->slack = -1;
-#ifdef CONFIG_TIMER_STATS
-       timer->start_site = NULL;
-       timer->start_pid = -1;
-       memset(timer->start_comm, 0, TASK_COMM_LEN);
-#endif
-       lockdep_init_map(&timer->lockdep_map, name, key, 0);
-}
-
-/**
- * init_timer_key - initialize a timer
- * @timer: the timer to be initialized
- * @flags: timer flags
- * @name: name of the timer
- * @key: lockdep class key of the fake lock used for tracking timer
- *       sync lock dependencies
- *
- * init_timer_key() must be done to a timer prior calling *any* of the
- * other timer functions.
- */
-void init_timer_key(struct timer_list *timer, unsigned int flags,
-                   const char *name, struct lock_class_key *key)
-{
-       debug_init(timer);
-       do_init_timer(timer, flags, name, key);
-}
-EXPORT_SYMBOL(init_timer_key);
-
-static inline void detach_timer(struct timer_list *timer, bool clear_pending)
-{
-       struct list_head *entry = &timer->entry;
-
-       debug_deactivate(timer);
-
-       __list_del(entry->prev, entry->next);
-       if (clear_pending)
-               entry->next = NULL;
-       entry->prev = LIST_POISON2;
-}
-
-static inline void
-detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
-{
-       detach_timer(timer, true);
-       if (!tbase_get_deferrable(timer->base))
-               base->active_timers--;
-       base->all_timers--;
-       (void)catchup_timer_jiffies(base);
-}
-
-static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
-                            bool clear_pending)
-{
-       if (!timer_pending(timer))
-               return 0;
-
-       detach_timer(timer, clear_pending);
-       if (!tbase_get_deferrable(timer->base)) {
-               base->active_timers--;
-               if (timer->expires == base->next_timer)
-                       base->next_timer = base->timer_jiffies;
-       }
-       base->all_timers--;
-       (void)catchup_timer_jiffies(base);
-       return 1;
-}
-
-/*
- * We are using hashed locking: holding per_cpu(tvec_bases).lock
- * means that all timers which are tied to this base via timer->base are
- * locked, and the base itself is locked too.
- *
- * So __run_timers/migrate_timers can safely modify all timers which could
- * be found on ->tvX lists.
- *
- * When the timer's base is locked, and the timer removed from list, it is
- * possible to set timer->base = NULL and drop the lock: the timer remains
- * locked.
- */
-static struct tvec_base *lock_timer_base(struct timer_list *timer,
-                                       unsigned long *flags)
-       __acquires(timer->base->lock)
-{
-       struct tvec_base *base;
-
-       for (;;) {
-               struct tvec_base *prelock_base = timer->base;
-               base = tbase_get_base(prelock_base);
-               if (likely(base != NULL)) {
-                       spin_lock_irqsave(&base->lock, *flags);
-                       if (likely(prelock_base == timer->base))
-                               return base;
-                       /* The timer has migrated to another CPU */
-                       spin_unlock_irqrestore(&base->lock, *flags);
-               }
-               cpu_relax();
-       }
-}
-
-static inline int
-__mod_timer(struct timer_list *timer, unsigned long expires,
-                                               bool pending_only, int pinned)
-{
-       struct tvec_base *base, *new_base;
-       unsigned long flags;
-       int ret = 0 , cpu;
-
-       timer_stats_timer_set_start_info(timer);
-       BUG_ON(!timer->function);
-
-       base = lock_timer_base(timer, &flags);
-
-       ret = detach_if_pending(timer, base, false);
-       if (!ret && pending_only)
-               goto out_unlock;
-
-       debug_activate(timer, expires);
-
-       cpu = get_nohz_timer_target(pinned);
-       new_base = per_cpu(tvec_bases, cpu);
-
-       if (base != new_base) {
-               /*
-                * We are trying to schedule the timer on the local CPU.
-                * However we can't change timer's base while it is running,
-                * otherwise del_timer_sync() can't detect that the timer's
-                * handler has not yet finished. This also guarantees that
-                * the timer is serialized wrt itself.
-                */
-               if (likely(base->running_timer != timer)) {
-                       /* See the comment in lock_timer_base() */
-                       timer_set_base(timer, NULL);
-                       spin_unlock(&base->lock);
-                       base = new_base;
-                       spin_lock(&base->lock);
-                       timer_set_base(timer, base);
-               }
-       }
-
-       timer->expires = expires;
-       internal_add_timer(base, timer);
-
-out_unlock:
-       spin_unlock_irqrestore(&base->lock, flags);
-
-       return ret;
-}
-
-/**
- * mod_timer_pending - modify a pending timer's timeout
- * @timer: the pending timer to be modified
- * @expires: new timeout in jiffies
- *
- * mod_timer_pending() is the same for pending timers as mod_timer(),
- * but will not re-activate and modify already deleted timers.
- *
- * It is useful for unserialized use of timers.
- */
-int mod_timer_pending(struct timer_list *timer, unsigned long expires)
-{
-       return __mod_timer(timer, expires, true, TIMER_NOT_PINNED);
-}
-EXPORT_SYMBOL(mod_timer_pending);
-
-/*
- * Decide where to put the timer while taking the slack into account
- *
- * Algorithm:
- *   1) calculate the maximum (absolute) time
- *   2) calculate the highest bit where the expires and new max are different
- *   3) use this bit to make a mask
- *   4) use the bitmask to round down the maximum time, so that all last
- *      bits are zeros
- */
-static inline
-unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
-{
-       unsigned long expires_limit, mask;
-       int bit;
-
-       if (timer->slack >= 0) {
-               expires_limit = expires + timer->slack;
-       } else {
-               long delta = expires - jiffies;
-
-               if (delta < 256)
-                       return expires;
-
-               expires_limit = expires + delta / 256;
-       }
-       mask = expires ^ expires_limit;
-       if (mask == 0)
-               return expires;
-
-       bit = find_last_bit(&mask, BITS_PER_LONG);
-
-       mask = (1UL << bit) - 1;
-
-       expires_limit = expires_limit & ~(mask);
-
-       return expires_limit;
-}
-
-/**
- * mod_timer - modify a timer's timeout
- * @timer: the timer to be modified
- * @expires: new timeout in jiffies
- *
- * mod_timer() is a more efficient way to update the expire field of an
- * active timer (if the timer is inactive it will be activated)
- *
- * mod_timer(timer, expires) is equivalent to:
- *
- *     del_timer(timer); timer->expires = expires; add_timer(timer);
- *
- * Note that if there are multiple unserialized concurrent users of the
- * same timer, then mod_timer() is the only safe way to modify the timeout,
- * since add_timer() cannot modify an already running timer.
- *
- * The function returns whether it has modified a pending timer or not.
- * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
- * active timer returns 1.)
- */
-int mod_timer(struct timer_list *timer, unsigned long expires)
-{
-       expires = apply_slack(timer, expires);
-
-       /*
-        * This is a common optimization triggered by the
-        * networking code - if the timer is re-modified
-        * to be the same thing then just return:
-        */
-       if (timer_pending(timer) && timer->expires == expires)
-               return 1;
-
-       return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
-}
-EXPORT_SYMBOL(mod_timer);
-
-/**
- * mod_timer_pinned - modify a timer's timeout
- * @timer: the timer to be modified
- * @expires: new timeout in jiffies
- *
- * mod_timer_pinned() is a way to update the expire field of an
- * active timer (if the timer is inactive it will be activated)
- * and to ensure that the timer is scheduled on the current CPU.
- *
- * Note that this does not prevent the timer from being migrated
- * when the current CPU goes offline.  If this is a problem for
- * you, use CPU-hotplug notifiers to handle it correctly, for
- * example, cancelling the timer when the corresponding CPU goes
- * offline.
- *
- * mod_timer_pinned(timer, expires) is equivalent to:
- *
- *     del_timer(timer); timer->expires = expires; add_timer(timer);
- */
-int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
-{
-       if (timer->expires == expires && timer_pending(timer))
-               return 1;
-
-       return __mod_timer(timer, expires, false, TIMER_PINNED);
-}
-EXPORT_SYMBOL(mod_timer_pinned);
-
-/**
- * add_timer - start a timer
- * @timer: the timer to be added
- *
- * The kernel will do a ->function(->data) callback from the
- * timer interrupt at the ->expires point in the future. The
- * current time is 'jiffies'.
- *
- * The timer's ->expires, ->function (and if the handler uses it, ->data)
- * fields must be set prior to calling this function.
- *
- * Timers with an ->expires field in the past will be executed in the next
- * timer tick.
- */
-void add_timer(struct timer_list *timer)
-{
-       BUG_ON(timer_pending(timer));
-       mod_timer(timer, timer->expires);
-}
-EXPORT_SYMBOL(add_timer);
-
-/**
- * add_timer_on - start a timer on a particular CPU
- * @timer: the timer to be added
- * @cpu: the CPU to start it on
- *
- * This is not very scalable on SMP. Double adds are not possible.
- */
-void add_timer_on(struct timer_list *timer, int cpu)
-{
-       struct tvec_base *base = per_cpu(tvec_bases, cpu);
-       unsigned long flags;
-
-       timer_stats_timer_set_start_info(timer);
-       BUG_ON(timer_pending(timer) || !timer->function);
-       spin_lock_irqsave(&base->lock, flags);
-       timer_set_base(timer, base);
-       debug_activate(timer, timer->expires);
-       internal_add_timer(base, timer);
-       /*
-        * Check whether the other CPU is in dynticks mode and needs
-        * to be triggered to reevaluate the timer wheel.
-        * We are protected against the other CPU fiddling
-        * with the timer by holding the timer base lock. This also
-        * makes sure that a CPU on the way to stop its tick can not
-        * evaluate the timer wheel.
-        *
-        * Spare the IPI for deferrable timers on idle targets though.
-        * The next busy ticks will take care of it. Except full dynticks
-        * require special care against races with idle_cpu(), let's deal
-        * with that later.
-        */
-       if (!tbase_get_deferrable(timer->base) || tick_nohz_full_cpu(cpu))
-               wake_up_nohz_cpu(cpu);
-
-       spin_unlock_irqrestore(&base->lock, flags);
-}
-EXPORT_SYMBOL_GPL(add_timer_on);
-
-/**
- * del_timer - deactivate a timer.
- * @timer: the timer to be deactivated
- *
- * del_timer() deactivates a timer - this works on both active and inactive
- * timers.
- *
- * The function returns whether it has deactivated a pending timer or not.
- * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
- * active timer returns 1.)
- */
-int del_timer(struct timer_list *timer)
-{
-       struct tvec_base *base;
-       unsigned long flags;
-       int ret = 0;
-
-       debug_assert_init(timer);
-
-       timer_stats_timer_clear_start_info(timer);
-       if (timer_pending(timer)) {
-               base = lock_timer_base(timer, &flags);
-               ret = detach_if_pending(timer, base, true);
-               spin_unlock_irqrestore(&base->lock, flags);
-       }
-
-       return ret;
-}
-EXPORT_SYMBOL(del_timer);
-
-/**
- * try_to_del_timer_sync - Try to deactivate a timer
- * @timer: timer to delete
- *
- * This function tries to deactivate a timer. Upon successful (ret >= 0)
- * exit the timer is not queued and the handler is not running on any CPU.
- */
-int try_to_del_timer_sync(struct timer_list *timer)
-{
-       struct tvec_base *base;
-       unsigned long flags;
-       int ret = -1;
-
-       debug_assert_init(timer);
-
-       base = lock_timer_base(timer, &flags);
-
-       if (base->running_timer != timer) {
-               timer_stats_timer_clear_start_info(timer);
-               ret = detach_if_pending(timer, base, true);
-       }
-       spin_unlock_irqrestore(&base->lock, flags);
-
-       return ret;
-}
-EXPORT_SYMBOL(try_to_del_timer_sync);
-
-#ifdef CONFIG_SMP
-/**
- * del_timer_sync - deactivate a timer and wait for the handler to finish.
- * @timer: the timer to be deactivated
- *
- * This function only differs from del_timer() on SMP: besides deactivating
- * the timer it also makes sure the handler has finished executing on other
- * CPUs.
- *
- * Synchronization rules: Callers must prevent restarting of the timer,
- * otherwise this function is meaningless. It must not be called from
- * interrupt contexts unless the timer is an irqsafe one. The caller must
- * not hold locks which would prevent completion of the timer's
- * handler. The timer's handler must not call add_timer_on(). Upon exit the
- * timer is not queued and the handler is not running on any CPU.
- *
- * Note: For !irqsafe timers, you must not hold locks that are held in
- *   interrupt context while calling this function. Even if the lock has
- *   nothing to do with the timer in question.  Here's why:
- *
- *    CPU0                             CPU1
- *    ----                             ----
- *                                   <SOFTIRQ>
- *                                   call_timer_fn();
- *                                     base->running_timer = mytimer;
- *  spin_lock_irq(somelock);
- *                                     <IRQ>
- *                                        spin_lock(somelock);
- *  del_timer_sync(mytimer);
- *   while (base->running_timer == mytimer);
- *
- * Now del_timer_sync() will never return and never release somelock.
- * The interrupt on the other CPU is waiting to grab somelock but
- * it has interrupted the softirq that CPU0 is waiting to finish.
- *
- * The function returns whether it has deactivated a pending timer or not.
- */
-int del_timer_sync(struct timer_list *timer)
-{
-#ifdef CONFIG_LOCKDEP
-       unsigned long flags;
-
-       /*
-        * If lockdep gives a backtrace here, please reference
-        * the synchronization rules above.
-        */
-       local_irq_save(flags);
-       lock_map_acquire(&timer->lockdep_map);
-       lock_map_release(&timer->lockdep_map);
-       local_irq_restore(flags);
-#endif
-       /*
-        * don't use it in hardirq context, because it
-        * could lead to deadlock.
-        */
-       WARN_ON(in_irq() && !tbase_get_irqsafe(timer->base));
-       for (;;) {
-               int ret = try_to_del_timer_sync(timer);
-               if (ret >= 0)
-                       return ret;
-               cpu_relax();
-       }
-}
-EXPORT_SYMBOL(del_timer_sync);
-#endif
-
-static int cascade(struct tvec_base *base, struct tvec *tv, int index)
-{
-       /* cascade all the timers from tv up one level */
-       struct timer_list *timer, *tmp;
-       struct list_head tv_list;
-
-       list_replace_init(tv->vec + index, &tv_list);
-
-       /*
-        * We are removing _all_ timers from the list, so we
-        * don't have to detach them individually.
-        */
-       list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
-               BUG_ON(tbase_get_base(timer->base) != base);
-               /* No accounting, while moving them */
-               __internal_add_timer(base, timer);
-       }
-
-       return index;
-}
-
-static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
-                         unsigned long data)
-{
-       int count = preempt_count();
-
-#ifdef CONFIG_LOCKDEP
-       /*
-        * It is permissible to free the timer from inside the
-        * function that is called from it, this we need to take into
-        * account for lockdep too. To avoid bogus "held lock freed"
-        * warnings as well as problems when looking into
-        * timer->lockdep_map, make a copy and use that here.
-        */
-       struct lockdep_map lockdep_map;
-
-       lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
-#endif
-       /*
-        * Couple the lock chain with the lock chain at
-        * del_timer_sync() by acquiring the lock_map around the fn()
-        * call here and in del_timer_sync().
-        */
-       lock_map_acquire(&lockdep_map);
-
-       trace_timer_expire_entry(timer);
-       fn(data);
-       trace_timer_expire_exit(timer);
-
-       lock_map_release(&lockdep_map);
-
-       if (count != preempt_count()) {
-               WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
-                         fn, count, preempt_count());
-               /*
-                * Restore the preempt count. That gives us a decent
-                * chance to survive and extract information. If the
-                * callback kept a lock held, bad luck, but not worse
-                * than the BUG() we had.
-                */
-               preempt_count_set(count);
-       }
-}
-
-#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
-
-/**
- * __run_timers - run all expired timers (if any) on this CPU.
- * @base: the timer vector to be processed.
- *
- * This function cascades all vectors and executes all expired timer
- * vectors.
- */
-static inline void __run_timers(struct tvec_base *base)
-{
-       struct timer_list *timer;
-
-       spin_lock_irq(&base->lock);
-       if (catchup_timer_jiffies(base)) {
-               spin_unlock_irq(&base->lock);
-               return;
-       }
-       while (time_after_eq(jiffies, base->timer_jiffies)) {
-               struct list_head work_list;
-               struct list_head *head = &work_list;
-               int index = base->timer_jiffies & TVR_MASK;
-
-               /*
-                * Cascade timers:
-                */
-               if (!index &&
-                       (!cascade(base, &base->tv2, INDEX(0))) &&
-                               (!cascade(base, &base->tv3, INDEX(1))) &&
-                                       !cascade(base, &base->tv4, INDEX(2)))
-                       cascade(base, &base->tv5, INDEX(3));
-               ++base->timer_jiffies;
-               list_replace_init(base->tv1.vec + index, head);
-               while (!list_empty(head)) {
-                       void (*fn)(unsigned long);
-                       unsigned long data;
-                       bool irqsafe;
-
-                       timer = list_first_entry(head, struct timer_list,entry);
-                       fn = timer->function;
-                       data = timer->data;
-                       irqsafe = tbase_get_irqsafe(timer->base);
-
-                       timer_stats_account_timer(timer);
-
-                       base->running_timer = timer;
-                       detach_expired_timer(timer, base);
-
-                       if (irqsafe) {
-                               spin_unlock(&base->lock);
-                               call_timer_fn(timer, fn, data);
-                               spin_lock(&base->lock);
-                       } else {
-                               spin_unlock_irq(&base->lock);
-                               call_timer_fn(timer, fn, data);
-                               spin_lock_irq(&base->lock);
-                       }
-               }
-       }
-       base->running_timer = NULL;
-       spin_unlock_irq(&base->lock);
-}
-
-#ifdef CONFIG_NO_HZ_COMMON
-/*
- * Find out when the next timer event is due to happen. This
- * is used on S/390 to stop all activity when a CPU is idle.
- * This function needs to be called with interrupts disabled.
- */
-static unsigned long __next_timer_interrupt(struct tvec_base *base)
-{
-       unsigned long timer_jiffies = base->timer_jiffies;
-       unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
-       int index, slot, array, found = 0;
-       struct timer_list *nte;
-       struct tvec *varray[4];
-
-       /* Look for timer events in tv1. */
-       index = slot = timer_jiffies & TVR_MASK;
-       do {
-               list_for_each_entry(nte, base->tv1.vec + slot, entry) {
-                       if (tbase_get_deferrable(nte->base))
-                               continue;
-
-                       found = 1;
-                       expires = nte->expires;
-                       /* Look at the cascade bucket(s)? */
-                       if (!index || slot < index)
-                               goto cascade;
-                       return expires;
-               }
-               slot = (slot + 1) & TVR_MASK;
-       } while (slot != index);
-
-cascade:
-       /* Calculate the next cascade event */
-       if (index)
-               timer_jiffies += TVR_SIZE - index;
-       timer_jiffies >>= TVR_BITS;
-
-       /* Check tv2-tv5. */
-       varray[0] = &base->tv2;
-       varray[1] = &base->tv3;
-       varray[2] = &base->tv4;
-       varray[3] = &base->tv5;
-
-       for (array = 0; array < 4; array++) {
-               struct tvec *varp = varray[array];
-
-               index = slot = timer_jiffies & TVN_MASK;
-               do {
-                       list_for_each_entry(nte, varp->vec + slot, entry) {
-                               if (tbase_get_deferrable(nte->base))
-                                       continue;
-
-                               found = 1;
-                               if (time_before(nte->expires, expires))
-                                       expires = nte->expires;
-                       }
-                       /*
-                        * Do we still search for the first timer or are
-                        * we looking up the cascade buckets ?
-                        */
-                       if (found) {
-                               /* Look at the cascade bucket(s)? */
-                               if (!index || slot < index)
-                                       break;
-                               return expires;
-                       }
-                       slot = (slot + 1) & TVN_MASK;
-               } while (slot != index);
-
-               if (index)
-                       timer_jiffies += TVN_SIZE - index;
-               timer_jiffies >>= TVN_BITS;
-       }
-       return expires;
-}
-
-/*
- * Check, if the next hrtimer event is before the next timer wheel
- * event:
- */
-static unsigned long cmp_next_hrtimer_event(unsigned long now,
-                                           unsigned long expires)
-{
-       ktime_t hr_delta = hrtimer_get_next_event();
-       struct timespec tsdelta;
-       unsigned long delta;
-
-       if (hr_delta.tv64 == KTIME_MAX)
-               return expires;
-
-       /*
-        * Expired timer available, let it expire in the next tick
-        */
-       if (hr_delta.tv64 <= 0)
-               return now + 1;
-
-       tsdelta = ktime_to_timespec(hr_delta);
-       delta = timespec_to_jiffies(&tsdelta);
-
-       /*
-        * Limit the delta to the max value, which is checked in
-        * tick_nohz_stop_sched_tick():
-        */
-       if (delta > NEXT_TIMER_MAX_DELTA)
-               delta = NEXT_TIMER_MAX_DELTA;
-
-       /*
-        * Take rounding errors in to account and make sure, that it
-        * expires in the next tick. Otherwise we go into an endless
-        * ping pong due to tick_nohz_stop_sched_tick() retriggering
-        * the timer softirq
-        */
-       if (delta < 1)
-               delta = 1;
-       now += delta;
-       if (time_before(now, expires))
-               return now;
-       return expires;
-}
-
-/**
- * get_next_timer_interrupt - return the jiffy of the next pending timer
- * @now: current time (in jiffies)
- */
-unsigned long get_next_timer_interrupt(unsigned long now)
-{
-       struct tvec_base *base = __this_cpu_read(tvec_bases);
-       unsigned long expires = now + NEXT_TIMER_MAX_DELTA;
-
-       /*
-        * Pretend that there is no timer pending if the cpu is offline.
-        * Possible pending timers will be migrated later to an active cpu.
-        */
-       if (cpu_is_offline(smp_processor_id()))
-               return expires;
-
-       spin_lock(&base->lock);
-       if (base->active_timers) {
-               if (time_before_eq(base->next_timer, base->timer_jiffies))
-                       base->next_timer = __next_timer_interrupt(base);
-               expires = base->next_timer;
-       }
-       spin_unlock(&base->lock);
-
-       if (time_before_eq(expires, now))
-               return now;
-
-       return cmp_next_hrtimer_event(now, expires);
-}
-#endif
-
-/*
- * Called from the timer interrupt handler to charge one tick to the current
- * process.  user_tick is 1 if the tick is user time, 0 for system.
- */
-void update_process_times(int user_tick)
-{
-       struct task_struct *p = current;
-       int cpu = smp_processor_id();
-
-       /* Note: this timer irq context must be accounted for as well. */
-       account_process_tick(p, user_tick);
-       run_local_timers();
-       rcu_check_callbacks(cpu, user_tick);
-#ifdef CONFIG_IRQ_WORK
-       if (in_irq())
-               irq_work_run();
-#endif
-       scheduler_tick();
-       run_posix_cpu_timers(p);
-}
-
-/*
- * This function runs timers and the timer-tq in bottom half context.
- */
-static void run_timer_softirq(struct softirq_action *h)
-{
-       struct tvec_base *base = __this_cpu_read(tvec_bases);
-
-       hrtimer_run_pending();
-
-       if (time_after_eq(jiffies, base->timer_jiffies))
-               __run_timers(base);
-}
-
-/*
- * Called by the local, per-CPU timer interrupt on SMP.
- */
-void run_local_timers(void)
-{
-       hrtimer_run_queues();
-       raise_softirq(TIMER_SOFTIRQ);
-}
-
-#ifdef __ARCH_WANT_SYS_ALARM
-
-/*
- * For backwards compatibility?  This can be done in libc so Alpha
- * and all newer ports shouldn't need it.
- */
-SYSCALL_DEFINE1(alarm, unsigned int, seconds)
-{
-       return alarm_setitimer(seconds);
-}
-
-#endif
-
-static void process_timeout(unsigned long __data)
-{
-       wake_up_process((struct task_struct *)__data);
-}
-
-/**
- * schedule_timeout - sleep until timeout
- * @timeout: timeout value in jiffies
- *
- * Make the current task sleep until @timeout jiffies have
- * elapsed. The routine will return immediately unless
- * the current task state has been set (see set_current_state()).
- *
- * You can set the task state as follows -
- *
- * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
- * pass before the routine returns. The routine will return 0
- *
- * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
- * delivered to the current task. In this case the remaining time
- * in jiffies will be returned, or 0 if the timer expired in time
- *
- * The current task state is guaranteed to be TASK_RUNNING when this
- * routine returns.
- *
- * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
- * the CPU away without a bound on the timeout. In this case the return
- * value will be %MAX_SCHEDULE_TIMEOUT.
- *
- * In all cases the return value is guaranteed to be non-negative.
- */
-signed long __sched schedule_timeout(signed long timeout)
-{
-       struct timer_list timer;
-       unsigned long expire;
-
-       switch (timeout)
-       {
-       case MAX_SCHEDULE_TIMEOUT:
-               /*
-                * These two special cases are useful to be comfortable
-                * in the caller. Nothing more. We could take
-                * MAX_SCHEDULE_TIMEOUT from one of the negative value
-                * but I'd like to return a valid offset (>=0) to allow
-                * the caller to do everything it wants with the retval.
-                */
-               schedule();
-               goto out;
-       default:
-               /*
-                * Another bit of PARANOID. Note that the retval will be
-                * 0 since no piece of kernel is supposed to do a check
-                * for a negative retval of schedule_timeout() (since it
-                * should never happen anyway). You just have the printk()
-                * that will tell you if something has gone wrong and where.
-                */
-               if (timeout < 0) {
-                       printk(KERN_ERR "schedule_timeout: wrong timeout "
-                               "value %lx\n", timeout);
-                       dump_stack();
-                       current->state = TASK_RUNNING;
-                       goto out;
-               }
-       }
-
-       expire = timeout + jiffies;
-
-       setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
-       __mod_timer(&timer, expire, false, TIMER_NOT_PINNED);
-       schedule();
-       del_singleshot_timer_sync(&timer);
-
-       /* Remove the timer from the object tracker */
-       destroy_timer_on_stack(&timer);
-
-       timeout = expire - jiffies;
-
- out:
-       return timeout < 0 ? 0 : timeout;
-}
-EXPORT_SYMBOL(schedule_timeout);
-
-/*
- * We can use __set_current_state() here because schedule_timeout() calls
- * schedule() unconditionally.
- */
-signed long __sched schedule_timeout_interruptible(signed long timeout)
-{
-       __set_current_state(TASK_INTERRUPTIBLE);
-       return schedule_timeout(timeout);
-}
-EXPORT_SYMBOL(schedule_timeout_interruptible);
-
-signed long __sched schedule_timeout_killable(signed long timeout)
-{
-       __set_current_state(TASK_KILLABLE);
-       return schedule_timeout(timeout);
-}
-EXPORT_SYMBOL(schedule_timeout_killable);
-
-signed long __sched schedule_timeout_uninterruptible(signed long timeout)
-{
-       __set_current_state(TASK_UNINTERRUPTIBLE);
-       return schedule_timeout(timeout);
-}
-EXPORT_SYMBOL(schedule_timeout_uninterruptible);
-
-static int init_timers_cpu(int cpu)
-{
-       int j;
-       struct tvec_base *base;
-       static char tvec_base_done[NR_CPUS];
-
-       if (!tvec_base_done[cpu]) {
-               static char boot_done;
-
-               if (boot_done) {
-                       /*
-                        * The APs use this path later in boot
-                        */
-                       base = kzalloc_node(sizeof(*base), GFP_KERNEL,
-                                           cpu_to_node(cpu));
-                       if (!base)
-                               return -ENOMEM;
-
-                       /* Make sure tvec_base has TIMER_FLAG_MASK bits free */
-                       if (WARN_ON(base != tbase_get_base(base))) {
-                               kfree(base);
-                               return -ENOMEM;
-                       }
-                       per_cpu(tvec_bases, cpu) = base;
-               } else {
-                       /*
-                        * This is for the boot CPU - we use compile-time
-                        * static initialisation because per-cpu memory isn't
-                        * ready yet and because the memory allocators are not
-                        * initialised either.
-                        */
-                       boot_done = 1;
-                       base = &boot_tvec_bases;
-               }
-               spin_lock_init(&base->lock);
-               tvec_base_done[cpu] = 1;
-       } else {
-               base = per_cpu(tvec_bases, cpu);
-       }
-
-
-       for (j = 0; j < TVN_SIZE; j++) {
-               INIT_LIST_HEAD(base->tv5.vec + j);
-               INIT_LIST_HEAD(base->tv4.vec + j);
-               INIT_LIST_HEAD(base->tv3.vec + j);
-               INIT_LIST_HEAD(base->tv2.vec + j);
-       }
-       for (j = 0; j < TVR_SIZE; j++)
-               INIT_LIST_HEAD(base->tv1.vec + j);
-
-       base->timer_jiffies = jiffies;
-       base->next_timer = base->timer_jiffies;
-       base->active_timers = 0;
-       base->all_timers = 0;
-       return 0;
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-static void migrate_timer_list(struct tvec_base *new_base, struct list_head *head)
-{
-       struct timer_list *timer;
-
-       while (!list_empty(head)) {
-               timer = list_first_entry(head, struct timer_list, entry);
-               /* We ignore the accounting on the dying cpu */
-               detach_timer(timer, false);
-               timer_set_base(timer, new_base);
-               internal_add_timer(new_base, timer);
-       }
-}
-
-static void migrate_timers(int cpu)
-{
-       struct tvec_base *old_base;
-       struct tvec_base *new_base;
-       int i;
-
-       BUG_ON(cpu_online(cpu));
-       old_base = per_cpu(tvec_bases, cpu);
-       new_base = get_cpu_var(tvec_bases);
-       /*
-        * The caller is globally serialized and nobody else
-        * takes two locks at once, deadlock is not possible.
-        */
-       spin_lock_irq(&new_base->lock);
-       spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
-
-       BUG_ON(old_base->running_timer);
-
-       for (i = 0; i < TVR_SIZE; i++)
-               migrate_timer_list(new_base, old_base->tv1.vec + i);
-       for (i = 0; i < TVN_SIZE; i++) {
-               migrate_timer_list(new_base, old_base->tv2.vec + i);
-               migrate_timer_list(new_base, old_base->tv3.vec + i);
-               migrate_timer_list(new_base, old_base->tv4.vec + i);
-               migrate_timer_list(new_base, old_base->tv5.vec + i);
-       }
-
-       spin_unlock(&old_base->lock);
-       spin_unlock_irq(&new_base->lock);
-       put_cpu_var(tvec_bases);
-}
-#endif /* CONFIG_HOTPLUG_CPU */
-
-static int timer_cpu_notify(struct notifier_block *self,
-                               unsigned long action, void *hcpu)
-{
-       long cpu = (long)hcpu;
-       int err;
-
-       switch(action) {
-       case CPU_UP_PREPARE:
-       case CPU_UP_PREPARE_FROZEN:
-               err = init_timers_cpu(cpu);
-               if (err < 0)
-                       return notifier_from_errno(err);
-               break;
-#ifdef CONFIG_HOTPLUG_CPU
-       case CPU_DEAD:
-       case CPU_DEAD_FROZEN:
-               migrate_timers(cpu);
-               break;
-#endif
-       default:
-               break;
-       }
-       return NOTIFY_OK;
-}
-
-static struct notifier_block timers_nb = {
-       .notifier_call  = timer_cpu_notify,
-};
-
-
-void __init init_timers(void)
-{
-       int err;
-
-       /* ensure there are enough low bits for flags in timer->base pointer */
-       BUILD_BUG_ON(__alignof__(struct tvec_base) & TIMER_FLAG_MASK);
-
-       err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
-                              (void *)(long)smp_processor_id());
-       BUG_ON(err != NOTIFY_OK);
-
-       init_timer_stats();
-       register_cpu_notifier(&timers_nb);
-       open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
-}
-
-/**
- * msleep - sleep safely even with waitqueue interruptions
- * @msecs: Time in milliseconds to sleep for
- */
-void msleep(unsigned int msecs)
-{
-       unsigned long timeout = msecs_to_jiffies(msecs) + 1;
-
-       while (timeout)
-               timeout = schedule_timeout_uninterruptible(timeout);
-}
-
-EXPORT_SYMBOL(msleep);
-
-/**
- * msleep_interruptible - sleep waiting for signals
- * @msecs: Time in milliseconds to sleep for
- */
-unsigned long msleep_interruptible(unsigned int msecs)
-{
-       unsigned long timeout = msecs_to_jiffies(msecs) + 1;
-
-       while (timeout && !signal_pending(current))
-               timeout = schedule_timeout_interruptible(timeout);
-       return jiffies_to_msecs(timeout);
-}
-
-EXPORT_SYMBOL(msleep_interruptible);
-
-static int __sched do_usleep_range(unsigned long min, unsigned long max)
-{
-       ktime_t kmin;
-       unsigned long delta;
-
-       kmin = ktime_set(0, min * NSEC_PER_USEC);
-       delta = (max - min) * NSEC_PER_USEC;
-       return schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
-}
-
-/**
- * usleep_range - Drop in replacement for udelay where wakeup is flexible
- * @min: Minimum time in usecs to sleep
- * @max: Maximum time in usecs to sleep
- */
-void usleep_range(unsigned long min, unsigned long max)
-{
-       __set_current_state(TASK_UNINTERRUPTIBLE);
-       do_usleep_range(min, max);
-}
-EXPORT_SYMBOL(usleep_range);
index 8bb80fe..8a52839 100644 (file)
@@ -820,11 +820,12 @@ static struct {
        const char *name;
        int in_ns;              /* is this clock in nanoseconds? */
 } trace_clocks[] = {
-       { trace_clock_local,    "local",        1 },
-       { trace_clock_global,   "global",       1 },
-       { trace_clock_counter,  "counter",      0 },
-       { trace_clock_jiffies,  "uptime",       0 },
-       { trace_clock,          "perf",         1 },
+       { trace_clock_local,            "local",        1 },
+       { trace_clock_global,           "global",       1 },
+       { trace_clock_counter,          "counter",      0 },
+       { trace_clock_jiffies,          "uptime",       0 },
+       { trace_clock,                  "perf",         1 },
+       { ktime_get_mono_fast_ns,       "mono",         1 },
        ARCH_TRACE_CLOCKS
 };
 
index a1dd9a1..975cb49 100644 (file)
@@ -31,20 +31,19 @@ void bacct_add_tsk(struct user_namespace *user_ns,
                   struct taskstats *stats, struct task_struct *tsk)
 {
        const struct cred *tcred;
-       struct timespec uptime, ts;
        cputime_t utime, stime, utimescaled, stimescaled;
-       u64 ac_etime;
+       u64 delta;
 
        BUILD_BUG_ON(TS_COMM_LEN < TASK_COMM_LEN);
 
-       /* calculate task elapsed time in timespec */
-       do_posix_clock_monotonic_gettime(&uptime);
-       ts = timespec_sub(uptime, tsk->start_time);
-       /* rebase elapsed time to usec (should never be negative) */
-       ac_etime = timespec_to_ns(&ts);
-       do_div(ac_etime, NSEC_PER_USEC);
-       stats->ac_etime = ac_etime;
-       stats->ac_btime = get_seconds() - ts.tv_sec;
+       /* calculate task elapsed time in nsec */
+       delta = ktime_get_ns() - tsk->start_time;
+       /* Convert to micro seconds */
+       do_div(delta, NSEC_PER_USEC);
+       stats->ac_etime = delta;
+       /* Convert to seconds for btime */
+       do_div(delta, USEC_PER_SEC);
+       stats->ac_btime = get_seconds() - delta;
        if (thread_group_leader(tsk)) {
                stats->ac_exitcode = tsk->exit_code;
                if (tsk->flags & PF_FORKNOEXEC)
index f8f45ec..1f630ad 100644 (file)
@@ -1648,6 +1648,15 @@ config TEST_FIRMWARE
 
          If unsure, say N.
 
+config TEST_UDELAY
+       tristate "udelay test driver"
+       default n
+       help
+         This builds the "udelay_test" module that helps to make sure
+         that udelay() is working properly.
+
+         If unsure, say N.
+
 source "samples/Kconfig"
 
 source "lib/Kconfig.kgdb"
index 6a4aee8..f4a195a 100644 (file)
@@ -86,8 +86,6 @@ void devm_iounmap(struct device *dev, void __iomem *addr)
 }
 EXPORT_SYMBOL(devm_iounmap);
 
-#define IOMEM_ERR_PTR(err) (__force void __iomem *)ERR_PTR(err)
-
 /**
  * devm_ioremap_resource() - check, request region, and ioremap resource
  * @dev: generic device to handle the resource for
index c1b0037..3ffa4f5 100644 (file)
@@ -155,11 +155,9 @@ static char *tomoyo_print_header(struct tomoyo_request_info *r)
        u8 i;
        if (!buffer)
                return NULL;
-       {
-               struct timeval tv;
-               do_gettimeofday(&tv);
-               tomoyo_convert_time(tv.tv_sec, &stamp);
-       }
+
+       tomoyo_convert_time(get_seconds(), &stamp);
+
        pos = snprintf(buffer, tomoyo_buffer_len - 1,
                       "#%04u/%02u/%02u %02u:%02u:%02u# profile=%u mode=%s "
                       "granted=%s (global-pid=%u) task={ pid=%u ppid=%u "
index 283862a..e0fb750 100644 (file)
@@ -2267,13 +2267,11 @@ static unsigned int tomoyo_stat_modified[TOMOYO_MAX_POLICY_STAT];
  */
 void tomoyo_update_stat(const u8 index)
 {
-       struct timeval tv;
-       do_gettimeofday(&tv);
        /*
         * I don't use atomic operations because race condition is not fatal.
         */
        tomoyo_stat_updated[index]++;
-       tomoyo_stat_modified[index] = tv.tv_sec;
+       tomoyo_stat_modified[index] = get_seconds();
 }
 
 /**
diff --git a/tools/time/udelay_test.sh b/tools/time/udelay_test.sh
new file mode 100755 (executable)
index 0000000..12d46b9
--- /dev/null
@@ -0,0 +1,70 @@
+#!/bin/bash
+
+# udelay() test script
+#
+# Test is executed by writing and reading to /sys/kernel/debug/udelay_test
+# and exercises a variety of delays to ensure that udelay() is delaying
+# at least as long as requested (as compared to ktime).
+#
+# Copyright (C) 2014 Google, Inc.
+#
+# This software is licensed under the terms of the GNU General Public
+# License version 2, as published by the Free Software Foundation, and
+# may be copied, distributed, and modified under those terms.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+MODULE_NAME=udelay_test
+UDELAY_PATH=/sys/kernel/debug/udelay_test
+
+# Load the test module and create a scratch file to collect results in.
+setup()
+{
+	/sbin/modprobe -q $MODULE_NAME
+	tmp_file=`mktemp`
+}
+
+# Request one delay (usecs) and append the kernel's report to the scratch file.
+test_one()
+{
+	delay=$1
+	echo $delay > $UDELAY_PATH
+	tee -a "$tmp_file" < $UDELAY_PATH
+}
+
+# Remove the scratch file (if it was created) and unload the module.
+cleanup()
+{
+	if [ -f "$tmp_file" ]; then
+		rm "$tmp_file"
+	fi
+	/sbin/modprobe -q -r $MODULE_NAME
+}
+
+trap cleanup EXIT
+setup
+retcode=0
+
+# Delay for a variety of times.
+# 1..200, 200..500 (by 10), 500..2000 (by 100)
+for (( delay = 1; delay < 200; delay += 1 )); do
+	test_one $delay
+done
+for (( delay = 200; delay < 500; delay += 10 )); do
+	test_one $delay
+done
+for (( delay = 500; delay <= 2000; delay += 100 )); do
+	test_one $delay
+done
+
+# Search for failures
+count=`grep -c FAIL "$tmp_file"`
+if [ "$count" -gt 0 ]; then
+	echo "ERROR: $count delays failed to delay long enough"
+	retcode=1
+fi
+
+exit $retcode