// SPDX-License-Identifier: GPL-2.0
/*
 * Kernel timekeeping code and accessor functions. Based on code from
 * timer.c, moved in commit 8524070b7982.
 */
#include <linux/timekeeper_internal.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/clock.h>
#include <linux/syscore_ops.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/tick.h>
#include <linux/stop_machine.h>
#include <linux/pvclock_gtod.h>
#include <linux/compiler.h>
#include <linux/audit.h>
#include <linux/random.h>

#include "tick-internal.h"
#include "ntp_internal.h"
#include "timekeeping_internal.h"

#define TK_CLEAR_NTP            (1 << 0)
#define TK_MIRROR               (1 << 1)
#define TK_CLOCK_WAS_SET        (1 << 2)

enum timekeeping_adv_mode {
        /* Update timekeeper when a tick has passed */
        TK_ADV_TICK,

        /* Update timekeeper on a direct frequency change */
        TK_ADV_FREQ
};

DEFINE_RAW_SPINLOCK(timekeeper_lock);

/*
 * The most important data for readout fits into a single 64 byte
 * cache line.
 */
static struct {
        seqcount_raw_spinlock_t seq;
        struct timekeeper       timekeeper;
} tk_core ____cacheline_aligned = {
        .seq = SEQCNT_RAW_SPINLOCK_ZERO(tk_core.seq, &timekeeper_lock),
};

static struct timekeeper shadow_timekeeper;

/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;

/**
 * struct tk_fast - NMI safe timekeeper
 * @seq:        Sequence counter for protecting updates. The lowest bit
 *              is the index for the tk_read_base array
 * @base:       tk_read_base array. Access is indexed by the lowest bit of
 *              @seq.
 *
 * See @update_fast_timekeeper() below.
 */
struct tk_fast {
        seqcount_latch_t        seq;
        struct tk_read_base     base[2];
};

/* Suspend-time cycles value for halted fast timekeeper. */
static u64 cycles_at_suspend;

static u64 dummy_clock_read(struct clocksource *cs)
{
        if (timekeeping_suspended)
                return cycles_at_suspend;
        return local_clock();
}

static struct clocksource dummy_clock = {
        .read = dummy_clock_read,
};

/*
 * Boot time initialization which allows local_clock() to be utilized
 * during early boot when clocksources are not available. local_clock()
 * returns nanoseconds already so no conversion is required, hence mult=1
 * and shift=0. When the first proper clocksource is installed then
 * the fast time keepers are updated with the correct values.
 */
#define FAST_TK_INIT                                            \
        {                                                       \
                .clock          = &dummy_clock,                 \
                .mask           = CLOCKSOURCE_MASK(64),         \
                .mult           = 1,                            \
                .shift          = 0,                            \
        }
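
/*
 * Illustration (not part of the kernel source): with mult == 1 and
 * shift == 0 the generic conversion in timekeeping_cycles_to_ns()
 * degenerates to an identity mapping, so the local_clock() nanoseconds
 * returned by dummy_clock_read() pass through unchanged:
 *
 *      ns = ((delta * mult) + xtime_nsec) >> shift
 *         = ((delta * 1) + 0) >> 0 = delta
 */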

static struct tk_fast tk_fast_mono ____cacheline_aligned = {
        .seq     = SEQCNT_LATCH_ZERO(tk_fast_mono.seq),
        .base[0] = FAST_TK_INIT,
        .base[1] = FAST_TK_INIT,
};

static struct tk_fast tk_fast_raw ____cacheline_aligned = {
        .seq     = SEQCNT_LATCH_ZERO(tk_fast_raw.seq),
        .base[0] = FAST_TK_INIT,
        .base[1] = FAST_TK_INIT,
};

static inline void tk_normalize_xtime(struct timekeeper *tk)
{
        while (tk->tkr_mono.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_mono.shift)) {
                tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
                tk->xtime_sec++;
        }
        while (tk->tkr_raw.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_raw.shift)) {
                tk->tkr_raw.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
                tk->raw_sec++;
        }
}
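
/*
 * Worked example (illustrative): xtime_nsec is kept in "shifted"
 * nanoseconds to preserve sub-ns precision, i.e. with shift == 8 one
 * nanosecond is stored as 256 and one second as NSEC_PER_SEC << 8.
 * tk_normalize_xtime() above moves whole seconds out of the shifted
 * remainder:
 *
 *      xtime_nsec = (NSEC_PER_SEC + 5) << 8    // 1s + 5ns, shifted
 *      -> xtime_sec++; xtime_nsec = 5 << 8     // 5ns remain
 */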

static inline struct timespec64 tk_xtime(const struct timekeeper *tk)
{
        struct timespec64 ts;

        ts.tv_sec = tk->xtime_sec;
        ts.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
        return ts;
}

static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
{
        tk->xtime_sec = ts->tv_sec;
        tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift;
}

static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
{
        tk->xtime_sec += ts->tv_sec;
        tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift;
        tk_normalize_xtime(tk);
}

static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
{
        struct timespec64 tmp;

        /*
         * Verify consistency of: offset_real = -wall_to_monotonic
         * before modifying anything
         */
        set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec,
                                  -tk->wall_to_monotonic.tv_nsec);
        WARN_ON_ONCE(tk->offs_real != timespec64_to_ktime(tmp));
        tk->wall_to_monotonic = wtm;
        set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
        tk->offs_real = timespec64_to_ktime(tmp);
        tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
}

static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
{
        tk->offs_boot = ktime_add(tk->offs_boot, delta);
        /*
         * Timespec representation for VDSO update to avoid 64bit division
         * on every update.
         */
        tk->monotonic_to_boot = ktime_to_timespec64(tk->offs_boot);
}

/*
 * tk_clock_read - atomic clocksource read() helper
 *
 * This helper is necessary to use in the read paths because, while the
 * seqcount ensures we don't return a bad value while structures are updated,
 * it doesn't protect from potential crashes. There is the possibility that
 * the tkr's clocksource may change between the read reference, and the
 * clock reference passed to the read function. This can cause crashes if
 * the wrong clocksource is passed to the wrong read function.
 * This isn't necessary to use when holding the timekeeper_lock or doing
 * a read of the fast-timekeeper tkrs (which is protected by its own locking
 * and update logic).
 */
static inline u64 tk_clock_read(const struct tk_read_base *tkr)
{
        struct clocksource *clock = READ_ONCE(tkr->clock);

        return clock->read(clock);
}

#ifdef CONFIG_DEBUG_TIMEKEEPING
#define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */

static void timekeeping_check_update(struct timekeeper *tk, u64 offset)
{

        u64 max_cycles = tk->tkr_mono.clock->max_cycles;
        const char *name = tk->tkr_mono.clock->name;

        if (offset > max_cycles) {
                printk_deferred("WARNING: timekeeping: Cycle offset (%lld) is larger than allowed by the '%s' clock's max_cycles value (%lld): time overflow danger\n",
                                offset, name, max_cycles);
                printk_deferred("         timekeeping: Your kernel is sick, but tries to cope by capping time updates\n");
        } else {
                if (offset > (max_cycles >> 1)) {
                        printk_deferred("INFO: timekeeping: Cycle offset (%lld) is larger than the '%s' clock's 50%% safety margin (%lld)\n",
                                        offset, name, max_cycles >> 1);
                        printk_deferred("      timekeeping: Your kernel is still fine, but is feeling a bit nervous\n");
                }
        }

        if (tk->underflow_seen) {
                if (jiffies - tk->last_warning > WARNING_FREQ) {
                        printk_deferred("WARNING: Underflow in clocksource '%s' observed, time update ignored.\n", name);
                        printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
                        printk_deferred("         Your kernel is probably still fine.\n");
                        tk->last_warning = jiffies;
                }
                tk->underflow_seen = 0;
        }

        if (tk->overflow_seen) {
                if (jiffies - tk->last_warning > WARNING_FREQ) {
                        printk_deferred("WARNING: Overflow in clocksource '%s' observed, time update capped.\n", name);
                        printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
                        printk_deferred("         Your kernel is probably still fine.\n");
                        tk->last_warning = jiffies;
                }
                tk->overflow_seen = 0;
        }
}

static inline u64 timekeeping_cycles_to_ns(const struct tk_read_base *tkr, u64 cycles);

static inline u64 timekeeping_debug_get_ns(const struct tk_read_base *tkr)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        u64 now, last, mask, max, delta;
        unsigned int seq;

        /*
         * Since we're called holding a seqcount, the data may shift
         * under us while we're doing the calculation. This can cause
         * false positives, since we'd note a problem but throw the
         * results away. So nest another seqcount here to atomically
         * grab the points we are checking with.
         */
        do {
                seq = read_seqcount_begin(&tk_core.seq);
                now = tk_clock_read(tkr);
                last = tkr->cycle_last;
                mask = tkr->mask;
                max = tkr->clock->max_cycles;
        } while (read_seqcount_retry(&tk_core.seq, seq));

        delta = clocksource_delta(now, last, mask);

        /*
         * Try to catch underflows by checking if we are seeing small
         * mask-relative negative values.
         */
        if (unlikely((~delta & mask) < (mask >> 3)))
                tk->underflow_seen = 1;

        /* Check for multiplication overflows */
        if (unlikely(delta > max))
                tk->overflow_seen = 1;

        /* timekeeping_cycles_to_ns() handles both under and overflow */
        return timekeeping_cycles_to_ns(tkr, now);
}
#else
static inline void timekeeping_check_update(struct timekeeper *tk, u64 offset)
{
}
static inline u64 timekeeping_debug_get_ns(const struct tk_read_base *tkr)
{
        BUG();
}
#endif

/**
 * tk_setup_internals - Set up internals to use clocksource clock.
 *
 * @tk:         The target timekeeper to setup.
 * @clock:      Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
{
        u64 interval;
        u64 tmp, ntpinterval;
        struct clocksource *old_clock;

        ++tk->cs_was_changed_seq;
        old_clock = tk->tkr_mono.clock;
        tk->tkr_mono.clock = clock;
        tk->tkr_mono.mask = clock->mask;
        tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono);

        tk->tkr_raw.clock = clock;
        tk->tkr_raw.mask = clock->mask;
        tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;

        /* Do the ns -> cycle conversion first, using original mult */
        tmp = NTP_INTERVAL_LENGTH;
        tmp <<= clock->shift;
        ntpinterval = tmp;
        tmp += clock->mult/2;
        do_div(tmp, clock->mult);
        if (tmp == 0)
                tmp = 1;

        interval = (u64) tmp;
        tk->cycle_interval = interval;

        /* Go back from cycles -> shifted ns */
        tk->xtime_interval = interval * clock->mult;
        tk->xtime_remainder = ntpinterval - tk->xtime_interval;
        tk->raw_interval = interval * clock->mult;

        /* if changing clocks, convert xtime_nsec shift units */
        if (old_clock) {
                int shift_change = clock->shift - old_clock->shift;
                if (shift_change < 0) {
                        tk->tkr_mono.xtime_nsec >>= -shift_change;
                        tk->tkr_raw.xtime_nsec >>= -shift_change;
                } else {
                        tk->tkr_mono.xtime_nsec <<= shift_change;
                        tk->tkr_raw.xtime_nsec <<= shift_change;
                }
        }

        tk->tkr_mono.shift = clock->shift;
        tk->tkr_raw.shift = clock->shift;

        tk->ntp_error = 0;
        tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
        tk->ntp_tick = ntpinterval << tk->ntp_error_shift;

        /*
         * The timekeeper keeps its own mult values for the currently
         * active clocksource. These values will be adjusted via NTP
         * to counteract clock drifting.
         */
        tk->tkr_mono.mult = clock->mult;
        tk->tkr_raw.mult = clock->mult;
        tk->ntp_err_mult = 0;
        tk->skip_second_overflow = 0;
}

/* Timekeeper helper functions. */
static noinline u64 delta_to_ns_safe(const struct tk_read_base *tkr, u64 delta)
{
        return mul_u64_u32_add_u64_shr(delta, tkr->mult, tkr->xtime_nsec, tkr->shift);
}

static inline u64 timekeeping_cycles_to_ns(const struct tk_read_base *tkr, u64 cycles)
{
        /* Calculate the delta since the last update_wall_time() */
        u64 mask = tkr->mask, delta = (cycles - tkr->cycle_last) & mask;

        /*
         * This detects both negative motion and the case where the delta
         * overflows the multiplication with tkr->mult.
         */
        if (unlikely(delta > tkr->clock->max_cycles)) {
                /*
                 * Handle clocksource inconsistency between CPUs to prevent
                 * time from going backwards by checking for the MSB of the
                 * mask being set in the delta.
                 */
                if (delta & ~(mask >> 1))
                        return tkr->xtime_nsec >> tkr->shift;

                return delta_to_ns_safe(tkr, delta);
        }

        return ((delta * tkr->mult) + tkr->xtime_nsec) >> tkr->shift;
}
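
/*
 * Worked example (illustrative, made-up clocksource values): for a
 * 10 MHz clock one cycle is 100 ns, which can be encoded as
 * mult = 6553600 and shift = 16 (100 * 2^16). With delta = 250 cycles
 * and xtime_nsec = 0 the fast path above computes:
 *
 *      ns = ((250 * 6553600) + 0) >> 16 = 25000
 */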

static __always_inline u64 __timekeeping_get_ns(const struct tk_read_base *tkr)
{
        return timekeeping_cycles_to_ns(tkr, tk_clock_read(tkr));
}

static inline u64 timekeeping_get_ns(const struct tk_read_base *tkr)
{
        if (IS_ENABLED(CONFIG_DEBUG_TIMEKEEPING))
                return timekeeping_debug_get_ns(tkr);

        return __timekeeping_get_ns(tkr);
}

/**
 * update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
 * @tkr: Timekeeping readout base from which we take the update
 * @tkf: Pointer to NMI safe timekeeper
 *
 * We want to use this from any context including NMI and tracing /
 * instrumenting the timekeeping code itself.
 *
 * Employ the latch technique; see @raw_write_seqcount_latch.
 *
 * So if an NMI hits the update of base[0] then it will use base[1]
 * which is still consistent. In the worst case this can result in a
 * slightly wrong timestamp (a few nanoseconds). See
 * @ktime_get_mono_fast_ns.
 */
static void update_fast_timekeeper(const struct tk_read_base *tkr,
                                   struct tk_fast *tkf)
{
        struct tk_read_base *base = tkf->base;

        /* Force readers off to base[1] */
        raw_write_seqcount_latch(&tkf->seq);

        /* Update base[0] */
        memcpy(base, tkr, sizeof(*base));

        /* Force readers back to base[0] */
        raw_write_seqcount_latch(&tkf->seq);

        /* Update base[1] */
        memcpy(base + 1, base, sizeof(*base));
}
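
/*
 * Informal sketch of the latch interleaving (illustrative only): each
 * raw_write_seqcount_latch() above increments tkf->seq, and readers
 * pick base[seq & 1]. An NMI that interrupts either memcpy() is
 * therefore steered to the copy which is not being written:
 *
 *      writer                          concurrent NMI reader
 *      seq++ (now odd)
 *      memcpy(base[0], ...)            seq & 1 -> reads base[1] (stable)
 *      seq++ (now even)
 *      memcpy(base[1], ...)            seq & 1 -> reads base[0] (stable)
 */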

static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
{
        struct tk_read_base *tkr;
        unsigned int seq;
        u64 now;

        do {
                seq = raw_read_seqcount_latch(&tkf->seq);
                tkr = tkf->base + (seq & 0x01);
                now = ktime_to_ns(tkr->base);
                now += __timekeeping_get_ns(tkr);
        } while (raw_read_seqcount_latch_retry(&tkf->seq, seq));

        return now;
}

/**
 * ktime_get_mono_fast_ns - Fast NMI safe access to clock monotonic
 *
 * This timestamp is not guaranteed to be monotonic across an update.
 * The timestamp is calculated by:
 *
 *      now = base_mono + clock_delta * slope
 *
 * So if the update lowers the slope, readers who are forced to the
 * not yet updated second array are still using the old steeper slope.
 *
 *      tmono
 *      ^
 *      |    o  n
 *      |   o n
 *      |  u
 *      | o
 *      |o
 *      |12345678---> reader order
 *
 *      o = old slope
 *      u = update
 *      n = new slope
 *
 * So reader 6 will observe time going backwards versus reader 5.
 *
 * While other CPUs are likely to be able to observe that, the only way
 * for a CPU local observation is when an NMI hits in the middle of
 * the update. Timestamps taken from that NMI context might be ahead
 * of the following timestamps. Callers need to be aware of that and
 * deal with it.
 */
u64 notrace ktime_get_mono_fast_ns(void)
{
        return __ktime_get_fast_ns(&tk_fast_mono);
}
EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);
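
/*
 * Example use (illustrative only; do_something() is a stand-in for the
 * instrumented section): being notrace and NMI safe, this accessor is
 * suitable for tracing or NMI context, e.g.:
 *
 *      u64 t0 = ktime_get_mono_fast_ns();
 *      do_something();
 *      pr_debug("took %llu ns\n", ktime_get_mono_fast_ns() - t0);
 *
 * subject to the caveat above that readings across a timekeeper update
 * are not guaranteed to be monotonic.
 */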

/**
 * ktime_get_raw_fast_ns - Fast NMI safe access to clock monotonic raw
 *
 * Contrary to ktime_get_mono_fast_ns() this is always correct because the
 * conversion factor is not affected by NTP/PTP correction.
 */
u64 notrace ktime_get_raw_fast_ns(void)
{
        return __ktime_get_fast_ns(&tk_fast_raw);
}
EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);

/**
 * ktime_get_boot_fast_ns - NMI safe and fast access to boot clock.
 *
 * To keep it NMI safe since we're accessing from tracing, we're not using a
 * separate timekeeper with updates to monotonic clock and boot offset
 * protected with seqcounts. This has the following minor side effects:
 *
 * (1) It's possible that a timestamp is taken after the boot offset is updated
 * but before the timekeeper is updated. If this happens, the new boot offset
 * is added to the old timekeeping making the clock appear to update slightly
 * earlier:
 *      CPU 0                                   CPU 1
 *      timekeeping_inject_sleeptime64()
 *      __timekeeping_inject_sleeptime(tk, delta);
 *                                              timestamp();
 *      timekeeping_update(tk, TK_CLEAR_NTP...);
 *
 * (2) On 32-bit systems, the 64-bit boot offset (tk->offs_boot) may be
 * partially updated. Since the tk->offs_boot update is a rare event, this
 * should be a rare occurrence which postprocessing should be able to handle.
 *
 * The caveats vs. timestamp ordering as documented for ktime_get_mono_fast_ns()
 * apply as well.
 */
u64 notrace ktime_get_boot_fast_ns(void)
{
        struct timekeeper *tk = &tk_core.timekeeper;

        return (ktime_get_mono_fast_ns() + ktime_to_ns(data_race(tk->offs_boot)));
}
EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns);

/**
 * ktime_get_tai_fast_ns - NMI safe and fast access to tai clock.
 *
 * The same limitations as described for ktime_get_boot_fast_ns() apply. The
 * mono time and the TAI offset are not read atomically which may yield wrong
 * readouts. However, an update of the TAI offset is a rare event, e.g. caused
 * by settime or adjtimex with an offset. The user of this function has to deal
 * with the possibility of wrong timestamps in post processing.
 */
u64 notrace ktime_get_tai_fast_ns(void)
{
        struct timekeeper *tk = &tk_core.timekeeper;

        return (ktime_get_mono_fast_ns() + ktime_to_ns(data_race(tk->offs_tai)));
}
EXPORT_SYMBOL_GPL(ktime_get_tai_fast_ns);

static __always_inline u64 __ktime_get_real_fast(struct tk_fast *tkf, u64 *mono)
{
        struct tk_read_base *tkr;
        u64 basem, baser, delta;
        unsigned int seq;

        do {
                seq = raw_read_seqcount_latch(&tkf->seq);
                tkr = tkf->base + (seq & 0x01);
                basem = ktime_to_ns(tkr->base);
                baser = ktime_to_ns(tkr->base_real);
                delta = __timekeeping_get_ns(tkr);
        } while (raw_read_seqcount_latch_retry(&tkf->seq, seq));

        if (mono)
                *mono = basem + delta;
        return baser + delta;
}

/**
 * ktime_get_real_fast_ns: - NMI safe and fast access to clock realtime.
 *
 * See ktime_get_mono_fast_ns() for documentation of the time stamp ordering.
 */
u64 ktime_get_real_fast_ns(void)
{
        return __ktime_get_real_fast(&tk_fast_mono, NULL);
}
EXPORT_SYMBOL_GPL(ktime_get_real_fast_ns);

/**
 * ktime_get_fast_timestamps: - NMI safe timestamps
 * @snapshot:   Pointer to timestamp storage
 *
 * Stores clock monotonic, boottime and realtime timestamps.
 *
 * Boot time is a racy access on 32bit systems if the sleep time injection
 * happens late during resume and not in timekeeping_resume(). That could
 * be avoided by expanding struct tk_read_base with boot offset for 32bit
 * and adding more overhead to the update. As this is a hard to observe
 * once per resume event which can be filtered with reasonable effort using
 * the accurate mono/real timestamps, it's probably not worth the trouble.
 *
 * Aside of that it might be possible on 32 and 64 bit to observe the
 * following when the sleep time injection happens late:
 *
 *      CPU 0                                   CPU 1
 *      timekeeping_resume()
 *                                              ktime_get_fast_timestamps()
 *                                                mono, real = __ktime_get_real_fast()
 *      inject_sleep_time()
 *        update boot offset
 *                                                boot = mono + bootoffset;
 *
 * That means that boot time already has the sleep time adjustment, but
 * real time does not. On the next readout both are in sync again.
 *
 * Preventing this for 64bit is not really feasible without destroying the
 * careful cache layout of the timekeeper because the sequence count and
 * struct tk_read_base would then need two cache lines instead of one.
 *
 * Access to the time keeper clock source is disabled across the innermost
 * steps of suspend/resume. The accessors still work, but the timestamps
 * are frozen until time keeping is resumed which happens very early.
 *
 * For regular suspend/resume there is no observable difference vs. sched
 * clock, but it might affect some of the nasty low level debug printks.
 *
 * OTOH, access to sched clock is not guaranteed across suspend/resume on
 * all systems either so it depends on the hardware in use.
 *
 * If that turns out to be a real problem then this could be mitigated by
 * using sched clock in a similar way as during early boot. But it's not as
 * trivial as on early boot because it needs some careful protection
 * against the clock monotonic timestamp jumping backwards on resume.
 */
void ktime_get_fast_timestamps(struct ktime_timestamps *snapshot)
{
        struct timekeeper *tk = &tk_core.timekeeper;

        snapshot->real = __ktime_get_real_fast(&tk_fast_mono, &snapshot->mono);
        snapshot->boot = snapshot->mono + ktime_to_ns(data_race(tk->offs_boot));
}

/**
 * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
 * @tk: Timekeeper to snapshot.
 *
 * It generally is unsafe to access the clocksource after timekeeping has been
 * suspended, so take a snapshot of the readout base of @tk and use it as the
 * fast timekeeper's readout base while suspended. It will return the same
 * number of cycles every time until timekeeping is resumed at which time the
 * proper readout base for the fast timekeeper will be restored automatically.
 */
static void halt_fast_timekeeper(const struct timekeeper *tk)
{
        static struct tk_read_base tkr_dummy;
        const struct tk_read_base *tkr = &tk->tkr_mono;

        memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
        cycles_at_suspend = tk_clock_read(tkr);
        tkr_dummy.clock = &dummy_clock;
        tkr_dummy.base_real = tkr->base + tk->offs_real;
        update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);

        tkr = &tk->tkr_raw;
        memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
        tkr_dummy.clock = &dummy_clock;
        update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
}

static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);

static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
{
        raw_notifier_call_chain(&pvclock_gtod_chain, was_set, tk);
}

/**
 * pvclock_gtod_register_notifier - register a pvclock timedata update listener
 * @nb: Pointer to the notifier block to register
 */
int pvclock_gtod_register_notifier(struct notifier_block *nb)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned long flags;
        int ret;

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
        update_pvclock_gtod(tk, true);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);

/**
 * pvclock_gtod_unregister_notifier - unregister a pvclock
 * timedata update listener
 * @nb: Pointer to the notifier block to unregister
 */
int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
{
        unsigned long flags;
        int ret;

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);

/*
 * tk_update_leap_state - helper to update the next_leap_ktime
 */
static inline void tk_update_leap_state(struct timekeeper *tk)
{
        tk->next_leap_ktime = ntp_get_next_leap();
        if (tk->next_leap_ktime != KTIME_MAX)
                /* Convert to monotonic time */
                tk->next_leap_ktime = ktime_sub(tk->next_leap_ktime, tk->offs_real);
}

/*
 * Update the ktime_t based scalar nsec members of the timekeeper
 */
static inline void tk_update_ktime_data(struct timekeeper *tk)
{
        u64 seconds;
        u32 nsec;

        /*
         * The xtime based monotonic readout is:
         *      nsec = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec + now();
         * The ktime based monotonic readout is:
         *      nsec = base_mono + now();
         * ==> base_mono = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec
         */
        seconds = (u64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec);
        nsec = (u32) tk->wall_to_monotonic.tv_nsec;
        tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);

        /*
         * The sum of the nanoseconds portions of xtime and
         * wall_to_monotonic can be greater/equal one second. Take
         * this into account before updating tk->ktime_sec.
         */
        nsec += (u32)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
        if (nsec >= NSEC_PER_SEC)
                seconds++;
        tk->ktime_sec = seconds;

        /* Update the monotonic raw base */
        tk->tkr_raw.base = ns_to_ktime(tk->raw_sec * NSEC_PER_SEC);
}

/* must hold timekeeper_lock */
static void timekeeping_update(struct timekeeper *tk, unsigned int action)
{
        if (action & TK_CLEAR_NTP) {
                tk->ntp_error = 0;
                ntp_clear();
        }

        tk_update_leap_state(tk);
        tk_update_ktime_data(tk);

        update_vsyscall(tk);
        update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);

        tk->tkr_mono.base_real = tk->tkr_mono.base + tk->offs_real;
        update_fast_timekeeper(&tk->tkr_mono, &tk_fast_mono);
        update_fast_timekeeper(&tk->tkr_raw, &tk_fast_raw);

        if (action & TK_CLOCK_WAS_SET)
                tk->clock_was_set_seq++;
        /*
         * The mirroring of the data to the shadow-timekeeper needs
         * to happen last here to ensure we don't over-write the
         * timekeeper structure on the next update with stale data
         */
        if (action & TK_MIRROR)
                memcpy(&shadow_timekeeper, &tk_core.timekeeper,
                       sizeof(tk_core.timekeeper));
}

/**
 * timekeeping_forward_now - update clock to the current time
 * @tk:         Pointer to the timekeeper to update
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void timekeeping_forward_now(struct timekeeper *tk)
{
        u64 cycle_now, delta;

        cycle_now = tk_clock_read(&tk->tkr_mono);
        delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
        tk->tkr_mono.cycle_last = cycle_now;
        tk->tkr_raw.cycle_last = cycle_now;

        while (delta > 0) {
                u64 max = tk->tkr_mono.clock->max_cycles;
                u64 incr = delta < max ? delta : max;

                tk->tkr_mono.xtime_nsec += incr * tk->tkr_mono.mult;
                tk->tkr_raw.xtime_nsec += incr * tk->tkr_raw.mult;
                tk_normalize_xtime(tk);
                delta -= incr;
        }
}
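
/*
 * Note on the chunked loop above (explanatory, behavior unchanged):
 * 'delta * mult' is a plain 64bit multiplication, so the delta is
 * consumed in chunks of at most max_cycles cycles, the largest value
 * for which the product is guaranteed not to overflow. For example,
 * with an (assumed) mult of 6553600, any delta larger than
 * 2^64 / 6553600 cycles would overflow if multiplied in one step.
 */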

/**
 * ktime_get_real_ts64 - Returns the time of day in a timespec64.
 * @ts:         pointer to the timespec to be set
 *
 * Returns the time of day in a timespec64 (WARN if suspended).
 */
void ktime_get_real_ts64(struct timespec64 *ts)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned int seq;
        u64 nsecs;

        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqcount_begin(&tk_core.seq);

                ts->tv_sec = tk->xtime_sec;
                nsecs = timekeeping_get_ns(&tk->tkr_mono);

        } while (read_seqcount_retry(&tk_core.seq, seq));

        ts->tv_nsec = 0;
        timespec64_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(ktime_get_real_ts64);

ktime_t ktime_get(void)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned int seq;
        ktime_t base;
        u64 nsecs;

        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqcount_begin(&tk_core.seq);
                base = tk->tkr_mono.base;
                nsecs = timekeeping_get_ns(&tk->tkr_mono);

        } while (read_seqcount_retry(&tk_core.seq, seq));

        return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get);

u32 ktime_get_resolution_ns(void)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned int seq;
        u32 nsecs;

        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqcount_begin(&tk_core.seq);
                nsecs = tk->tkr_mono.mult >> tk->tkr_mono.shift;
        } while (read_seqcount_retry(&tk_core.seq, seq));

        return nsecs;
}
EXPORT_SYMBOL_GPL(ktime_get_resolution_ns);

static ktime_t *offsets[TK_OFFS_MAX] = {
        [TK_OFFS_REAL]  = &tk_core.timekeeper.offs_real,
        [TK_OFFS_BOOT]  = &tk_core.timekeeper.offs_boot,
        [TK_OFFS_TAI]   = &tk_core.timekeeper.offs_tai,
};

ktime_t ktime_get_with_offset(enum tk_offsets offs)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned int seq;
        ktime_t base, *offset = offsets[offs];
        u64 nsecs;

        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqcount_begin(&tk_core.seq);
                base = ktime_add(tk->tkr_mono.base, *offset);
                nsecs = timekeeping_get_ns(&tk->tkr_mono);

        } while (read_seqcount_retry(&tk_core.seq, seq));

        return ktime_add_ns(base, nsecs);

}
EXPORT_SYMBOL_GPL(ktime_get_with_offset);

ktime_t ktime_get_coarse_with_offset(enum tk_offsets offs)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned int seq;
        ktime_t base, *offset = offsets[offs];
        u64 nsecs;

        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqcount_begin(&tk_core.seq);
                base = ktime_add(tk->tkr_mono.base, *offset);
                nsecs = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;

        } while (read_seqcount_retry(&tk_core.seq, seq));

        return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_coarse_with_offset);

/**
 * ktime_mono_to_any() - convert monotonic time to any other time
 * @tmono:      time to convert.
 * @offs:       which offset to use
 */
ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs)
{
        ktime_t *offset = offsets[offs];
        unsigned int seq;
        ktime_t tconv;

        do {
                seq = read_seqcount_begin(&tk_core.seq);
                tconv = ktime_add(tmono, *offset);
        } while (read_seqcount_retry(&tk_core.seq, seq));

        return tconv;
}
EXPORT_SYMBOL_GPL(ktime_mono_to_any);
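
/*
 * Example use (illustrative only): converting a stored CLOCK_MONOTONIC
 * timestamp to CLOCK_BOOTTIME:
 *
 *      ktime_t mono = ktime_get();
 *      ktime_t boot = ktime_mono_to_any(mono, TK_OFFS_BOOT);
 *
 * Only the offset readout is guarded by the seqcount loop above; the
 * passed in @tmono is converted as-is.
 */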

/**
 * ktime_get_raw - Returns the raw monotonic time in ktime_t format
 */
ktime_t ktime_get_raw(void)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned int seq;
        ktime_t base;
        u64 nsecs;

        do {
                seq = read_seqcount_begin(&tk_core.seq);
                base = tk->tkr_raw.base;
                nsecs = timekeeping_get_ns(&tk->tkr_raw);

        } while (read_seqcount_retry(&tk_core.seq, seq));

        return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_raw);

/**
 * ktime_get_ts64 - get the monotonic clock in timespec64 format
 * @ts:         pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec64 format in the variable pointed to by @ts.
 */
void ktime_get_ts64(struct timespec64 *ts)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        struct timespec64 tomono;
        unsigned int seq;
        u64 nsec;

        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqcount_begin(&tk_core.seq);
                ts->tv_sec = tk->xtime_sec;
                nsec = timekeeping_get_ns(&tk->tkr_mono);
                tomono = tk->wall_to_monotonic;

        } while (read_seqcount_retry(&tk_core.seq, seq));

        ts->tv_sec += tomono.tv_sec;
        ts->tv_nsec = 0;
        timespec64_add_ns(ts, nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts64);

/**
 * ktime_get_seconds - Get the seconds portion of CLOCK_MONOTONIC
 *
 * Returns the seconds portion of CLOCK_MONOTONIC with a single non
 * serialized read. tk->ktime_sec is of type 'unsigned long' so this
 * works on both 32 and 64 bit systems. On 32 bit systems the readout
 * covers ~136 years of uptime which should be enough to prevent
 * premature wrap arounds.
 */
time64_t ktime_get_seconds(void)
{
        struct timekeeper *tk = &tk_core.timekeeper;

        WARN_ON(timekeeping_suspended);
        return tk->ktime_sec;
}
EXPORT_SYMBOL_GPL(ktime_get_seconds);

/**
 * ktime_get_real_seconds - Get the seconds portion of CLOCK_REALTIME
 *
 * Returns the wall clock seconds since 1970.
 *
 * For 64bit systems the fast access to tk->xtime_sec is preserved. On
 * 32bit systems the access must be protected with the sequence
 * counter to provide "atomic" access to the 64bit tk->xtime_sec
 * value.
 */
time64_t ktime_get_real_seconds(void)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        time64_t seconds;
        unsigned int seq;

        if (IS_ENABLED(CONFIG_64BIT))
                return tk->xtime_sec;

        do {
                seq = read_seqcount_begin(&tk_core.seq);
                seconds = tk->xtime_sec;

        } while (read_seqcount_retry(&tk_core.seq, seq));

        return seconds;
}
EXPORT_SYMBOL_GPL(ktime_get_real_seconds);

/**
 * __ktime_get_real_seconds - The same as ktime_get_real_seconds
 * but without the sequence counter protection. This internal function
 * is only called when the timekeeping lock is already held.
 */
noinstr time64_t __ktime_get_real_seconds(void)
{
        struct timekeeper *tk = &tk_core.timekeeper;

        return tk->xtime_sec;
}

/**
 * ktime_get_snapshot - snapshots the realtime/monotonic raw clocks with counter
 * @systime_snapshot:   pointer to struct receiving the system time snapshot
 */
void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned int seq;
        ktime_t base_raw;
        ktime_t base_real;
        u64 nsec_raw;
        u64 nsec_real;
        u64 now;

        WARN_ON_ONCE(timekeeping_suspended);

        do {
                seq = read_seqcount_begin(&tk_core.seq);
                now = tk_clock_read(&tk->tkr_mono);
                systime_snapshot->cs_id = tk->tkr_mono.clock->id;
                systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq;
                systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq;
                base_real = ktime_add(tk->tkr_mono.base,
                                      tk_core.timekeeper.offs_real);
                base_raw = tk->tkr_raw.base;
                nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, now);
                nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw, now);
        } while (read_seqcount_retry(&tk_core.seq, seq));

        systime_snapshot->cycles = now;
        systime_snapshot->real = ktime_add_ns(base_real, nsec_real);
        systime_snapshot->raw = ktime_add_ns(base_raw, nsec_raw);
}
EXPORT_SYMBOL_GPL(ktime_get_snapshot);

/* Scale base by mult/div checking for overflow */
static int scale64_check_overflow(u64 mult, u64 div, u64 *base)
{
        u64 tmp, rem;

        tmp = div64_u64_rem(*base, div, &rem);

        if (((int)sizeof(u64)*8 - fls64(mult) < fls64(tmp)) ||
            ((int)sizeof(u64)*8 - fls64(mult) < fls64(rem)))
                return -EOVERFLOW;
        tmp *= mult;

        rem = div64_u64(rem * mult, div);
        *base = tmp + rem;
        return 0;
}
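
/*
 * Worked example (illustrative numbers): scaling base = 1000003 by
 * mult/div = 3/7:
 *
 *      tmp = 1000003 / 7 = 142857, rem = 4
 *      tmp * mult = 428571
 *      rem * mult / div = 12 / 7 = 1
 *      *base = 428571 + 1 = 428572
 *
 * Splitting into quotient and remainder keeps the intermediate products
 * small; the fls64() test rejects cases where tmp * mult would still
 * overflow 64 bits.
 */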

/**
 * adjust_historical_crosststamp - adjust crosstimestamp previous to current interval
 * @history:                    Snapshot representing start of history
 * @partial_history_cycles:     Cycle offset into history (fractional part)
 * @total_history_cycles:       Total history length in cycles
 * @discontinuity:              True indicates clock was set on history period
 * @ts:                         Cross timestamp that should be adjusted using
 *                              partial/total ratio
 *
 * Helper function used by get_device_system_crosststamp() to correct the
 * crosstimestamp corresponding to the start of the current interval to the
 * system counter value (timestamp point) provided by the driver. The
 * total_history_* quantities are the total history starting at the provided
 * reference point and ending at the start of the current interval. The cycle
 * count between the driver timestamp point and the start of the current
 * interval is partial_history_cycles.
 */
static int adjust_historical_crosststamp(struct system_time_snapshot *history,
                                         u64 partial_history_cycles,
                                         u64 total_history_cycles,
                                         bool discontinuity,
                                         struct system_device_crosststamp *ts)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        u64 corr_raw, corr_real;
        bool interp_forward;
        int ret;

        if (total_history_cycles == 0 || partial_history_cycles == 0)
                return 0;

        /* Interpolate shortest distance from beginning or end of history */
        interp_forward = partial_history_cycles > total_history_cycles / 2;
        partial_history_cycles = interp_forward ?
                total_history_cycles - partial_history_cycles :
                partial_history_cycles;

        /*
         * Scale the monotonic raw time delta by:
         *      partial_history_cycles / total_history_cycles
         */
        corr_raw = (u64)ktime_to_ns(
                ktime_sub(ts->sys_monoraw, history->raw));
        ret = scale64_check_overflow(partial_history_cycles,
                                     total_history_cycles, &corr_raw);
        if (ret)
                return ret;

        /*
         * If there is a discontinuity in the history, scale monotonic raw
         *      correction by:
         *      mult(real)/mult(raw) yielding the realtime correction
         * Otherwise, calculate the realtime correction similar to monotonic
         *      raw calculation
         */
        if (discontinuity) {
                corr_real = mul_u64_u32_div
                        (corr_raw, tk->tkr_mono.mult, tk->tkr_raw.mult);
        } else {
                corr_real = (u64)ktime_to_ns(
                        ktime_sub(ts->sys_realtime, history->real));
                ret = scale64_check_overflow(partial_history_cycles,
                                             total_history_cycles, &corr_real);
                if (ret)
                        return ret;
        }

        /* Fixup monotonic raw and real time time values */
        if (interp_forward) {
                ts->sys_monoraw = ktime_add_ns(history->raw, corr_raw);
                ts->sys_realtime = ktime_add_ns(history->real, corr_real);
        } else {
                ts->sys_monoraw = ktime_sub_ns(ts->sys_monoraw, corr_raw);
                ts->sys_realtime = ktime_sub_ns(ts->sys_realtime, corr_real);
        }

        return 0;
}

/*
 * timestamp_in_interval - true if ts is chronologically in [start, end]
 *
 * True if ts occurs chronologically at or after start, and before or at end.
 */
static bool timestamp_in_interval(u64 start, u64 end, u64 ts)
{
        if (ts >= start && ts <= end)
                return true;
        if (start > end && (ts >= start || ts <= end))
                return true;
        return false;
}
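
/*
 * Example (illustrative): the second test covers a counter that
 * wrapped inside the interval. With an 8bit counter, start = 250 and
 * end = 5 describe the chronological range 250..255,0..5: ts = 253
 * and ts = 2 are both inside, while ts = 100 is not.
 */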

static bool convert_clock(u64 *val, u32 numerator, u32 denominator)
{
        u64 rem, res;

        if (!numerator || !denominator)
                return false;

        res = div64_u64_rem(*val, denominator, &rem) * numerator;
        *val = res + div_u64(rem * numerator, denominator);
        return true;
}

static bool convert_base_to_cs(struct system_counterval_t *scv)
{
        struct clocksource *cs = tk_core.timekeeper.tkr_mono.clock;
        struct clocksource_base *base;
        u32 num, den;

        /* The timestamp was taken from the time keeper clock source */
        if (cs->id == scv->cs_id)
                return true;

        /*
         * Check whether cs_id matches the base clock. Prevent the compiler from
         * re-evaluating @base as the clocksource might change concurrently.
         */
        base = READ_ONCE(cs->base);
        if (!base || base->id != scv->cs_id)
                return false;

        num = scv->use_nsecs ? cs->freq_khz : base->numerator;
        den = scv->use_nsecs ? USEC_PER_SEC : base->denominator;

        if (!convert_clock(&scv->cycles, num, den))
                return false;

        scv->cycles += base->offset;
        return true;
}

static bool convert_cs_to_base(u64 *cycles, enum clocksource_ids base_id)
{
        struct clocksource *cs = tk_core.timekeeper.tkr_mono.clock;
        struct clocksource_base *base;

        /*
         * Check whether base_id matches the base clock. Prevent the compiler from
         * re-evaluating @base as the clocksource might change concurrently.
         */
        base = READ_ONCE(cs->base);
        if (!base || base->id != base_id)
                return false;

        *cycles -= base->offset;
        if (!convert_clock(cycles, base->denominator, base->numerator))
                return false;
        return true;
}

static bool convert_ns_to_cs(u64 *delta)
{
        struct tk_read_base *tkr = &tk_core.timekeeper.tkr_mono;

        if (BITS_TO_BYTES(fls64(*delta) + tkr->shift) >= sizeof(*delta))
                return false;

        *delta = div_u64((*delta << tkr->shift) - tkr->xtime_nsec, tkr->mult);
        return true;
}

/**
 * ktime_real_to_base_clock() - Convert CLOCK_REALTIME timestamp to a base clock timestamp
 * @treal:      CLOCK_REALTIME timestamp to convert
 * @base_id:    base clocksource id
 * @cycles:     pointer to store the converted base clock timestamp
 *
 * Converts a supplied, future realtime clock value to the corresponding base clock value.
 *
 * Return: true if the conversion is successful, false otherwise.
 */
bool ktime_real_to_base_clock(ktime_t treal, enum clocksource_ids base_id, u64 *cycles)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned int seq;
        u64 delta;

        do {
                seq = read_seqcount_begin(&tk_core.seq);
                if ((u64)treal < tk->tkr_mono.base_real)
                        return false;
                delta = (u64)treal - tk->tkr_mono.base_real;
                if (!convert_ns_to_cs(&delta))
                        return false;
                *cycles = tk->tkr_mono.cycle_last + delta;
                if (!convert_cs_to_base(cycles, base_id))
                        return false;
        } while (read_seqcount_retry(&tk_core.seq, seq));

        return true;
}
EXPORT_SYMBOL_GPL(ktime_real_to_base_clock);

/**
 * get_device_system_crosststamp - Synchronously capture system/device timestamp
 * @get_time_fn:        Callback to get simultaneous device time and
 *                      system counter from the device driver
 * @ctx:                Context passed to get_time_fn()
 * @history_begin:      Historical reference point used to interpolate system
 *                      time when counter provided by the driver is before the current interval
 * @xtstamp:            Receives simultaneously captured system and device time
 *
 * Reads a timestamp from a device and correlates it to system time
 */
int get_device_system_crosststamp(int (*get_time_fn)
                                  (ktime_t *device_time,
                                   struct system_counterval_t *sys_counterval,
                                   void *ctx),
                                  void *ctx,
                                  struct system_time_snapshot *history_begin,
                                  struct system_device_crosststamp *xtstamp)
{
        struct system_counterval_t system_counterval;
        struct timekeeper *tk = &tk_core.timekeeper;
        u64 cycles, now, interval_start;
        unsigned int clock_was_set_seq = 0;
        ktime_t base_real, base_raw;
        u64 nsec_real, nsec_raw;
        u8 cs_was_changed_seq;
        unsigned int seq;
        bool do_interp;
        int ret;

        do {
                seq = read_seqcount_begin(&tk_core.seq);
                /*
                 * Try to synchronously capture device time and a system
                 * counter value calling back into the device driver
                 */
                ret = get_time_fn(&xtstamp->device, &system_counterval, ctx);
                if (ret)
                        return ret;

                /*
                 * Verify that the clocksource ID associated with the captured
                 * system counter value is the same as for the currently
                 * installed timekeeper clocksource
                 */
                if (system_counterval.cs_id == CSID_GENERIC ||
                    !convert_base_to_cs(&system_counterval))
                        return -ENODEV;
                cycles = system_counterval.cycles;

                /*
                 * Check whether the system counter value provided by the
                 * device driver is on the current timekeeping interval.
                 */
                now = tk_clock_read(&tk->tkr_mono);
                interval_start = tk->tkr_mono.cycle_last;
                if (!timestamp_in_interval(interval_start, now, cycles)) {
                        clock_was_set_seq = tk->clock_was_set_seq;
                        cs_was_changed_seq = tk->cs_was_changed_seq;
                        cycles = interval_start;
                        do_interp = true;
                } else {
                        do_interp = false;
                }

                base_real = ktime_add(tk->tkr_mono.base,
                                      tk_core.timekeeper.offs_real);
                base_raw = tk->tkr_raw.base;

                nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, cycles);
                nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw, cycles);
        } while (read_seqcount_retry(&tk_core.seq, seq));

        xtstamp->sys_realtime = ktime_add_ns(base_real, nsec_real);
        xtstamp->sys_monoraw = ktime_add_ns(base_raw, nsec_raw);

        /*
         * Interpolate if necessary, adjusting back from the start of the
         * current interval
         */
        if (do_interp) {
                u64 partial_history_cycles, total_history_cycles;
                bool discontinuity;

                /*
                 * Check that the counter value is not before the provided
                 * history reference and that the history doesn't cross a
                 * clocksource change
                 */
                if (!history_begin ||
                    !timestamp_in_interval(history_begin->cycles,
                                           cycles, system_counterval.cycles) ||
                    history_begin->cs_was_changed_seq != cs_was_changed_seq)
                        return -EINVAL;
                partial_history_cycles = cycles - system_counterval.cycles;
                total_history_cycles = cycles - history_begin->cycles;
                discontinuity =
                        history_begin->clock_was_set_seq != clock_was_set_seq;

                ret = adjust_historical_crosststamp(history_begin,
                                                    partial_history_cycles,
                                                    total_history_cycles,
                                                    discontinuity, xtstamp);
                if (ret)
                        return ret;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(get_device_system_crosststamp);

/**
 * timekeeping_clocksource_has_base - Check whether the current clocksource
 * is based on a given base clock
 * @id:         base clocksource ID
 *
 * Note: The return value is a snapshot which can become invalid right
 * after the function returns.
 *
 * Return: true if the timekeeper clocksource has a base clock with @id,
 * false otherwise
 */
bool timekeeping_clocksource_has_base(enum clocksource_ids id)
{
        /*
         * This is a snapshot, so no point in using the sequence
         * count. Just prevent the compiler from re-evaluating @base as the
         * clocksource might change concurrently.
         */
        struct clocksource_base *base = READ_ONCE(tk_core.timekeeper.tkr_mono.clock->base);

        return base ? base->id == id : false;
}
EXPORT_SYMBOL_GPL(timekeeping_clocksource_has_base);

/**
 * do_settimeofday64 - Sets the time of day.
 * @ts:         pointer to the timespec64 variable containing the new time
 *
 * Sets the time of day to the new time, updates NTP and notifies hrtimers
 */
int do_settimeofday64(const struct timespec64 *ts)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        struct timespec64 ts_delta, xt;
        unsigned long flags;
        int ret = 0;

        if (!timespec64_valid_settod(ts))
                return -EINVAL;

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&tk_core.seq);

        timekeeping_forward_now(tk);

        xt = tk_xtime(tk);
        ts_delta = timespec64_sub(*ts, xt);

        if (timespec64_compare(&tk->wall_to_monotonic, &ts_delta) > 0) {
                ret = -EINVAL;
                goto out;
        }

        tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts_delta));

        tk_set_xtime(tk, ts);
out:
        timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

        write_seqcount_end(&tk_core.seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

        /* Signal hrtimers about time change */
        clock_was_set(CLOCK_SET_WALL);

        if (!ret) {
                audit_tk_injoffset(ts_delta);
                add_device_randomness(ts, sizeof(*ts));
        }

        return ret;
}
EXPORT_SYMBOL(do_settimeofday64);

/**
 * timekeeping_inject_offset - Adds or subtracts from the current time.
 * @ts:         Pointer to the timespec variable containing the offset
 *
 * Adds or subtracts an offset value from the current time.
 */
static int timekeeping_inject_offset(const struct timespec64 *ts)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned long flags;
        struct timespec64 tmp;
        int ret = 0;

        if (ts->tv_nsec < 0 || ts->tv_nsec >= NSEC_PER_SEC)
                return -EINVAL;

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&tk_core.seq);

        timekeeping_forward_now(tk);

        /* Make sure the proposed value is valid */
        tmp = timespec64_add(tk_xtime(tk), *ts);
        if (timespec64_compare(&tk->wall_to_monotonic, ts) > 0 ||
            !timespec64_valid_settod(&tmp)) {
                ret = -EINVAL;
                goto error;
        }

        tk_xtime_add(tk, ts);
        tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *ts));

error: /* even if we error out, we forwarded the time, so call update */
        timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

        write_seqcount_end(&tk_core.seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

        /* Signal hrtimers about time change */
        clock_was_set(CLOCK_SET_WALL);

        return ret;
}

/*
 * Indicates if there is an offset between the system clock and the hardware
 * clock/persistent clock/rtc.
 */
int persistent_clock_is_local;

/*
 * Adjust the time obtained from the CMOS to be UTC time instead of
 * local time.
 *
 * This is ugly, but preferable to the alternatives. Otherwise we
 * would either need to write a program to do it in /etc/rc (and risk
 * confusion if the program gets run more than once; it would also be
 * hard to make the program warp the clock precisely n hours) or
 * compile in the timezone information into the kernel. Bad, bad....
 *
 *                                              - TYT, 1992-01-01
 *
 * The best thing to do is to keep the CMOS clock in universal time (UTC)
 * as real UNIX machines always do it. This avoids all headaches about
 * daylight saving times and warping kernel clocks.
 */
void timekeeping_warp_clock(void)
{
        if (sys_tz.tz_minuteswest != 0) {
                struct timespec64 adjust;

                persistent_clock_is_local = 1;
                adjust.tv_sec = sys_tz.tz_minuteswest * 60;
                adjust.tv_nsec = 0;
                timekeeping_inject_offset(&adjust);
        }
}

/*
 * __timekeeping_set_tai_offset - Sets the TAI offset from UTC and monotonic
 */
static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
{
        tk->tai_offset = tai_offset;
        tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
}
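
/*
 * Example (illustrative only): TAI-UTC has been 37s since the
 * 2017-01-01 leap second, so after
 *
 *      __timekeeping_set_tai_offset(tk, 37);
 *
 * tk->offs_tai equals tk->offs_real plus 37 * NSEC_PER_SEC, and a
 * CLOCK_TAI readout is the monotonic time plus that offset.
 */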
1571
1572 /*
1573 * change_clocksource - Swaps clocksources if a new one is available
1574 *
1575 * Accumulates current time interval and initializes new clocksource
1576 */
change_clocksource(void * data)1577 static int change_clocksource(void *data)
1578 {
1579 struct timekeeper *tk = &tk_core.timekeeper;
1580 struct clocksource *new, *old = NULL;
1581 unsigned long flags;
1582 bool change = false;
1583
1584 new = (struct clocksource *) data;
1585
1586 /*
1587 * If the cs is in module, get a module reference. Succeeds
1588 * for built-in code (owner == NULL) as well.
1589 */
1590 if (try_module_get(new->owner)) {
1591 if (!new->enable || new->enable(new) == 0)
1592 change = true;
1593 else
1594 module_put(new->owner);
1595 }
1596
1597 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1598 write_seqcount_begin(&tk_core.seq);
1599
1600 timekeeping_forward_now(tk);
1601
1602 if (change) {
1603 old = tk->tkr_mono.clock;
1604 tk_setup_internals(tk, new);
1605 }
1606
1607 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
1608
1609 write_seqcount_end(&tk_core.seq);
1610 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1611
1612 if (old) {
1613 if (old->disable)
1614 old->disable(old);
1615
1616 module_put(old->owner);
1617 }
1618
1619 return 0;
1620 }
1621
1622 /**
1623 * timekeeping_notify - Install a new clock source
1624 * @clock: pointer to the clock source
1625 *
1626 * This function is called from clocksource.c after a new, better clock
1627 * source has been registered. The caller holds the clocksource_mutex.
1628 */
timekeeping_notify(struct clocksource * clock)1629 int timekeeping_notify(struct clocksource *clock)
1630 {
1631 struct timekeeper *tk = &tk_core.timekeeper;
1632
1633 if (tk->tkr_mono.clock == clock)
1634 return 0;
1635 stop_machine(change_clocksource, clock, NULL);
1636 tick_clock_notify();
1637 return tk->tkr_mono.clock == clock ? 0 : -1;
1638 }
1639
1640 /**
1641 * ktime_get_raw_ts64 - Returns the raw monotonic time in a timespec
1642 * @ts: pointer to the timespec64 to be set
1643 *
1644 * Returns the raw monotonic time (completely un-modified by ntp)
1645 */
ktime_get_raw_ts64(struct timespec64 * ts)1646 void ktime_get_raw_ts64(struct timespec64 *ts)
1647 {
1648 struct timekeeper *tk = &tk_core.timekeeper;
1649 unsigned int seq;
1650 u64 nsecs;
1651
1652 do {
1653 seq = read_seqcount_begin(&tk_core.seq);
1654 ts->tv_sec = tk->raw_sec;
1655 nsecs = timekeeping_get_ns(&tk->tkr_raw);
1656
1657 } while (read_seqcount_retry(&tk_core.seq, seq));
1658
1659 ts->tv_nsec = 0;
1660 timespec64_add_ns(ts, nsecs);
1661 }
1662 EXPORT_SYMBOL(ktime_get_raw_ts64);
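/*
 * Usage sketch (illustrative): the raw clock is handy for measuring an
 * interval that must not be distorted by NTP slewing.
 */
static void __maybe_unused example_raw_interval(void)
{
	struct timespec64 start, end, delta;

	ktime_get_raw_ts64(&start);
	/* ... work being timed ... */
	ktime_get_raw_ts64(&end);

	delta = timespec64_sub(end, start);
	pr_info("raw elapsed: %lld.%09ld s\n",
		(long long)delta.tv_sec, delta.tv_nsec);
}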
1663
1664
1665 /**
1666 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
1667 */
1668 int timekeeping_valid_for_hres(void)
1669 {
1670 struct timekeeper *tk = &tk_core.timekeeper;
1671 unsigned int seq;
1672 int ret;
1673
1674 do {
1675 seq = read_seqcount_begin(&tk_core.seq);
1676
1677 ret = tk->tkr_mono.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
1678
1679 } while (read_seqcount_retry(&tk_core.seq, seq));
1680
1681 return ret;
1682 }
1683
1684 /**
1685 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
1686 */
1687 u64 timekeeping_max_deferment(void)
1688 {
1689 struct timekeeper *tk = &tk_core.timekeeper;
1690 unsigned int seq;
1691 u64 ret;
1692
1693 do {
1694 seq = read_seqcount_begin(&tk_core.seq);
1695
1696 ret = tk->tkr_mono.clock->max_idle_ns;
1697
1698 } while (read_seqcount_retry(&tk_core.seq, seq));
1699
1700 return ret;
1701 }
1702
1703 /**
1704 * read_persistent_clock64 - Return time from the persistent clock.
1705 * @ts: Pointer to the storage for the readout value
1706 *
1707 * Weak dummy function for arches that do not yet support it.
1708 * Reads the time from the battery backed persistent clock.
1709 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
1710 *
1711 * XXX - Do be sure to remove it once all arches implement it.
1712 */
1713 void __weak read_persistent_clock64(struct timespec64 *ts)
1714 {
1715 ts->tv_sec = 0;
1716 ts->tv_nsec = 0;
1717 }
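/*
 * Override sketch (illustrative): an architecture with a battery backed
 * clock readable with irqs off could provide something like the below.
 * my_rtc_read_seconds() is a hypothetical helper.
 */
#if 0	/* illustrative only */
void read_persistent_clock64(struct timespec64 *ts)
{
	ts->tv_sec = my_rtc_read_seconds();	/* hypothetical */
	ts->tv_nsec = 0;
}
#endif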
1718
1719 /**
1720 * read_persistent_wall_and_boot_offset - Read persistent clock, and also offset
1721 * from the boot.
1722 * @wall_time: current time as returned by persistent clock
1723 * @boot_offset: offset that is defined as wall_time - boot_time
1724 *
1725 * Weak dummy function for arches that do not yet support it.
1726 *
1727 * The default function calculates offset based on the current value of
1728 * local_clock(). This way architectures that support sched_clock() but don't
1729 * have a dedicated boot time clock will still provide the best estimate of
1730 * the boot time.
1731 */
1732 void __weak __init
1733 read_persistent_wall_and_boot_offset(struct timespec64 *wall_time,
1734 struct timespec64 *boot_offset)
1735 {
1736 read_persistent_clock64(wall_time);
1737 *boot_offset = ns_to_timespec64(local_clock());
1738 }
1739
1740 /*
1741 * Flag reflecting whether timekeeping_resume() has injected sleeptime.
1742 *
1743 * The flag starts off false and is only set when a suspend reaches
1744 * timekeeping_suspend(). timekeeping_resume() clears it when the
1745 * timekeeper clocksource did not stop across suspend and has been
1746 * used to update the sleep time. If the timekeeper clocksource has
1747 * stopped, the flag stays true and is used by the RTC resume code to
1748 * decide whether sleeptime must be injected; if so, the flag is cleared there.
1749 *
1750 * If a suspend fails before reaching timekeeping_resume() then the flag
1751 * stays false and prevents erroneous sleeptime injection.
1752 */
1753 static bool suspend_timing_needed;
1754
1755 /* Flag for if there is a persistent clock on this platform */
1756 static bool persistent_clock_exists;
1757
1758 /*
1759 * timekeeping_init - Initializes the clocksource and common timekeeping values
1760 */
1761 void __init timekeeping_init(void)
1762 {
1763 struct timespec64 wall_time, boot_offset, wall_to_mono;
1764 struct timekeeper *tk = &tk_core.timekeeper;
1765 struct clocksource *clock;
1766 unsigned long flags;
1767
1768 read_persistent_wall_and_boot_offset(&wall_time, &boot_offset);
1769 if (timespec64_valid_settod(&wall_time) &&
1770 timespec64_to_ns(&wall_time) > 0) {
1771 persistent_clock_exists = true;
1772 } else if (timespec64_to_ns(&wall_time) != 0) {
1773 pr_warn("Persistent clock returned invalid value\n");
1774 wall_time = (struct timespec64){0};
1775 }
1776
1777 if (timespec64_compare(&wall_time, &boot_offset) < 0)
1778 boot_offset = (struct timespec64){0};
1779
1780 /*
1781 * We want to set wall_to_mono, so that the following holds:
1782 * wall time + wall_to_mono = boot time
1783 */
1784 wall_to_mono = timespec64_sub(boot_offset, wall_time);
1785
1786 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1787 write_seqcount_begin(&tk_core.seq);
1788 ntp_init();
1789
1790 clock = clocksource_default_clock();
1791 if (clock->enable)
1792 clock->enable(clock);
1793 tk_setup_internals(tk, clock);
1794
1795 tk_set_xtime(tk, &wall_time);
1796 tk->raw_sec = 0;
1797
1798 tk_set_wall_to_mono(tk, wall_to_mono);
1799
1800 timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
1801
1802 write_seqcount_end(&tk_core.seq);
1803 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1804 }
1805
1806 /* time in seconds when suspend began for persistent clock */
1807 static struct timespec64 timekeeping_suspend_time;
1808
1809 /**
1810 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
1811 * @tk: Pointer to the timekeeper to be updated
1812 * @delta: Pointer to the delta value in timespec64 format
1813 *
1814 * Takes a timespec offset measuring a suspend interval and properly
1815 * adds the sleep offset to the timekeeping variables.
1816 */
1817 static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
1818 const struct timespec64 *delta)
1819 {
1820 if (!timespec64_valid_strict(delta)) {
1821 printk_deferred(KERN_WARNING
1822 "__timekeeping_inject_sleeptime: Invalid "
1823 "sleep delta value!\n");
1824 return;
1825 }
1826 tk_xtime_add(tk, delta);
1827 tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta));
1828 tk_update_sleep_time(tk, timespec64_to_ktime(*delta));
1829 tk_debug_account_sleep_time(delta);
1830 }
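/*
 * Worked example (illustrative): injecting a 5s sleep interval adds 5s
 * to xtime and subtracts 5s from wall_to_monotonic, so CLOCK_MONOTONIC
 * reads the same before and after while CLOCK_REALTIME and
 * CLOCK_BOOTTIME both advance by the time spent asleep.
 */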
1831
1832 #if defined(CONFIG_PM_SLEEP) && defined(CONFIG_RTC_HCTOSYS_DEVICE)
1833 /*
1834 * We have three kinds of time sources to use for sleep time
1835 * injection, the preference order is:
1836 * 1) non-stop clocksource
1837 * 2) persistent clock (ie: RTC accessible when irqs are off)
1838 * 3) RTC
1839 *
1840 * 1) and 2) are used by timekeeping, 3) by the RTC subsystem.
1841 * If the system has neither 1) nor 2), 3) is used as the final fallback.
1842 *
1844 * If timekeeping has injected sleeptime via either 1) or 2),
1845 * 3) becomes needless, so in this case we don't need to call
1846 * rtc_resume(), and this is what timekeeping_rtc_skipresume()
1847 * means.
1848 */
1849 bool timekeeping_rtc_skipresume(void)
1850 {
1851 return !suspend_timing_needed;
1852 }
1853
1854 /*
1855 * Whether 1) will be used can only be determined in
1856 * timekeeping_resume(), which is invoked after rtc_suspend(),
1857 * so we can't reliably skip rtc_suspend() if the system has 1).
1858 *
1859 * But if the system has 2), 2) will definitely be used, so in
1860 * this case we don't need to call rtc_suspend(); this is what
1861 * timekeeping_rtc_skipsuspend() means.
1862 */
1863 bool timekeeping_rtc_skipsuspend(void)
1864 {
1865 return persistent_clock_exists;
1866 }
1867
1868 /**
1869 * timekeeping_inject_sleeptime64 - Adds suspend interval to timekeeping values
1870 * @delta: pointer to a timespec64 delta value
1871 *
1872 * This hook is for architectures that cannot support read_persistent_clock64
1873 * because their RTC/persistent clock is only accessible when irqs are enabled,
1874 * and they also don't have an effective nonstop clocksource.
1875 *
1876 * This function should only be called by rtc_resume(), and allows
1877 * a suspend offset to be injected into the timekeeping values.
1878 */
1879 void timekeeping_inject_sleeptime64(const struct timespec64 *delta)
1880 {
1881 struct timekeeper *tk = &tk_core.timekeeper;
1882 unsigned long flags;
1883
1884 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1885 write_seqcount_begin(&tk_core.seq);
1886
1887 suspend_timing_needed = false;
1888
1889 timekeeping_forward_now(tk);
1890
1891 __timekeeping_inject_sleeptime(tk, delta);
1892
1893 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
1894
1895 write_seqcount_end(&tk_core.seq);
1896 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1897
1898 /* Signal hrtimers about time change */
1899 clock_was_set(CLOCK_SET_WALL | CLOCK_SET_BOOT);
1900 }
1901 #endif
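/*
 * Caller sketch (illustrative, loosely modeled on the RTC core resume
 * path): read the RTC before suspend and after resume and inject the
 * difference. Conversion from struct rtc_time and error handling are
 * omitted; see drivers/rtc/class.c for the real implementation.
 */
#if 0	/* illustrative only */
static void rtc_resume_sketch(const struct timespec64 *before,
			      const struct timespec64 *after)
{
	struct timespec64 sleep = timespec64_sub(*after, *before);

	if (timespec64_to_ns(&sleep) > 0)
		timekeeping_inject_sleeptime64(&sleep);
}
#endif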
1902
1903 /**
1904 * timekeeping_resume - Resumes the generic timekeeping subsystem.
1905 */
1906 void timekeeping_resume(void)
1907 {
1908 struct timekeeper *tk = &tk_core.timekeeper;
1909 struct clocksource *clock = tk->tkr_mono.clock;
1910 unsigned long flags;
1911 struct timespec64 ts_new, ts_delta;
1912 u64 cycle_now, nsec;
1913 bool inject_sleeptime = false;
1914
1915 read_persistent_clock64(&ts_new);
1916
1917 clockevents_resume();
1918 clocksource_resume();
1919
1920 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1921 write_seqcount_begin(&tk_core.seq);
1922
1923 /*
1924 * After the system resumes, we need to calculate the suspended time and
1925 * compensate the OS time for it. There are 3 sources that could be
1926 * used: Nonstop clocksource during suspend, persistent clock and rtc
1927 * device.
1928 *
1929 * One specific platform may have 1 or 2 or all of them, and the
1930 * preference will be:
1931 * suspend-nonstop clocksource -> persistent clock -> rtc
1932 * The less preferred source will only be tried if there is no better
1933 * usable source. The rtc part is handled separately in rtc core code.
1934 */
1935 cycle_now = tk_clock_read(&tk->tkr_mono);
1936 nsec = clocksource_stop_suspend_timing(clock, cycle_now);
1937 if (nsec > 0) {
1938 ts_delta = ns_to_timespec64(nsec);
1939 inject_sleeptime = true;
1940 } else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) {
1941 ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time);
1942 inject_sleeptime = true;
1943 }
1944
1945 if (inject_sleeptime) {
1946 suspend_timing_needed = false;
1947 __timekeeping_inject_sleeptime(tk, &ts_delta);
1948 }
1949
1950 /* Re-base the last cycle value */
1951 tk->tkr_mono.cycle_last = cycle_now;
1952 tk->tkr_raw.cycle_last = cycle_now;
1953
1954 tk->ntp_error = 0;
1955 timekeeping_suspended = 0;
1956 timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
1957 write_seqcount_end(&tk_core.seq);
1958 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1959
1960 touch_softlockup_watchdog();
1961
1962 /* Resume the clockevent device(s) and hrtimers */
1963 tick_resume();
1964 /* Notify timerfd as resume is equivalent to clock_was_set() */
1965 timerfd_resume();
1966 }
1967
1968 int timekeeping_suspend(void)
1969 {
1970 struct timekeeper *tk = &tk_core.timekeeper;
1971 unsigned long flags;
1972 struct timespec64 delta, delta_delta;
1973 static struct timespec64 old_delta;
1974 struct clocksource *curr_clock;
1975 u64 cycle_now;
1976
1977 read_persistent_clock64(&timekeeping_suspend_time);
1978
1979 /*
1980 * On some systems the persistent clock cannot be detected at
1981 * timekeeping_init() by its return value, so if we see a valid
1982 * value returned, update the persistent_clock_exists flag.
1983 */
1984 if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
1985 persistent_clock_exists = true;
1986
1987 suspend_timing_needed = true;
1988
1989 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1990 write_seqcount_begin(&tk_core.seq);
1991 timekeeping_forward_now(tk);
1992 timekeeping_suspended = 1;
1993
1994 /*
1995 * Since we've called forward_now, cycle_last stores the value
1996 * just read from the current clocksource. Save this to potentially
1997 * use in suspend timing.
1998 */
1999 curr_clock = tk->tkr_mono.clock;
2000 cycle_now = tk->tkr_mono.cycle_last;
2001 clocksource_start_suspend_timing(curr_clock, cycle_now);
2002
2003 if (persistent_clock_exists) {
2004 /*
2005 * To avoid drift caused by repeated suspend/resumes,
2006 * which each can add ~1 second drift error,
2007 * try to compensate so the difference in system time
2008 * and persistent_clock time stays close to constant.
2009 */
2010 delta = timespec64_sub(tk_xtime(tk), timekeeping_suspend_time);
2011 delta_delta = timespec64_sub(delta, old_delta);
2012 if (abs(delta_delta.tv_sec) >= 2) {
2013 /*
2014 * if delta_delta is too large, assume time correction
2015 * has occurred and set old_delta to the current delta.
2016 */
2017 old_delta = delta;
2018 } else {
2019 /* Otherwise try to adjust timekeeping_suspend_time to compensate */
2020 timekeeping_suspend_time =
2021 timespec64_add(timekeeping_suspend_time, delta_delta);
2022 }
2023 }
2024
2025 timekeeping_update(tk, TK_MIRROR);
2026 halt_fast_timekeeper(tk);
2027 write_seqcount_end(&tk_core.seq);
2028 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2029
2030 tick_suspend();
2031 clocksource_suspend();
2032 clockevents_suspend();
2033
2034 return 0;
2035 }
2036
2037 /* syscore resume/suspend bits for timekeeping */
2038 static struct syscore_ops timekeeping_syscore_ops = {
2039 .resume = timekeeping_resume,
2040 .suspend = timekeeping_suspend,
2041 };
2042
2043 static int __init timekeeping_init_ops(void)
2044 {
2045 register_syscore_ops(&timekeeping_syscore_ops);
2046 return 0;
2047 }
2048 device_initcall(timekeeping_init_ops);
2049
2050 /*
2051 * Apply a multiplier adjustment to the timekeeper
2052 */
2053 static __always_inline void timekeeping_apply_adjustment(struct timekeeper *tk,
2054 s64 offset,
2055 s32 mult_adj)
2056 {
2057 s64 interval = tk->cycle_interval;
2058
2059 if (mult_adj == 0) {
2060 return;
2061 } else if (mult_adj == -1) {
2062 interval = -interval;
2063 offset = -offset;
2064 } else if (mult_adj != 1) {
2065 interval *= mult_adj;
2066 offset *= mult_adj;
2067 }
2068
2069 /*
2070 * So the following can be confusing.
2071 *
2072 * To keep things simple, let's assume mult_adj == 1 for now.
2073 *
2074 * When mult_adj != 1, remember that the interval and offset values
2075 * have been appropriately scaled so the math is the same.
2076 *
2077 * The basic idea here is that we're increasing the multiplier
2078 * by one, this causes the xtime_interval to be incremented by
2079 * one cycle_interval. This is because:
2080 * xtime_interval = cycle_interval * mult
2081 * So if mult is being incremented by one:
2082 * xtime_interval = cycle_interval * (mult + 1)
2083 * It's the same as:
2084 * xtime_interval = (cycle_interval * mult) + cycle_interval
2085 * Which can be shortened to:
2086 * xtime_interval += cycle_interval
2087 *
2088 * So offset stores the non-accumulated cycles. Thus the current
2089 * time (in shifted nanoseconds) is:
2090 * now = (offset * adj) + xtime_nsec
2091 * Now, even though we're adjusting the clock frequency, we have
2092 * to keep time consistent. In other words, we can't jump back
2093 * in time, and we also want to avoid jumping forward in time.
2094 *
2095 * So given the same offset value, we need the time to be the same
2096 * both before and after the freq adjustment.
2097 * now = (offset * adj_1) + xtime_nsec_1
2098 * now = (offset * adj_2) + xtime_nsec_2
2099 * So:
2100 * (offset * adj_1) + xtime_nsec_1 =
2101 * (offset * adj_2) + xtime_nsec_2
2102 * And we know:
2103 * adj_2 = adj_1 + 1
2104 * So:
2105 * (offset * adj_1) + xtime_nsec_1 =
2106 * (offset * (adj_1+1)) + xtime_nsec_2
2107 * (offset * adj_1) + xtime_nsec_1 =
2108 * (offset * adj_1) + offset + xtime_nsec_2
2109 * Canceling the sides:
2110 * xtime_nsec_1 = offset + xtime_nsec_2
2111 * Which gives us:
2112 * xtime_nsec_2 = xtime_nsec_1 - offset
2113 * Which simplifies to:
2114 * xtime_nsec -= offset
2115 */
2116 if ((mult_adj > 0) && (tk->tkr_mono.mult + mult_adj < mult_adj)) {
2117 /* NTP adjustment caused clocksource mult overflow */
2118 WARN_ON_ONCE(1);
2119 return;
2120 }
2121
2122 tk->tkr_mono.mult += mult_adj;
2123 tk->xtime_interval += interval;
2124 tk->tkr_mono.xtime_nsec -= offset;
2125 }
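/*
 * Sanity sketch of the invariant derived above: bumping mult by one
 * while subtracting 'offset' from xtime_nsec leaves the readout
 * unchanged. Plain integers, illustrative values only.
 */
static void __maybe_unused example_adjustment_invariant(void)
{
	u64 offset = 1000, mult = 4, xtime_nsec = 123456;
	u64 before = offset * mult + xtime_nsec;
	u64 after = offset * (mult + 1) + (xtime_nsec - offset);

	WARN_ON(before != after);	/* both are 127456 */
}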
2126
2127 /*
2128 * Adjust the timekeeper's multiplier to the correct frequency
2129 * and also to reduce the accumulated error value.
2130 */
2131 static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
2132 {
2133 u32 mult;
2134
2135 /*
2136 * Determine the multiplier from the current NTP tick length.
2137 * Avoid expensive division when the tick length doesn't change.
2138 */
2139 if (likely(tk->ntp_tick == ntp_tick_length())) {
2140 mult = tk->tkr_mono.mult - tk->ntp_err_mult;
2141 } else {
2142 tk->ntp_tick = ntp_tick_length();
2143 mult = div64_u64((tk->ntp_tick >> tk->ntp_error_shift) -
2144 tk->xtime_remainder, tk->cycle_interval);
2145 }
2146
2147 /*
2148 * If the clock is behind the NTP time, increase the multiplier by 1
2149 * to catch up with it. If it's ahead and there was a remainder in the
2150 * tick division, the clock will slow down. Otherwise it will stay
2151 * ahead until the tick length changes to a non-divisible value.
2152 */
2153 tk->ntp_err_mult = tk->ntp_error > 0 ? 1 : 0;
2154 mult += tk->ntp_err_mult;
2155
2156 timekeeping_apply_adjustment(tk, offset, mult - tk->tkr_mono.mult);
2157
2158 if (unlikely(tk->tkr_mono.clock->maxadj &&
2159 (abs(tk->tkr_mono.mult - tk->tkr_mono.clock->mult)
2160 > tk->tkr_mono.clock->maxadj))) {
2161 printk_once(KERN_WARNING
2162 "Adjusting %s more than 11%% (%ld vs %ld)\n",
2163 tk->tkr_mono.clock->name, (long)tk->tkr_mono.mult,
2164 (long)tk->tkr_mono.clock->mult + tk->tkr_mono.clock->maxadj);
2165 }
2166
2167 /*
2168 * It may be possible that when we entered this function, xtime_nsec
2169 * was very small. Further, if we're slightly speeding the clocksource
2170 * in the code above, it's possible the required corrective factor to
2171 * xtime_nsec could cause it to underflow.
2172 *
2173 * Now, since we have already accumulated the second and the NTP
2174 * subsystem has been notified via second_overflow(), we need to skip
2175 * the next update.
2176 */
2177 if (unlikely((s64)tk->tkr_mono.xtime_nsec < 0)) {
2178 tk->tkr_mono.xtime_nsec += (u64)NSEC_PER_SEC <<
2179 tk->tkr_mono.shift;
2180 tk->xtime_sec--;
2181 tk->skip_second_overflow = 1;
2182 }
2183 }
2184
2185 /*
2186 * accumulate_nsecs_to_secs - Accumulates nsecs into secs
2187 *
2188 * Helper function that accumulates the nsecs greater than a second
2189 * from the xtime_nsec field to the xtime_secs field.
2190 * It also calls into the NTP code to handle leapsecond processing.
2191 */
2192 static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
2193 {
2194 u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
2195 unsigned int clock_set = 0;
2196
2197 while (tk->tkr_mono.xtime_nsec >= nsecps) {
2198 int leap;
2199
2200 tk->tkr_mono.xtime_nsec -= nsecps;
2201 tk->xtime_sec++;
2202
2203 /*
2204 * Skip NTP update if this second was accumulated before,
2205 * i.e. xtime_nsec underflowed in timekeeping_adjust()
2206 */
2207 if (unlikely(tk->skip_second_overflow)) {
2208 tk->skip_second_overflow = 0;
2209 continue;
2210 }
2211
2212 /* Figure out if it's a leap sec and apply if needed */
2213 leap = second_overflow(tk->xtime_sec);
2214 if (unlikely(leap)) {
2215 struct timespec64 ts;
2216
2217 tk->xtime_sec += leap;
2218
2219 ts.tv_sec = leap;
2220 ts.tv_nsec = 0;
2221 tk_set_wall_to_mono(tk,
2222 timespec64_sub(tk->wall_to_monotonic, ts));
2223
2224 __timekeeping_set_tai_offset(tk, tk->tai_offset - leap);
2225
2226 clock_set = TK_CLOCK_WAS_SET;
2227 }
2228 }
2229 return clock_set;
2230 }
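/*
 * Shifted-nanoseconds demo (illustrative): xtime_nsec holds nanoseconds
 * scaled by the clocksource shift, so one full second is
 * NSEC_PER_SEC << shift, exactly the nsecps bound used above.
 */
static void __maybe_unused example_shifted_second(void)
{
	u32 shift = 8;
	u64 xtime_nsec = ((u64)NSEC_PER_SEC + 250) << shift;
	u64 nsecps = (u64)NSEC_PER_SEC << shift;
	u64 secs = 0;

	while (xtime_nsec >= nsecps) {
		xtime_nsec -= nsecps;
		secs++;
	}
	WARN_ON(secs != 1 || xtime_nsec != (u64)250 << shift);
}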
2231
2232 /*
2233 * logarithmic_accumulation - shifted accumulation of cycles
2234 *
2235 * This function accumulates a shifted interval of cycles into
2236 * a shifted interval of nanoseconds, allowing for an O(log)
2237 * accumulation loop.
2238 *
2239 * Returns the unconsumed cycles.
2240 */
2241 static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
2242 u32 shift, unsigned int *clock_set)
2243 {
2244 u64 interval = tk->cycle_interval << shift;
2245 u64 snsec_per_sec;
2246
2247 /* If the offset is smaller than a shifted interval, do nothing */
2248 if (offset < interval)
2249 return offset;
2250
2251 /* Accumulate one shifted interval */
2252 offset -= interval;
2253 tk->tkr_mono.cycle_last += interval;
2254 tk->tkr_raw.cycle_last += interval;
2255
2256 tk->tkr_mono.xtime_nsec += tk->xtime_interval << shift;
2257 *clock_set |= accumulate_nsecs_to_secs(tk);
2258
2259 /* Accumulate raw time */
2260 tk->tkr_raw.xtime_nsec += tk->raw_interval << shift;
2261 snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
2262 while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) {
2263 tk->tkr_raw.xtime_nsec -= snsec_per_sec;
2264 tk->raw_sec++;
2265 }
2266
2267 /* Accumulate error between NTP and clock interval */
2268 tk->ntp_error += tk->ntp_tick << shift;
2269 tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
2270 (tk->ntp_error_shift + shift);
2271
2272 return offset;
2273 }
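/*
 * O(log) accumulation demo (illustrative): consume 'offset' in chunks
 * of interval << shift, shrinking the chunk once it no longer fits.
 * This mirrors the shape of the loop in timekeeping_advance() below.
 */
static void __maybe_unused example_log_accumulate(void)
{
	u64 interval = 100, offset = 1000, accumulated = 0;
	int shift = max(0, ilog2(offset) - ilog2(interval));

	while (offset >= interval) {
		while (shift && offset < interval << shift)
			shift--;
		offset -= interval << shift;
		accumulated += interval << shift;
	}
	WARN_ON(accumulated != 1000 || offset != 0);	/* chunks: 800, 200 */
}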
2274
2275 /*
2276 * timekeeping_advance - Updates the timekeeper to the current time and
2277 * current NTP tick length
2278 */
2279 static bool timekeeping_advance(enum timekeeping_adv_mode mode)
2280 {
2281 struct timekeeper *real_tk = &tk_core.timekeeper;
2282 struct timekeeper *tk = &shadow_timekeeper;
2283 u64 offset;
2284 int shift = 0, maxshift;
2285 unsigned int clock_set = 0;
2286 unsigned long flags;
2287
2288 raw_spin_lock_irqsave(&timekeeper_lock, flags);
2289
2290 /* Make sure we're fully resumed: */
2291 if (unlikely(timekeeping_suspended))
2292 goto out;
2293
2294 offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
2295 tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
2296
2297 /* Check if there's really nothing to do */
2298 if (offset < real_tk->cycle_interval && mode == TK_ADV_TICK)
2299 goto out;
2300
2301 /* Do some additional sanity checking */
2302 timekeeping_check_update(tk, offset);
2303
2304 /*
2305 * With NO_HZ we may have to accumulate many cycle_intervals
2306 * (think "ticks") worth of time at once. To do this efficiently,
2307 * we calculate the largest doubling multiple of cycle_intervals
2308 * that is smaller than the offset. We then accumulate that
2309 * chunk in one go, and then try to consume the next smaller
2310 * doubled multiple.
2311 */
2312 shift = ilog2(offset) - ilog2(tk->cycle_interval);
2313 shift = max(0, shift);
2314 /* Bound shift to one less than what overflows tick_length */
2315 maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
2316 shift = min(shift, maxshift);
2317 while (offset >= tk->cycle_interval) {
2318 offset = logarithmic_accumulation(tk, offset, shift,
2319 &clock_set);
2320 if (offset < tk->cycle_interval<<shift)
2321 shift--;
2322 }
2323
2324 /* Adjust the multiplier to correct NTP error */
2325 timekeeping_adjust(tk, offset);
2326
2327 /*
2328 * Finally, make sure that after the rounding
2329 * xtime_nsec isn't larger than NSEC_PER_SEC
2330 */
2331 clock_set |= accumulate_nsecs_to_secs(tk);
2332
2333 write_seqcount_begin(&tk_core.seq);
2334 /*
2335 * Update the real timekeeper.
2336 *
2337 * We could avoid this memcpy by switching pointers, but that
2338 * requires changes to all other timekeeper usage sites as
2339 * well, i.e. move the timekeeper pointer getter into the
2340 * spinlocked/seqcount protected sections. And we trade this
2341 * memcpy under the tk_core.seq against one before we start
2342 * updating.
2343 */
2344 timekeeping_update(tk, clock_set);
2345 memcpy(real_tk, tk, sizeof(*tk));
2346 /* The memcpy must come last. Do not put anything here! */
2347 write_seqcount_end(&tk_core.seq);
2348 out:
2349 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2350
2351 return !!clock_set;
2352 }
2353
2354 /**
2355 * update_wall_time - Uses the current clocksource to increment the wall time
2356 *
2357 */
2358 void update_wall_time(void)
2359 {
2360 if (timekeeping_advance(TK_ADV_TICK))
2361 clock_was_set_delayed();
2362 }
2363
2364 /**
2365 * getboottime64 - Return the real time of system boot.
2366 * @ts: pointer to the timespec64 to be set
2367 *
2368 * Returns the wall-time of boot in a timespec64.
2369 *
2370 * This is based on the wall_to_monotonic offset and the total suspend
2371 * time. Calls to settimeofday will affect the value returned (which
2372 * basically means that however wrong your real time clock is at boot time,
2373 * you get the right time here).
2374 */
2375 void getboottime64(struct timespec64 *ts)
2376 {
2377 struct timekeeper *tk = &tk_core.timekeeper;
2378 ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot);
2379
2380 *ts = ktime_to_timespec64(t);
2381 }
2382 EXPORT_SYMBOL_GPL(getboottime64);
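/*
 * Usage sketch (illustrative): real time minus the wall-clock boot time
 * yields the time since boot including suspend, i.e. CLOCK_BOOTTIME.
 */
static void __maybe_unused example_boottime(void)
{
	struct timespec64 boot, now, since_boot;

	getboottime64(&boot);
	ktime_get_real_ts64(&now);
	since_boot = timespec64_sub(now, boot);
	pr_info("up for %lld s including suspend\n",
		(long long)since_boot.tv_sec);
}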
2383
2384 void ktime_get_coarse_real_ts64(struct timespec64 *ts)
2385 {
2386 struct timekeeper *tk = &tk_core.timekeeper;
2387 unsigned int seq;
2388
2389 do {
2390 seq = read_seqcount_begin(&tk_core.seq);
2391
2392 *ts = tk_xtime(tk);
2393 } while (read_seqcount_retry(&tk_core.seq, seq));
2394 }
2395 EXPORT_SYMBOL(ktime_get_coarse_real_ts64);
2396
2397 void ktime_get_coarse_ts64(struct timespec64 *ts)
2398 {
2399 struct timekeeper *tk = &tk_core.timekeeper;
2400 struct timespec64 now, mono;
2401 unsigned int seq;
2402
2403 do {
2404 seq = read_seqcount_begin(&tk_core.seq);
2405
2406 now = tk_xtime(tk);
2407 mono = tk->wall_to_monotonic;
2408 } while (read_seqcount_retry(&tk_core.seq, seq));
2409
2410 set_normalized_timespec64(ts, now.tv_sec + mono.tv_sec,
2411 now.tv_nsec + mono.tv_nsec);
2412 }
2413 EXPORT_SYMBOL(ktime_get_coarse_ts64);
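/*
 * Note on the two coarse accessors above: they return the time at the
 * last tick without reading the clocksource, trading granularity for a
 * cheaper readout. They back the CLOCK_REALTIME_COARSE and
 * CLOCK_MONOTONIC_COARSE posix clocks.
 */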
2414
2415 /*
2416 * Must hold jiffies_lock
2417 */
2418 void do_timer(unsigned long ticks)
2419 {
2420 jiffies_64 += ticks;
2421 calc_global_load();
2422 }
2423
2424 /**
2425 * ktime_get_update_offsets_now - hrtimer helper
2426 * @cwsseq: pointer to check and store the clock was set sequence number
2427 * @offs_real: pointer to storage for monotonic -> realtime offset
2428 * @offs_boot: pointer to storage for monotonic -> boottime offset
2429 * @offs_tai: pointer to storage for monotonic -> clock tai offset
2430 *
2431 * Returns current monotonic time and updates the offsets if the
2432 * sequence number in @cwsseq and timekeeper.clock_was_set_seq are
2433 * different.
2434 *
2435 * Called from hrtimer_interrupt() or retrigger_next_event()
2436 */
2437 ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
2438 ktime_t *offs_boot, ktime_t *offs_tai)
2439 {
2440 struct timekeeper *tk = &tk_core.timekeeper;
2441 unsigned int seq;
2442 ktime_t base;
2443 u64 nsecs;
2444
2445 do {
2446 seq = read_seqcount_begin(&tk_core.seq);
2447
2448 base = tk->tkr_mono.base;
2449 nsecs = timekeeping_get_ns(&tk->tkr_mono);
2450 base = ktime_add_ns(base, nsecs);
2451
2452 if (*cwsseq != tk->clock_was_set_seq) {
2453 *cwsseq = tk->clock_was_set_seq;
2454 *offs_real = tk->offs_real;
2455 *offs_boot = tk->offs_boot;
2456 *offs_tai = tk->offs_tai;
2457 }
2458
2459 /* Handle leapsecond insertion adjustments */
2460 if (unlikely(base >= tk->next_leap_ktime))
2461 *offs_real = ktime_sub(tk->offs_real, ktime_set(1, 0));
2462
2463 } while (read_seqcount_retry(&tk_core.seq, seq));
2464
2465 return base;
2466 }
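/*
 * Caller sketch (illustrative, modeled on hrtimer_interrupt()): cache
 * the offsets locally and let the helper refresh them only when the
 * clock_was_set sequence number changes.
 */
static void __maybe_unused example_offsets_caller(void)
{
	static unsigned int cwsseq;
	static ktime_t offs_real, offs_boot, offs_tai;
	ktime_t now;

	now = ktime_get_update_offsets_now(&cwsseq, &offs_real,
					   &offs_boot, &offs_tai);
	/* A CLOCK_REALTIME expiry base would be ktime_add(now, offs_real) */
	pr_info("mono now: %lld ns\n", (long long)ktime_to_ns(now));
}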
2467
2468 /*
2469 * timekeeping_validate_timex - Ensures the timex is ok for use in do_adjtimex
2470 */
2471 static int timekeeping_validate_timex(const struct __kernel_timex *txc)
2472 {
2473 if (txc->modes & ADJ_ADJTIME) {
2474 /* singleshot must not be used with any other mode bits */
2475 if (!(txc->modes & ADJ_OFFSET_SINGLESHOT))
2476 return -EINVAL;
2477 if (!(txc->modes & ADJ_OFFSET_READONLY) &&
2478 !capable(CAP_SYS_TIME))
2479 return -EPERM;
2480 } else {
2481 /* In order to modify anything, you gotta be super-user! */
2482 if (txc->modes && !capable(CAP_SYS_TIME))
2483 return -EPERM;
2484 /*
2485 * if the quartz is off by more than 10% then
2486 * something is VERY wrong!
2487 */
2488 if (txc->modes & ADJ_TICK &&
2489 (txc->tick < 900000/USER_HZ ||
2490 txc->tick > 1100000/USER_HZ))
2491 return -EINVAL;
2492 }
2493
2494 if (txc->modes & ADJ_SETOFFSET) {
2495 /* In order to inject time, you gotta be super-user! */
2496 if (!capable(CAP_SYS_TIME))
2497 return -EPERM;
2498
2499 /*
2500 * Validate if a timespec/timeval used to inject a time
2501 * offset is valid. Offsets can be positive or negative, so
2502 * we don't check tv_sec. The value of the timeval/timespec
2503 * is the sum of its fields, but *NOTE*:
2504 * The field tv_usec/tv_nsec must always be non-negative and
2505 * we can't have more nanoseconds/microseconds than a second.
2506 */
2507 if (txc->time.tv_usec < 0)
2508 return -EINVAL;
2509
2510 if (txc->modes & ADJ_NANO) {
2511 if (txc->time.tv_usec >= NSEC_PER_SEC)
2512 return -EINVAL;
2513 } else {
2514 if (txc->time.tv_usec >= USEC_PER_SEC)
2515 return -EINVAL;
2516 }
2517 }
2518
2519 /*
2520 * Check for potential multiplication overflows that can
2521 * only happen on 64-bit systems:
2522 */
2523 if ((txc->modes & ADJ_FREQUENCY) && (BITS_PER_LONG == 64)) {
2524 if (LLONG_MIN / PPM_SCALE > txc->freq)
2525 return -EINVAL;
2526 if (LLONG_MAX / PPM_SCALE < txc->freq)
2527 return -EINVAL;
2528 }
2529
2530 return 0;
2531 }
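/*
 * Userspace sketch (illustrative) of an offset injection that passes
 * the validation above: ADJ_SETOFFSET with ADJ_NANO, where tv_usec
 * carries nanoseconds and must stay within [0, NSEC_PER_SEC).
 */
#if 0	/* userspace, illustrative only */
#include <sys/timex.h>

static int step_clock_forward_quarter_second(void)
{
	struct timex txc = {
		.modes = ADJ_SETOFFSET | ADJ_NANO,
		.time = { .tv_sec = 0, .tv_usec = 250000000 },	/* 0.25s */
	};

	return adjtimex(&txc);	/* requires CAP_SYS_TIME */
}
#endif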
2532
2533 /**
2534 * random_get_entropy_fallback - Returns the raw clock source value,
2535 * used by random.c for platforms with no valid random_get_entropy().
2536 */
2537 unsigned long random_get_entropy_fallback(void)
2538 {
2539 struct tk_read_base *tkr = &tk_core.timekeeper.tkr_mono;
2540 struct clocksource *clock = READ_ONCE(tkr->clock);
2541
2542 if (unlikely(timekeeping_suspended || !clock))
2543 return 0;
2544 return clock->read(clock);
2545 }
2546 EXPORT_SYMBOL_GPL(random_get_entropy_fallback);
2547
2548 /**
2549 * do_adjtimex() - Accessor function to NTP __do_adjtimex function
2550 * @txc: Pointer to kernel_timex structure containing NTP parameters
2551 */
2552 int do_adjtimex(struct __kernel_timex *txc)
2553 {
2554 struct timekeeper *tk = &tk_core.timekeeper;
2555 struct audit_ntp_data ad;
2556 bool offset_set = false;
2557 bool clock_set = false;
2558 struct timespec64 ts;
2559 unsigned long flags;
2560 s32 orig_tai, tai;
2561 int ret;
2562
2563 /* Validate the data before disabling interrupts */
2564 ret = timekeeping_validate_timex(txc);
2565 if (ret)
2566 return ret;
2567 add_device_randomness(txc, sizeof(*txc));
2568
2569 if (txc->modes & ADJ_SETOFFSET) {
2570 struct timespec64 delta;
2571 delta.tv_sec = txc->time.tv_sec;
2572 delta.tv_nsec = txc->time.tv_usec;
2573 if (!(txc->modes & ADJ_NANO))
2574 delta.tv_nsec *= 1000;
2575 ret = timekeeping_inject_offset(&delta);
2576 if (ret)
2577 return ret;
2578
2579 offset_set = delta.tv_sec != 0;
2580 audit_tk_injoffset(delta);
2581 }
2582
2583 audit_ntp_init(&ad);
2584
2585 ktime_get_real_ts64(&ts);
2586 add_device_randomness(&ts, sizeof(ts));
2587
2588 raw_spin_lock_irqsave(&timekeeper_lock, flags);
2589 write_seqcount_begin(&tk_core.seq);
2590
2591 orig_tai = tai = tk->tai_offset;
2592 ret = __do_adjtimex(txc, &ts, &tai, &ad);
2593
2594 if (tai != orig_tai) {
2595 __timekeeping_set_tai_offset(tk, tai);
2596 timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
2597 clock_set = true;
2598 }
2599 tk_update_leap_state(tk);
2600
2601 write_seqcount_end(&tk_core.seq);
2602 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2603
2604 audit_ntp_log(&ad);
2605
2606 /* Update the multiplier immediately if frequency was set directly */
2607 if (txc->modes & (ADJ_FREQUENCY | ADJ_TICK))
2608 clock_set |= timekeeping_advance(TK_ADV_FREQ);
2609
2610 if (clock_set)
2611 clock_was_set(CLOCK_SET_WALL);
2612
2613 ntp_notify_cmos_timer(offset_set);
2614
2615 return ret;
2616 }
2617
2618 #ifdef CONFIG_NTP_PPS
2619 /**
2620 * hardpps() - Accessor function to NTP __hardpps function
2621 * @phase_ts: Pointer to timespec64 structure representing phase timestamp
2622 * @raw_ts: Pointer to timespec64 structure representing raw timestamp
2623 */
2624 void hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts)
2625 {
2626 unsigned long flags;
2627
2628 raw_spin_lock_irqsave(&timekeeper_lock, flags);
2629 write_seqcount_begin(&tk_core.seq);
2630
2631 __hardpps(phase_ts, raw_ts);
2632
2633 write_seqcount_end(&tk_core.seq);
2634 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2635 }
2636 EXPORT_SYMBOL(hardpps);
2637 #endif /* CONFIG_NTP_PPS */
2638