// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Common time routines among all ppc machines.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
 * Paul Mackerras' version and mine for PReP and Pmac.
 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
 *
 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
 * to make clock more stable (2.4.0-test5). The only thing
 * that this code assumes is that the timebases have been synchronized
 * by firmware on SMP and are never stopped (never do sleep
 * on SMP then, nap and doze are OK).
 *
 * Speeded up do_gettimeofday by getting rid of references to
 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
 *
 * TODO (not necessarily in this file):
 * - improve precision and reproducibility of timebase frequency
 *   measurement at boot time.
 * - for astronomical applications: add a new function to get
 *   non ambiguous timestamps even around leap seconds. This needs
 *   a new timestamp format and a good name.
 *
 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 */

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/kernel_stat.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/cpu.h>
#include <linux/security.h>
#include <linux/percpu.h>
#include <linux/rtc.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/irq_work.h>
#include <linux/of_clk.h>
#include <linux/suspend.h>
#include <linux/sched/cputime.h>
#include <linux/processor.h>
#include <asm/trace.h>

#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/machdep.h>
#include <linux/uaccess.h>
#include <asm/time.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/smp.h>
#include <asm/vdso_datapage.h>
#include <asm/firmware.h>
#include <asm/asm-prototypes.h>

/* powerpc clocksource/clockevent code */

#include <linux/clockchips.h>
#include <linux/timekeeper_internal.h>

static u64 timebase_read(struct clocksource *);
static struct clocksource clocksource_timebase = {
	.name		= "timebase",
	.rating		= 400,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.mask		= CLOCKSOURCE_MASK(64),
	.read		= timebase_read,
	.vdso_clock_mode = VDSO_CLOCKMODE_ARCHTIMER,
};

#define DECREMENTER_DEFAULT_MAX 0x7FFFFFFF
u64 decrementer_max = DECREMENTER_DEFAULT_MAX;

static int decrementer_set_next_event(unsigned long evt,
				      struct clock_event_device *dev);
static int decrementer_shutdown(struct clock_event_device *evt);

struct clock_event_device decrementer_clockevent = {
	.name			= "decrementer",
	.rating			= 200,
	.irq			= 0,
	.set_next_event		= decrementer_set_next_event,
	.set_state_oneshot_stopped = decrementer_shutdown,
	.set_state_shutdown	= decrementer_shutdown,
	.tick_resume		= decrementer_shutdown,
	.features		= CLOCK_EVT_FEAT_ONESHOT |
				  CLOCK_EVT_FEAT_C3STOP,
};
EXPORT_SYMBOL(decrementer_clockevent);

DEFINE_PER_CPU(u64, decrementers_next_tb);
static DEFINE_PER_CPU(struct clock_event_device, decrementers);

#define XSEC_PER_SEC (1024*1024)

#ifdef CONFIG_PPC64
#define SCALE_XSEC(xsec, max)	(((xsec) * max) / XSEC_PER_SEC)
#else
/* compute ((xsec << 12) * max) >> 32 */
#define SCALE_XSEC(xsec, max)	mulhwu((xsec) << 12, max)
#endif
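
/*
 * XSEC_PER_SEC is 1024*1024 = 2^20, so the two SCALE_XSEC variants
 * agree: mulhwu(a, b) returns the high word of the unsigned 32x32-bit
 * product, i.e. (a * b) >> 32, hence
 * ((xsec << 12) * max) >> 32 = xsec * max * 2^12 / 2^32
 *                            = (xsec * max) / XSEC_PER_SEC.
 */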

unsigned long tb_ticks_per_jiffy;
unsigned long tb_ticks_per_usec = 100; /* sane default */
EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
EXPORT_SYMBOL(tb_ticks_per_sec);	/* for cputime_t conversions */

DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);

static u64 tb_to_ns_scale __read_mostly;
static unsigned tb_to_ns_shift __read_mostly;
static u64 boot_tb __read_mostly;

extern struct timezone sys_tz;
static long timezone_offset;

unsigned long ppc_proc_freq;
EXPORT_SYMBOL_GPL(ppc_proc_freq);
unsigned long ppc_tb_freq;
EXPORT_SYMBOL_GPL(ppc_tb_freq);

bool tb_invalid;

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
 * Factor for converting from cputime_t (timebase ticks) to
 * microseconds. This is stored as 0.64 fixed-point binary fraction.
 */
u64 __cputime_usec_factor;
EXPORT_SYMBOL(__cputime_usec_factor);

#ifdef CONFIG_PPC_SPLPAR
void (*dtl_consumer)(struct dtl_entry *, u64);
#endif

static void calc_cputime_factors(void)
{
	struct div_result res;

	div128_by_32(1000000, 0, tb_ticks_per_sec, &res);
	__cputime_usec_factor = res.result_low;
}
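
/*
 * Worked example (with a hypothetical 512 MHz timebase, i.e.
 * tb_ticks_per_sec = 512000000): the factor becomes
 * floor(1000000 * 2^64 / 512000000) = 2^64 / 512 = 2^55, so a later
 * ticks-to-microseconds conversion, (ticks * factor) >> 64, reduces
 * to ticks >> 9 = ticks / 512, i.e. one microsecond per 512 timebase
 * ticks, as expected.
 */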

/*
 * Read the SPURR on systems that have it, otherwise the PURR,
 * or if that doesn't exist return the timebase value passed in.
 */
static inline unsigned long read_spurr(unsigned long tb)
{
	if (cpu_has_feature(CPU_FTR_SPURR))
		return mfspr(SPRN_SPURR);
	if (cpu_has_feature(CPU_FTR_PURR))
		return mfspr(SPRN_PURR);
	return tb;
}

#ifdef CONFIG_PPC_SPLPAR

#include <asm/dtl.h>

/*
 * Scan the dispatch trace log and count up the stolen time.
 * Should be called with interrupts disabled.
 */
static u64 scan_dispatch_log(u64 stop_tb)
{
	u64 i = local_paca->dtl_ridx;
	struct dtl_entry *dtl = local_paca->dtl_curr;
	struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
	struct lppaca *vpa = local_paca->lppaca_ptr;
	u64 tb_delta;
	u64 stolen = 0;
	u64 dtb;

	if (!dtl)
		return 0;

	if (i == be64_to_cpu(vpa->dtl_idx))
		return 0;
	while (i < be64_to_cpu(vpa->dtl_idx)) {
		dtb = be64_to_cpu(dtl->timebase);
		tb_delta = be32_to_cpu(dtl->enqueue_to_dispatch_time) +
			be32_to_cpu(dtl->ready_to_enqueue_time);
		barrier();
		if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) {
			/* buffer has overflowed */
			i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG;
			dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
			continue;
		}
		if (dtb > stop_tb)
			break;
		if (dtl_consumer)
			dtl_consumer(dtl, i);
		stolen += tb_delta;
		++i;
		++dtl;
		if (dtl == dtl_end)
			dtl = local_paca->dispatch_log;
	}
	local_paca->dtl_ridx = i;
	local_paca->dtl_curr = dtl;
	return stolen;
}

/*
 * Accumulate stolen time by scanning the dispatch trace log.
 * Called on entry from user mode.
 */
void notrace accumulate_stolen_time(void)
{
	u64 sst, ust;
	unsigned long save_irq_soft_mask = irq_soft_mask_return();
	struct cpu_accounting_data *acct = &local_paca->accounting;

	/* We are called early in the exception entry, before
	 * soft/hard_enabled are sync'ed to the expected state
	 * for the exception. We are hard disabled but the PACA
	 * needs to reflect that so various debug stuff doesn't
	 * complain
	 */
	irq_soft_mask_set(IRQS_DISABLED);

	sst = scan_dispatch_log(acct->starttime_user);
	ust = scan_dispatch_log(acct->starttime);
	acct->stime -= sst;
	acct->utime -= ust;
	acct->steal_time += ust + sst;

	irq_soft_mask_set(save_irq_soft_mask);
}

static inline u64 calculate_stolen_time(u64 stop_tb)
{
	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return 0;

	if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx))
		return scan_dispatch_log(stop_tb);

	return 0;
}

#else /* CONFIG_PPC_SPLPAR */
static inline u64 calculate_stolen_time(u64 stop_tb)
{
	return 0;
}

#endif /* CONFIG_PPC_SPLPAR */

/*
 * Account time for a transition between system, hard irq
 * or soft irq state.
 */
static unsigned long vtime_delta_scaled(struct cpu_accounting_data *acct,
					unsigned long now, unsigned long stime)
{
	unsigned long stime_scaled = 0;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	unsigned long nowscaled, deltascaled;
	unsigned long utime, utime_scaled;

	nowscaled = read_spurr(now);
	deltascaled = nowscaled - acct->startspurr;
	acct->startspurr = nowscaled;
	utime = acct->utime - acct->utime_sspurr;
	acct->utime_sspurr = acct->utime;

	/*
	 * Because we don't read the SPURR on every kernel entry/exit,
	 * deltascaled includes both user and system SPURR ticks.
	 * Apportion these ticks to system SPURR ticks and user
	 * SPURR ticks in the same ratio as the system time (delta)
	 * and user time (udelta) values obtained from the timebase
	 * over the same interval. The system ticks get accounted here;
	 * the user ticks get saved up in paca->user_time_scaled to be
	 * used by account_process_tick.
	 */
	stime_scaled = stime;
	utime_scaled = utime;
	if (deltascaled != stime + utime) {
		if (utime) {
			stime_scaled = deltascaled * stime / (stime + utime);
			utime_scaled = deltascaled - stime_scaled;
		} else {
			stime_scaled = deltascaled;
		}
	}
	acct->utime_scaled += utime_scaled;
#endif

	return stime_scaled;
}
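
/*
 * For example (illustrative numbers): if the timebase shows stime = 30
 * and utime = 10 ticks since the last snapshot, but the SPURR advanced
 * by deltascaled = 60, the 60 scaled ticks are split in the same 3:1
 * ratio, giving stime_scaled = 60 * 30 / 40 = 45 and
 * utime_scaled = 60 - 45 = 15.
 */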

static unsigned long vtime_delta(struct cpu_accounting_data *acct,
				 unsigned long *stime_scaled,
				 unsigned long *steal_time)
{
	unsigned long now, stime;

	WARN_ON_ONCE(!irqs_disabled());

	now = mftb();
	stime = now - acct->starttime;
	acct->starttime = now;

	*stime_scaled = vtime_delta_scaled(acct, now, stime);

	*steal_time = calculate_stolen_time(now);

	return stime;
}

static void vtime_delta_kernel(struct cpu_accounting_data *acct,
			       unsigned long *stime, unsigned long *stime_scaled)
{
	unsigned long steal_time;

	*stime = vtime_delta(acct, stime_scaled, &steal_time);
	*stime -= min(*stime, steal_time);
	acct->steal_time += steal_time;
}

void vtime_account_kernel(struct task_struct *tsk)
{
	struct cpu_accounting_data *acct = get_accounting(tsk);
	unsigned long stime, stime_scaled;

	vtime_delta_kernel(acct, &stime, &stime_scaled);

	if (tsk->flags & PF_VCPU) {
		acct->gtime += stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
		acct->utime_scaled += stime_scaled;
#endif
	} else {
		acct->stime += stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
		acct->stime_scaled += stime_scaled;
#endif
	}
}
EXPORT_SYMBOL_GPL(vtime_account_kernel);

void vtime_account_idle(struct task_struct *tsk)
{
	unsigned long stime, stime_scaled, steal_time;
	struct cpu_accounting_data *acct = get_accounting(tsk);

	stime = vtime_delta(acct, &stime_scaled, &steal_time);
	acct->idle_time += stime + steal_time;
}

static void vtime_account_irq_field(struct cpu_accounting_data *acct,
				    unsigned long *field)
{
	unsigned long stime, stime_scaled;

	vtime_delta_kernel(acct, &stime, &stime_scaled);
	*field += stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	acct->stime_scaled += stime_scaled;
#endif
}

void vtime_account_softirq(struct task_struct *tsk)
{
	struct cpu_accounting_data *acct = get_accounting(tsk);
	vtime_account_irq_field(acct, &acct->softirq_time);
}

void vtime_account_hardirq(struct task_struct *tsk)
{
	struct cpu_accounting_data *acct = get_accounting(tsk);
	vtime_account_irq_field(acct, &acct->hardirq_time);
}

static void vtime_flush_scaled(struct task_struct *tsk,
			       struct cpu_accounting_data *acct)
{
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	if (acct->utime_scaled)
		tsk->utimescaled += cputime_to_nsecs(acct->utime_scaled);
	if (acct->stime_scaled)
		tsk->stimescaled += cputime_to_nsecs(acct->stime_scaled);

	acct->utime_scaled = 0;
	acct->utime_sspurr = 0;
	acct->stime_scaled = 0;
#endif
}

/*
 * Account the whole cputime accumulated in the paca
 * Must be called with interrupts disabled.
 * Assumes that vtime_account_kernel/idle() has been called
 * recently (i.e. since the last entry from usermode) so that
 * get_paca()->user_time_scaled is up to date.
 */
void vtime_flush(struct task_struct *tsk)
{
	struct cpu_accounting_data *acct = get_accounting(tsk);

	if (acct->utime)
		account_user_time(tsk, cputime_to_nsecs(acct->utime));

	if (acct->gtime)
		account_guest_time(tsk, cputime_to_nsecs(acct->gtime));

	if (IS_ENABLED(CONFIG_PPC_SPLPAR) && acct->steal_time) {
		account_steal_time(cputime_to_nsecs(acct->steal_time));
		acct->steal_time = 0;
	}

	if (acct->idle_time)
		account_idle_time(cputime_to_nsecs(acct->idle_time));

	if (acct->stime)
		account_system_index_time(tsk, cputime_to_nsecs(acct->stime),
					  CPUTIME_SYSTEM);

	if (acct->hardirq_time)
		account_system_index_time(tsk, cputime_to_nsecs(acct->hardirq_time),
					  CPUTIME_IRQ);
	if (acct->softirq_time)
		account_system_index_time(tsk, cputime_to_nsecs(acct->softirq_time),
					  CPUTIME_SOFTIRQ);

	vtime_flush_scaled(tsk, acct);

	acct->utime = 0;
	acct->gtime = 0;
	acct->idle_time = 0;
	acct->stime = 0;
	acct->hardirq_time = 0;
	acct->softirq_time = 0;
}

#else /* ! CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#define calc_cputime_factors()
#endif

void __delay(unsigned long loops)
{
	unsigned long start;

	spin_begin();
	if (tb_invalid) {
		/*
		 * TB is in error state and isn't ticking anymore.
		 * HMI handler was unable to recover from TB error.
		 * Return immediately, so that kernel won't get stuck here.
		 */
		spin_cpu_relax();
	} else {
		start = mftb();
		while (mftb() - start < loops)
			spin_cpu_relax();
	}
	spin_end();
}
EXPORT_SYMBOL(__delay);

void udelay(unsigned long usecs)
{
	__delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);

#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (in_lock_functions(pc))
		return regs->link;

	return pc;
}
EXPORT_SYMBOL(profile_pc);
#endif

#ifdef CONFIG_IRQ_WORK

/*
 * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
 */
#ifdef CONFIG_PPC64
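/*
 * On 64-bit, r13 holds the pointer to this CPU's PACA throughout
 * kernel code, so the flag can be read and written with a single byte
 * load/store relative to r13; no address computation is needed inside
 * the accessors themselves.
 */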
static inline unsigned long test_irq_work_pending(void)
{
	unsigned long x;

	asm volatile("lbz %0,%1(13)"
		     : "=r" (x)
		     : "i" (offsetof(struct paca_struct, irq_work_pending)));
	return x;
}

static inline void set_irq_work_pending_flag(void)
{
	asm volatile("stb %0,%1(13)" : :
		     "r" (1),
		     "i" (offsetof(struct paca_struct, irq_work_pending)));
}

static inline void clear_irq_work_pending(void)
{
	asm volatile("stb %0,%1(13)" : :
		     "r" (0),
		     "i" (offsetof(struct paca_struct, irq_work_pending)));
}

#else /* 32-bit */

DEFINE_PER_CPU(u8, irq_work_pending);

#define set_irq_work_pending_flag()	__this_cpu_write(irq_work_pending, 1)
#define test_irq_work_pending()		__this_cpu_read(irq_work_pending)
#define clear_irq_work_pending()	__this_cpu_write(irq_work_pending, 0)

#endif /* 32 vs 64 bit */

void arch_irq_work_raise(void)
{
	/*
	 * 64-bit code that uses irq soft-mask can just cause an immediate
	 * interrupt here that gets soft masked, if this is called under
	 * local_irq_disable(). It might be possible to prevent that happening
	 * by noticing interrupts are disabled and setting decrementer pending
	 * to be replayed when irqs are enabled. The problem there is that
	 * tracing can call irq_work_raise, including in code that does low
	 * level manipulations of irq soft-mask state (e.g., trace_hardirqs_on)
	 * which could get tangled up if we're messing with the same state
	 * here.
	 */
	preempt_disable();
	set_irq_work_pending_flag();
	set_dec(1);
	preempt_enable();
}

#else  /* CONFIG_IRQ_WORK */

#define test_irq_work_pending()	0
#define clear_irq_work_pending()

#endif /* CONFIG_IRQ_WORK */

/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 */
DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
{
	struct clock_event_device *evt = this_cpu_ptr(&decrementers);
	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
	struct pt_regs *old_regs;
	u64 now;

	/*
	 * Some implementations of hotplug will get timer interrupts while
	 * offline, just ignore these.
	 */
	if (unlikely(!cpu_online(smp_processor_id()))) {
		set_dec(decrementer_max);
		return;
	}

	/* Ensure a positive value is written to the decrementer, or else
	 * some CPUs will continue to take decrementer exceptions. When the
	 * PPC_WATCHDOG (decrementer based) is configured, keep this at most
	 * 31 bits, which is about 4 seconds on most systems, which gives
	 * the watchdog a chance of catching timer interrupt hard lockups.
	 */
	if (IS_ENABLED(CONFIG_PPC_WATCHDOG))
		set_dec(0x7fffffff);
	else
		set_dec(decrementer_max);

	/* Conditionally hard-enable interrupts now that the DEC has been
	 * bumped to its maximum value
	 */
	may_hard_irq_enable();

#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
	if (atomic_read(&ppc_n_lost_interrupts) != 0)
		do_IRQ(regs);
#endif

	old_regs = set_irq_regs(regs);

	trace_timer_interrupt_entry(regs);

	if (test_irq_work_pending()) {
		clear_irq_work_pending();
		irq_work_run();
	}

	now = get_tb();
	if (now >= *next_tb) {
		*next_tb = ~(u64)0;
		if (evt->event_handler)
			evt->event_handler(evt);
		__this_cpu_inc(irq_stat.timer_irqs_event);
	} else {
		now = *next_tb - now;
		if (now <= decrementer_max)
			set_dec(now);
		/* We may have raced with new irq work */
		if (test_irq_work_pending())
			set_dec(1);
		__this_cpu_inc(irq_stat.timer_irqs_others);
	}

	trace_timer_interrupt_exit(regs);

	set_irq_regs(old_regs);
}
EXPORT_SYMBOL(timer_interrupt);

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void timer_broadcast_interrupt(void)
{
	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);

	*next_tb = ~(u64)0;
	tick_receive_broadcast();
	__this_cpu_inc(irq_stat.broadcast_irqs_event);
}
#endif

#ifdef CONFIG_SUSPEND
static void generic_suspend_disable_irqs(void)
{
	/* Disable the decrementer, so that it doesn't interfere
	 * with suspending.
	 */

	set_dec(decrementer_max);
	local_irq_disable();
	set_dec(decrementer_max);
}

static void generic_suspend_enable_irqs(void)
{
	local_irq_enable();
}

/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_disable_irqs(void)
{
	if (ppc_md.suspend_disable_irqs)
		ppc_md.suspend_disable_irqs();
	generic_suspend_disable_irqs();
}

/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_enable_irqs(void)
{
	generic_suspend_enable_irqs();
	if (ppc_md.suspend_enable_irqs)
		ppc_md.suspend_enable_irqs();
}
#endif

unsigned long long tb_to_ns(unsigned long long ticks)
{
	return mulhdu(ticks, tb_to_ns_scale) << tb_to_ns_shift;
}
EXPORT_SYMBOL_GPL(tb_to_ns);

/*
 * Scheduler clock - returns current time in nanosec units.
 *
 * Note: mulhdu(a, b) (multiply high double unsigned) returns
 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
 * are 64-bit unsigned numbers.
 */
notrace unsigned long long sched_clock(void)
{
	return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
}

#ifdef CONFIG_PPC_PSERIES

/*
 * Running clock - attempts to give a view of time passing for virtualised
 * kernels.
 * Uses the VTB register if available otherwise a next best guess.
 */
unsigned long long running_clock(void)
{
	/*
	 * Don't read the VTB as a host since KVM does not switch the host
	 * timebase into the VTB when it takes a guest off the CPU; reading
	 * the VTB would result in reading the 'last switched out' guest VTB.
	 *
	 * Host kernels are often compiled with CONFIG_PPC_PSERIES checked, so
	 * it would be unsafe to rely only on the #ifdef above.
	 */
	if (firmware_has_feature(FW_FEATURE_LPAR) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		return mulhdu(get_vtb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;

	/*
	 * This is the next best approximation without a VTB.
	 * On a host running bare metal there should never be any stolen
	 * time, and on a host which doesn't do any virtualisation TB *should*
	 * equal VTB, so it makes no difference anyway.
	 */
	return local_clock() - kcpustat_this_cpu->cpustat[CPUTIME_STEAL];
}
#endif

static int __init get_freq(char *name, int cells, unsigned long *val)
{
	struct device_node *cpu;
	const __be32 *fp;
	int found = 0;

	/* The cpu node should have timebase and clock frequency properties */
	cpu = of_find_node_by_type(NULL, "cpu");

	if (cpu) {
		fp = of_get_property(cpu, name, NULL);
		if (fp) {
			found = 1;
			*val = of_read_ulong(fp, cells);
		}

		of_node_put(cpu);
	}

	return found;
}

static void start_cpu_decrementer(void)
{
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
	unsigned int tcr;

	/* Clear any pending timer interrupts */
	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

	tcr = mfspr(SPRN_TCR);
	/*
	 * The watchdog may have already been enabled by u-boot. So leave
	 * TCR[WP] (Watchdog Period) alone.
	 */
	tcr &= TCR_WP_MASK;	/* Clear all bits except for TCR[WP] */
	tcr |= TCR_DIE;		/* Enable decrementer */
	mtspr(SPRN_TCR, tcr);
#endif
}

void __init generic_calibrate_decr(void)
{
	ppc_tb_freq = DEFAULT_TB_FREQ;		/* hardcoded default */

	if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
	    !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {

		printk(KERN_ERR "WARNING: Estimating decrementer frequency "
				"(not found)\n");
	}

	ppc_proc_freq = DEFAULT_PROC_FREQ;	/* hardcoded default */

	if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
	    !get_freq("clock-frequency", 1, &ppc_proc_freq)) {

		printk(KERN_ERR "WARNING: Estimating processor frequency "
				"(not found)\n");
	}
}

int update_persistent_clock64(struct timespec64 now)
{
	struct rtc_time tm;

	if (!ppc_md.set_rtc_time)
		return -ENODEV;

	rtc_time64_to_tm(now.tv_sec + 1 + timezone_offset, &tm);

	return ppc_md.set_rtc_time(&tm);
}

static void __read_persistent_clock(struct timespec64 *ts)
{
	struct rtc_time tm;
	static int first = 1;

	ts->tv_nsec = 0;
	/* XXX this is a little fragile but will work okay in the short term */
	if (first) {
		first = 0;
		if (ppc_md.time_init)
			timezone_offset = ppc_md.time_init();

		/* get_boot_time() isn't guaranteed to be safe to call late */
		if (ppc_md.get_boot_time) {
			ts->tv_sec = ppc_md.get_boot_time() - timezone_offset;
			return;
		}
	}
	if (!ppc_md.get_rtc_time) {
		ts->tv_sec = 0;
		return;
	}
	ppc_md.get_rtc_time(&tm);

	ts->tv_sec = rtc_tm_to_time64(&tm);
}

void read_persistent_clock64(struct timespec64 *ts)
{
	__read_persistent_clock(ts);

	/* Sanitize it in case real time clock is set below EPOCH */
	if (ts->tv_sec < 0) {
		ts->tv_sec = 0;
		ts->tv_nsec = 0;
	}
}

/* clocksource code */
static notrace u64 timebase_read(struct clocksource *cs)
{
	return (u64)get_tb();
}

static void __init clocksource_init(void)
{
	struct clocksource *clock = &clocksource_timebase;

	if (clocksource_register_hz(clock, tb_ticks_per_sec)) {
		printk(KERN_ERR "clocksource: %s is already registered\n",
		       clock->name);
		return;
	}

	printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n",
	       clock->name, clock->mult, clock->shift);
}

static int decrementer_set_next_event(unsigned long evt,
				      struct clock_event_device *dev)
{
	__this_cpu_write(decrementers_next_tb, get_tb() + evt);
	set_dec(evt);

	/* We may have raced with new irq work */
	if (test_irq_work_pending())
		set_dec(1);

	return 0;
}

static int decrementer_shutdown(struct clock_event_device *dev)
{
	decrementer_set_next_event(decrementer_max, dev);
	return 0;
}

static void register_decrementer_clockevent(int cpu)
{
	struct clock_event_device *dec = &per_cpu(decrementers, cpu);

	*dec = decrementer_clockevent;
	dec->cpumask = cpumask_of(cpu);

	clockevents_config_and_register(dec, ppc_tb_freq, 2, decrementer_max);

	printk_once(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n",
		    dec->name, dec->mult, dec->shift, cpu);

	/* Set values for KVM, see kvm_emulate_dec() */
	decrementer_clockevent.mult = dec->mult;
	decrementer_clockevent.shift = dec->shift;
}

static void enable_large_decrementer(void)
{
	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		return;

	if (decrementer_max <= DECREMENTER_DEFAULT_MAX)
		return;

	/*
	 * If we're running as the hypervisor we need to enable the LD manually
	 * otherwise firmware should have done it for us.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE))
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_LD);
}

static void __init set_decrementer_max(void)
{
	struct device_node *cpu;
	u32 bits = 32;

	/* Prior to ISAv3 the decrementer is always 32 bit */
	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		return;

	cpu = of_find_node_by_type(NULL, "cpu");

	if (of_property_read_u32(cpu, "ibm,dec-bits", &bits) == 0) {
		if (bits > 64 || bits < 32) {
			pr_warn("time_init: firmware supplied invalid ibm,dec-bits");
			bits = 32;
		}

		/* calculate the signed maximum given this many bits */
		decrementer_max = (1ul << (bits - 1)) - 1;
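		/*
		 * For instance, a device tree reporting ibm,dec-bits = 56
		 * (an illustrative large-decrementer width) yields
		 * decrementer_max = 2^55 - 1.
		 */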
	}

	of_node_put(cpu);

	pr_info("time_init: %u bit decrementer (max: %llx)\n",
		bits, decrementer_max);
}

static void __init init_decrementer_clockevent(void)
{
	register_decrementer_clockevent(smp_processor_id());
}

void secondary_cpu_time_init(void)
{
	/* Enable and test the large decrementer for this cpu */
	enable_large_decrementer();

	/* Start the decrementer on CPUs that have manual control
	 * such as BookE
	 */
	start_cpu_decrementer();

	/* FIXME: Should make an unrelated change to move the
	 * snapshot_timebase call here! */
	register_decrementer_clockevent(smp_processor_id());
}

/* This function is only called on the boot processor */
void __init time_init(void)
{
	struct div_result res;
	u64 scale;
	unsigned shift;

	/* Normal PowerPC with timebase register */
	ppc_md.calibrate_decr();
	printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
	       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
	printk(KERN_DEBUG "time_init: processor frequency   = %lu.%.6lu MHz\n",
	       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);

	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
	tb_ticks_per_sec = ppc_tb_freq;
	tb_ticks_per_usec = ppc_tb_freq / 1000000;
	calc_cputime_factors();

	/*
	 * Compute scale factor for sched_clock.
	 * The calibrate_decr() function has set tb_ticks_per_sec,
	 * which is the timebase frequency.
	 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
	 * the 128-bit result as a 64.64 fixed-point number.
	 * We then shift that number right until it is less than 1.0,
	 * giving us the scale factor and shift count to use in
	 * sched_clock().
	 */
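	/*
	 * Worked example (with a hypothetical 512 MHz timebase):
	 * 1e9 * 2^64 / 512e6 = 1.953125 * 2^64, which is >= 1.0 in 64.64
	 * fixed point, so one right shift gives scale = 0.9765625 * 2^64
	 * and shift = 1; sched_clock() then returns
	 * (mulhdu(tb, scale) << 1) = tb * 1000 / 512 ns, as required.
	 */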
	div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
	scale = res.result_low;
	for (shift = 0; res.result_high != 0; ++shift) {
		scale = (scale >> 1) | (res.result_high << 63);
		res.result_high >>= 1;
	}
	tb_to_ns_scale = scale;
	tb_to_ns_shift = shift;
	/* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
	boot_tb = get_tb();

	/* If platform provided a timezone (pmac), we correct the time */
	if (timezone_offset) {
		sys_tz.tz_minuteswest = -timezone_offset / 60;
		sys_tz.tz_dsttime = 0;
	}

	vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;

	/* initialise and enable the large decrementer (if we have one) */
	set_decrementer_max();
	enable_large_decrementer();

	/* Start the decrementer on CPUs that have manual control
	 * such as BookE
	 */
	start_cpu_decrementer();

	/* Register the clocksource */
	clocksource_init();

	init_decrementer_clockevent();
	tick_setup_hrtimer_broadcast();

	of_clk_init(NULL);
	enable_sched_clock_irqtime();
}

/*
 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
 * result.
 */
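/*
 * This is schoolbook long division in base 2^32: the dividend is split
 * into four 32-bit "digits" (a, b, c, d, most significant first) and
 * divided digit by digit, each step folding the previous remainder
 * into the next digit via do_div(), which divides a u64 in place and
 * returns the remainder. The four 32-bit quotient digits (w, x, y, z)
 * are then reassembled into the 128-bit result.
 */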
void div128_by_32(u64 dividend_high, u64 dividend_low,
		  unsigned divisor, struct div_result *dr)
{
	unsigned long a, b, c, d;
	unsigned long w, x, y, z;
	u64 ra, rb, rc;

	a = dividend_high >> 32;
	b = dividend_high & 0xffffffff;
	c = dividend_low >> 32;
	d = dividend_low & 0xffffffff;

	w = a / divisor;
	ra = ((u64)(a - (w * divisor)) << 32) + b;

	rb = ((u64) do_div(ra, divisor) << 32) + c;
	x = ra;

	rc = ((u64) do_div(rb, divisor) << 32) + d;
	y = rb;

	do_div(rc, divisor);
	z = rc;

	dr->result_high = ((u64)w << 32) + x;
	dr->result_low  = ((u64)y << 32) + z;
}

/* We don't need to calibrate delay, we use the CPU timebase for that */
void calibrate_delay(void)
{
	/* Some generic code (such as spinlock debug) use loops_per_jiffy
	 * as the number of __delay(1) in a jiffy, so make it so
	 */
	loops_per_jiffy = tb_ticks_per_jiffy;
}

#if IS_ENABLED(CONFIG_RTC_DRV_GENERIC)
static int rtc_generic_get_time(struct device *dev, struct rtc_time *tm)
{
	ppc_md.get_rtc_time(tm);
	return 0;
}

static int rtc_generic_set_time(struct device *dev, struct rtc_time *tm)
{
	if (!ppc_md.set_rtc_time)
		return -EOPNOTSUPP;

	if (ppc_md.set_rtc_time(tm) < 0)
		return -EOPNOTSUPP;

	return 0;
}

static const struct rtc_class_ops rtc_generic_ops = {
	.read_time = rtc_generic_get_time,
	.set_time = rtc_generic_set_time,
};

static int __init rtc_init(void)
{
	struct platform_device *pdev;

	if (!ppc_md.get_rtc_time)
		return -ENODEV;

	pdev = platform_device_register_data(NULL, "rtc-generic", -1,
					     &rtc_generic_ops,
					     sizeof(rtc_generic_ops));

	return PTR_ERR_OR_ZERO(pdev);
}

device_initcall(rtc_init);
#endif