// SPDX-License-Identifier: GPL-2.0
/*
 * Time of day based timer functions.
 *
 * S390 version
 *   Copyright IBM Corp. 1999, 2008
 *   Author(s): Hartmut Penner (hp@de.ibm.com),
 *		Martin Schwidefsky (schwidefsky@de.ibm.com),
 *		Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
 *
 * Derived from "arch/i386/kernel/time.c"
 *   Copyright (C) 1991, 1992, 1995  Linus Torvalds
 */

#define KMSG_COMPONENT "time"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/stop_machine.h>
#include <linux/time.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/profile.h>
#include <linux/timex.h>
#include <linux/notifier.h>
#include <linux/timekeeper_internal.h>
#include <linux/clockchips.h>
#include <linux/gfp.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <vdso/vsyscall.h>
#include <vdso/clocksource.h>
#include <vdso/helpers.h>
#include <asm/facility.h>
#include <asm/delay.h>
#include <asm/div64.h>
#include <asm/vdso.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/vtimer.h>
#include <asm/stp.h>
#include <asm/cio.h>
#include "entry.h"

unsigned char tod_clock_base[16] __aligned(8) = {
	/* Force to data section. */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
EXPORT_SYMBOL_GPL(tod_clock_base);

u64 clock_comparator_max = -1ULL;
EXPORT_SYMBOL_GPL(clock_comparator_max);

static DEFINE_PER_CPU(struct clock_event_device, comparators);

ATOMIC_NOTIFIER_HEAD(s390_epoch_delta_notifier);
EXPORT_SYMBOL(s390_epoch_delta_notifier);

unsigned char ptff_function_mask[16];

static unsigned long long lpar_offset;
static unsigned long long initial_leap_seconds;
static unsigned long long tod_steering_end;
static long long tod_steering_delta;

/*
 * Get time offsets with PTFF
 */
void __init time_early_init(void)
{
	struct ptff_qto qto;
	struct ptff_qui qui;

	/* Initialize TOD steering parameters */
	tod_steering_end = *(unsigned long long *) &tod_clock_base[1];
	vdso_data->arch_data.tod_steering_end = tod_steering_end;

	if (!test_facility(28))
		return;

	ptff(&ptff_function_mask, sizeof(ptff_function_mask), PTFF_QAF);

	/* get LPAR offset */
	if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
		lpar_offset = qto.tod_epoch_difference;

	/* get initial leap seconds */
	if (ptff_query(PTFF_QUI) && ptff(&qui, sizeof(qui), PTFF_QUI) == 0)
		initial_leap_seconds = (unsigned long long)
			((long) qui.old_leap * 4096000000L);
}
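
/*
 * The TOD clock advances in units of 2^-12 microseconds, i.e. one second
 * corresponds to 4,096,000,000 TOD clock units. That is the 4096000000L
 * factor used above to convert the leap second count returned by
 * PTFF-QUI into TOD clock units.
 */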

/*
 * Scheduler clock - returns current time in nanosec units.
 */
unsigned long long notrace sched_clock(void)
{
	return tod_to_ns(get_tod_clock_monotonic());
}
NOKPROBE_SYMBOL(sched_clock);

static void ext_to_timespec64(unsigned char *clk, struct timespec64 *xt)
{
	unsigned long long high, low, rem, sec, nsec;

	/* Split extended TOD clock to micro-seconds and sub-micro-seconds */
	high = (*(unsigned long long *) clk) >> 4;
	low = (*(unsigned long long *)&clk[7]) << 4;
	/* Calculate seconds and nano-seconds */
	sec = high;
	rem = do_div(sec, 1000000);
	nsec = (((low >> 32) + (rem << 32)) * 1000) >> 32;

	xt->tv_sec = sec;
	xt->tv_nsec = nsec;
}

void clock_comparator_work(void)
{
	struct clock_event_device *cd;

	S390_lowcore.clock_comparator = clock_comparator_max;
	cd = this_cpu_ptr(&comparators);
	cd->event_handler(cd);
}

static int s390_next_event(unsigned long delta,
			   struct clock_event_device *evt)
{
	S390_lowcore.clock_comparator = get_tod_clock() + delta;
	set_clock_comparator(S390_lowcore.clock_comparator);
	return 0;
}
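
/*
 * The clock comparator is programmed in TOD clock units. The clockevent
 * conversion factors used below (mult = 16777, shift = 12) therefore turn
 * a nanosecond delta into TOD clock units: ticks = (ns * 16777) >> 12,
 * i.e. roughly ns * 4.096, matching the 4.096 TOD clock units per
 * nanosecond of the hardware clock.
 */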

/*
 * Set up lowcore and control register of the current cpu to
 * enable TOD clock and clock comparator interrupts.
 */
void init_cpu_timer(void)
{
	struct clock_event_device *cd;
	int cpu;

	S390_lowcore.clock_comparator = clock_comparator_max;
	set_clock_comparator(S390_lowcore.clock_comparator);

	cpu = smp_processor_id();
	cd = &per_cpu(comparators, cpu);
	cd->name = "comparator";
	cd->features = CLOCK_EVT_FEAT_ONESHOT;
	cd->mult = 16777;
	cd->shift = 12;
	cd->min_delta_ns = 1;
	cd->min_delta_ticks = 1;
	cd->max_delta_ns = LONG_MAX;
	cd->max_delta_ticks = ULONG_MAX;
	cd->rating = 400;
	cd->cpumask = cpumask_of(cpu);
	cd->set_next_event = s390_next_event;

	clockevents_register_device(cd);

	/* Enable clock comparator timer interrupt. */
	__ctl_set_bit(0, 11);

	/* Always allow the timing alert external interrupt. */
	__ctl_set_bit(0, 4);
}

static void clock_comparator_interrupt(struct ext_code ext_code,
				       unsigned int param32,
				       unsigned long param64)
{
	inc_irq_stat(IRQEXT_CLK);
	if (S390_lowcore.clock_comparator == clock_comparator_max)
		set_clock_comparator(S390_lowcore.clock_comparator);
}

static void stp_timing_alert(struct stp_irq_parm *);

static void timing_alert_interrupt(struct ext_code ext_code,
				   unsigned int param32, unsigned long param64)
{
	inc_irq_stat(IRQEXT_TLA);
	if (param32 & 0x00038000)
		stp_timing_alert((struct stp_irq_parm *) &param32);
}

static void stp_reset(void);

void read_persistent_clock64(struct timespec64 *ts)
{
	unsigned char clk[STORE_CLOCK_EXT_SIZE];
	__u64 delta;

	delta = initial_leap_seconds + TOD_UNIX_EPOCH;
	get_tod_clock_ext(clk);
	*(__u64 *) &clk[1] -= delta;
	if (*(__u64 *) &clk[1] > delta)
		clk[0]--;
	ext_to_timespec64(clk, ts);
}

void __init read_persistent_wall_and_boot_offset(struct timespec64 *wall_time,
						 struct timespec64 *boot_offset)
{
	unsigned char clk[STORE_CLOCK_EXT_SIZE];
	struct timespec64 boot_time;
	__u64 delta;

	delta = initial_leap_seconds + TOD_UNIX_EPOCH;
	memcpy(clk, tod_clock_base, STORE_CLOCK_EXT_SIZE);
	*(__u64 *)&clk[1] -= delta;
	if (*(__u64 *)&clk[1] > delta)
		clk[0]--;
	ext_to_timespec64(clk, &boot_time);

	read_persistent_clock64(wall_time);
	*boot_offset = timespec64_sub(*wall_time, boot_time);
}

static u64 read_tod_clock(struct clocksource *cs)
{
	unsigned long long now, adj;

	preempt_disable(); /* protect from changes to steering parameters */
	now = get_tod_clock();
	adj = tod_steering_end - now;
	if (unlikely((s64) adj > 0))
		/*
		 * manually steer by 1 cycle every 2^16 cycles. This
		 * corresponds to shifting the tod delta by 15. 1s is
		 * therefore steered in ~9h. The adjust will decrease
		 * over time, until it finally reaches 0.
		 */
		now += (tod_steering_delta < 0) ? (adj >> 15) : -(adj >> 15);
	preempt_enable();
	return now;
}

static struct clocksource clocksource_tod = {
	.name = "tod",
	.rating = 400,
	.read = read_tod_clock,
	.mask = CLOCKSOURCE_MASK(64),
	.mult = 1000,
	.shift = 12,
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
	.vdso_clock_mode = VDSO_CLOCKMODE_TOD,
};
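
/*
 * With mult = 1000 and shift = 12 the generic timekeeping code converts
 * TOD clock deltas to nanoseconds as (delta * 1000) >> 12, i.e. one TOD
 * clock unit accounts for 1000/4096 ns (about 0.244 ns).
 */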

struct clocksource * __init clocksource_default_clock(void)
{
	return &clocksource_tod;
}

/*
 * Initialize the TOD clock and the CPU timer of
 * the boot cpu.
 */
void __init time_init(void)
{
	/* Reset time synchronization interfaces. */
	stp_reset();

	/* request the clock comparator external interrupt */
	if (register_external_irq(EXT_IRQ_CLK_COMP, clock_comparator_interrupt))
		panic("Couldn't request external interrupt 0x1004");

	/* request the timing alert external interrupt */
	if (register_external_irq(EXT_IRQ_TIMING_ALERT, timing_alert_interrupt))
		panic("Couldn't request external interrupt 0x1406");

	if (__clocksource_register(&clocksource_tod) != 0)
		panic("Could not register TOD clock source");

	/* Enable TOD clock interrupts on the boot cpu. */
	init_cpu_timer();

	/* Enable cpu timer interrupts on the boot cpu. */
	vtime_init();
}

static DEFINE_PER_CPU(atomic_t, clock_sync_word);
static DEFINE_MUTEX(clock_sync_mutex);
static unsigned long clock_sync_flags;

#define CLOCK_SYNC_HAS_STP		0
#define CLOCK_SYNC_STP			1
#define CLOCK_SYNC_STPINFO_VALID	2

/*
 * The get_clock function for the physical clock. It will get the current
 * TOD clock, subtract the LPAR offset and write the result to *clock.
 * The function returns 0 if the clock is in sync with the external time
 * source. It returns -EOPNOTSUPP if the machine has no STP interface
 * (local clock mode), -EACCES if STP is not enabled, and -EAGAIN if the
 * clock is not in sync with the external reference.
 */
int get_phys_clock(unsigned long *clock)
{
	atomic_t *sw_ptr;
	unsigned int sw0, sw1;

	sw_ptr = &get_cpu_var(clock_sync_word);
	sw0 = atomic_read(sw_ptr);
	*clock = get_tod_clock() - lpar_offset;
	sw1 = atomic_read(sw_ptr);
	put_cpu_var(clock_sync_word);
	if (sw0 == sw1 && (sw0 & 0x80000000U))
		/* Success: time is in sync. */
		return 0;
	if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
		return -EOPNOTSUPP;
	if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
		return -EACCES;
	return -EAGAIN;
}
EXPORT_SYMBOL(get_phys_clock);

/*
 * Make get_phys_clock() return -EAGAIN.
 */
static void disable_sync_clock(void *dummy)
{
	atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);
	/*
	 * Clear the in-sync bit 2^31. All get_phys_clock calls will
	 * fail until the sync bit is turned back on. In addition
	 * increase the "sequence" counter to avoid the race of an
	 * STP event and the complete recovery against get_phys_clock.
	 */
	atomic_andnot(0x80000000, sw_ptr);
	atomic_inc(sw_ptr);
}

/*
 * Make get_phys_clock() return 0 again.
 * Must be called with preemption disabled.
 */
static void enable_sync_clock(void)
{
	atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);
	atomic_or(0x80000000, sw_ptr);
}

/*
 * Function to check if the clock is in sync.
 */
static inline int check_sync_clock(void)
{
	atomic_t *sw_ptr;
	int rc;

	sw_ptr = &get_cpu_var(clock_sync_word);
	rc = (atomic_read(sw_ptr) & 0x80000000U) != 0;
	put_cpu_var(clock_sync_word);
	return rc;
}
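
/*
 * Steering example: when a positive epoch delta is applied,
 * clock_sync_global() below adds it to tod_steering_delta and sets
 * tod_steering_end = now + abs(tod_steering_delta) << 15.
 * read_tod_clock() then subtracts (tod_steering_end - now) >> 15 from
 * every readout, which starts out equal to the delta and decays
 * linearly to zero, so the visible clock stays continuous across the
 * step and gradually catches up with the stepped TOD clock.
 */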

/*
 * Apply clock delta to the global data structures.
 * This is called once on the CPU that performed the clock sync.
 */
static void clock_sync_global(unsigned long long delta)
{
	unsigned long now, adj;
	struct ptff_qto qto;

	/* Fixup the monotonic sched clock. */
	*(unsigned long long *) &tod_clock_base[1] += delta;
	if (*(unsigned long long *) &tod_clock_base[1] < delta)
		/* Epoch overflow */
		tod_clock_base[0]++;
	/* Adjust TOD steering parameters. */
	now = get_tod_clock();
	adj = tod_steering_end - now;
	if (unlikely((s64) adj >= 0))
		/* Calculate how much of the old adjustment is left. */
		tod_steering_delta = (tod_steering_delta < 0) ?
			-(adj >> 15) : (adj >> 15);
	tod_steering_delta += delta;
	if ((abs(tod_steering_delta) >> 48) != 0)
		panic("TOD clock sync offset %lli is too large to drift\n",
		      tod_steering_delta);
	tod_steering_end = now + (abs(tod_steering_delta) << 15);
	vdso_data->arch_data.tod_steering_end = tod_steering_end;

	/* Update LPAR offset. */
	if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
		lpar_offset = qto.tod_epoch_difference;
	/* Call the TOD clock change notifier. */
	atomic_notifier_call_chain(&s390_epoch_delta_notifier, 0, &delta);
}

/*
 * Apply clock delta to the per-CPU data structures of this CPU.
 * This is called for each online CPU after the call to clock_sync_global.
 */
static void clock_sync_local(unsigned long long delta)
{
	/* Add the delta to the clock comparator. */
	if (S390_lowcore.clock_comparator != clock_comparator_max) {
		S390_lowcore.clock_comparator += delta;
		set_clock_comparator(S390_lowcore.clock_comparator);
	}
	/* Adjust the last_update_clock time-stamp. */
	S390_lowcore.last_update_clock += delta;
}

/* Single threaded workqueue used for stp sync events */
static struct workqueue_struct *time_sync_wq;

static void __init time_init_wq(void)
{
	if (time_sync_wq)
		return;
	time_sync_wq = create_singlethread_workqueue("timesync");
}

struct clock_sync_data {
	atomic_t cpus;
	int in_sync;
	unsigned long long clock_delta;
};

/*
 * Server Time Protocol (STP) code.
 */
static bool stp_online;
static struct stp_sstpi stp_info;
static void *stp_page;

static void stp_work_fn(struct work_struct *work);
static DEFINE_MUTEX(stp_work_mutex);
static DECLARE_WORK(stp_work, stp_work_fn);
static struct timer_list stp_timer;

static int __init early_parse_stp(char *p)
{
	return kstrtobool(p, &stp_online);
}
early_param("stp", early_parse_stp);

/*
 * Reset STP attachment.
 */
static void __init stp_reset(void)
{
	int rc;

	stp_page = (void *) get_zeroed_page(GFP_ATOMIC);
	rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000, NULL);
	if (rc == 0)
		set_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags);
	else if (stp_online) {
		pr_warn("The real or virtual hardware system does not provide an STP interface\n");
		free_page((unsigned long) stp_page);
		stp_page = NULL;
		stp_online = false;
	}
}

static void stp_timeout(struct timer_list *unused)
{
	queue_work(time_sync_wq, &stp_work);
}

static int __init stp_init(void)
{
	if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
		return 0;
	timer_setup(&stp_timer, stp_timeout, 0);
	time_init_wq();
	if (!stp_online)
		return 0;
	queue_work(time_sync_wq, &stp_work);
	return 0;
}

arch_initcall(stp_init);

/*
 * STP timing alert. There are three causes:
 * 1) timing status change
 * 2) link availability change
 * 3) time control parameter change
 * In all three cases we are only interested in the clock source state.
 * If an STP clock source is now available use it.
 */
static void stp_timing_alert(struct stp_irq_parm *intparm)
{
	if (intparm->tsc || intparm->lac || intparm->tcpc)
		queue_work(time_sync_wq, &stp_work);
}

/*
 * STP sync check machine check. This is called when the timing state
 * changes from the synchronized state to the unsynchronized state.
 * After an STP sync check the clock is not in sync. The machine check
 * is broadcast to all cpus at the same time.
 */
int stp_sync_check(void)
{
	disable_sync_clock(NULL);
	return 1;
}

/*
 * STP island condition machine check.
 * This is called when an attached
 * server attempts to communicate over an STP link and the servers
 * have matching CTN ids and have a valid stratum-1 configuration
 * but the configurations do not match.
 */
int stp_island_check(void)
{
	disable_sync_clock(NULL);
	return 1;
}

void stp_queue_work(void)
{
	queue_work(time_sync_wq, &stp_work);
}

static int __store_stpinfo(void)
{
	int rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi));

	if (rc)
		clear_bit(CLOCK_SYNC_STPINFO_VALID, &clock_sync_flags);
	else
		set_bit(CLOCK_SYNC_STPINFO_VALID, &clock_sync_flags);
	return rc;
}

static int stpinfo_valid(void)
{
	return stp_online && test_bit(CLOCK_SYNC_STPINFO_VALID, &clock_sync_flags);
}

static int stp_sync_clock(void *data)
{
	struct clock_sync_data *sync = data;
	unsigned long long clock_delta, flags;
	static int first;
	int rc;

	enable_sync_clock();
	if (xchg(&first, 1) == 0) {
		/* Wait until all other cpus entered the sync function. */
		while (atomic_read(&sync->cpus) != 0)
			cpu_relax();
		rc = 0;
		if (stp_info.todoff[0] || stp_info.todoff[1] ||
		    stp_info.todoff[2] || stp_info.todoff[3] ||
		    stp_info.tmd != 2) {
			flags = vdso_update_begin();
			rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0,
					&clock_delta);
			if (rc == 0) {
				sync->clock_delta = clock_delta;
				clock_sync_global(clock_delta);
				rc = __store_stpinfo();
				if (rc == 0 && stp_info.tmd != 2)
					rc = -EAGAIN;
			}
			vdso_update_end(flags);
		}
		sync->in_sync = rc ? -EAGAIN : 1;
		xchg(&first, 0);
	} else {
		/* Slave */
		atomic_dec(&sync->cpus);
		/* Wait for in_sync to be set. */
		while (READ_ONCE(sync->in_sync) == 0)
			__udelay(1);
	}
	if (sync->in_sync != 1)
		/* Didn't work. Clear per-cpu in sync bit again. */
		disable_sync_clock(NULL);
	/* Apply clock delta to per-CPU fields of this CPU. */
	clock_sync_local(sync->clock_delta);

	return 0;
}

static int stp_clear_leap(void)
{
	struct __kernel_timex txc;
	int ret;

	memset(&txc, 0, sizeof(txc));

	ret = do_adjtimex(&txc);
	if (ret < 0)
		return ret;

	txc.modes = ADJ_STATUS;
	txc.status &= ~(STA_INS|STA_DEL);
	return do_adjtimex(&txc);
}
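
/*
 * STA_INS / STA_DEL arm the kernel's NTP state machine to insert or
 * delete a leap second at the end of the current UTC day.
 * stp_clear_leap() first reads the current status with an empty
 * adjtimex request so that only the two leap bits are cleared and all
 * other status bits are preserved.
 */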

static void stp_check_leap(void)
{
	struct stp_stzi stzi;
	struct stp_lsoib *lsoib = &stzi.lsoib;
	struct __kernel_timex txc;
	int64_t timediff;
	int leapdiff, ret;

	if (!stp_info.lu || !check_sync_clock()) {
		/*
		 * Either a scheduled leap second was removed by the operator,
		 * or STP is out of sync. In both cases, clear the leap second
		 * kernel flags.
		 */
		if (stp_clear_leap() < 0)
			pr_err("failed to clear leap second flags\n");
		return;
	}

	if (chsc_stzi(stp_page, &stzi, sizeof(stzi))) {
		pr_err("stzi failed\n");
		return;
	}

	timediff = tod_to_ns(lsoib->nlsout - get_tod_clock()) / NSEC_PER_SEC;
	leapdiff = lsoib->nlso - lsoib->also;

	if (leapdiff != 1 && leapdiff != -1) {
		pr_err("Cannot schedule %d leap seconds\n", leapdiff);
		return;
	}

	if (timediff < 0) {
		if (stp_clear_leap() < 0)
			pr_err("failed to clear leap second flags\n");
	} else if (timediff < 7200) {
		memset(&txc, 0, sizeof(txc));
		ret = do_adjtimex(&txc);
		if (ret < 0)
			return;

		txc.modes = ADJ_STATUS;
		if (leapdiff > 0)
			txc.status |= STA_INS;
		else
			txc.status |= STA_DEL;
		ret = do_adjtimex(&txc);
		if (ret < 0)
			pr_err("failed to set leap second flags\n");
		/* arm Timer to clear leap second flags */
		mod_timer(&stp_timer, jiffies + msecs_to_jiffies(14400 * MSEC_PER_SEC));
	} else {
		/* The day the leap second is scheduled for hasn't been reached. Retry
		 * in one hour.
		 */
		mod_timer(&stp_timer, jiffies + msecs_to_jiffies(3600 * MSEC_PER_SEC));
	}
}

/*
 * STP work. Check for the STP state and take over the clock
 * synchronization if the STP clock source is usable.
 */
static void stp_work_fn(struct work_struct *work)
{
	struct clock_sync_data stp_sync;
	int rc;

	/* prevent multiple execution. */
	mutex_lock(&stp_work_mutex);

	if (!stp_online) {
		chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000, NULL);
		del_timer_sync(&stp_timer);
		goto out_unlock;
	}

	rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0xf0e0, NULL);
	if (rc)
		goto out_unlock;

	rc = __store_stpinfo();
	if (rc || stp_info.c == 0)
		goto out_unlock;

	/* Skip synchronization if the clock is already in sync. */
	if (!check_sync_clock()) {
		memset(&stp_sync, 0, sizeof(stp_sync));
		cpus_read_lock();
		atomic_set(&stp_sync.cpus, num_online_cpus() - 1);
		stop_machine_cpuslocked(stp_sync_clock, &stp_sync, cpu_online_mask);
		cpus_read_unlock();
	}

	if (!check_sync_clock())
		/*
		 * There is a usable clock but the synchronization failed.
		 * Retry after a second.
		 */
		mod_timer(&stp_timer, jiffies + msecs_to_jiffies(MSEC_PER_SEC));
	else if (stp_info.lu)
		stp_check_leap();

out_unlock:
	mutex_unlock(&stp_work_mutex);
}

/*
 * STP subsys sysfs interface functions
 */
static struct bus_type stp_subsys = {
	.name = "stp",
	.dev_name = "stp",
};

static ssize_t ctn_id_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_work_mutex);
	if (stpinfo_valid())
		ret = sprintf(buf, "%016llx\n",
			      *(unsigned long long *) stp_info.ctnid);
	mutex_unlock(&stp_work_mutex);
	return ret;
}

static DEVICE_ATTR_RO(ctn_id);

static ssize_t ctn_type_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_work_mutex);
	if (stpinfo_valid())
		ret = sprintf(buf, "%i\n", stp_info.ctn);
	mutex_unlock(&stp_work_mutex);
	return ret;
}

static DEVICE_ATTR_RO(ctn_type);

static ssize_t dst_offset_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_work_mutex);
	if (stpinfo_valid() && (stp_info.vbits & 0x2000))
		ret = sprintf(buf, "%i\n", (int)(s16) stp_info.dsto);
	mutex_unlock(&stp_work_mutex);
	return ret;
}

static DEVICE_ATTR_RO(dst_offset);

static ssize_t leap_seconds_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_work_mutex);
	if (stpinfo_valid() && (stp_info.vbits & 0x8000))
		ret = sprintf(buf, "%i\n", (int)(s16) stp_info.leaps);
	mutex_unlock(&stp_work_mutex);
	return ret;
}

static DEVICE_ATTR_RO(leap_seconds);

static ssize_t stratum_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_work_mutex);
	if (stpinfo_valid())
		ret = sprintf(buf, "%i\n", (int)(s16) stp_info.stratum);
	mutex_unlock(&stp_work_mutex);
	return ret;
}

static DEVICE_ATTR_RO(stratum);

static ssize_t time_offset_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_work_mutex);
	if (stpinfo_valid() && (stp_info.vbits & 0x0800))
		ret = sprintf(buf, "%i\n", (int) stp_info.tto);
	mutex_unlock(&stp_work_mutex);
	return ret;
}

static DEVICE_ATTR_RO(time_offset);

static ssize_t time_zone_offset_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_work_mutex);
	if (stpinfo_valid() && (stp_info.vbits & 0x4000))
		ret = sprintf(buf, "%i\n", (int)(s16) stp_info.tzo);
	mutex_unlock(&stp_work_mutex);
	return ret;
}

static DEVICE_ATTR_RO(time_zone_offset);

static ssize_t timing_mode_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_work_mutex);
	if (stpinfo_valid())
		ret = sprintf(buf, "%i\n", stp_info.tmd);
	mutex_unlock(&stp_work_mutex);
	return ret;
}

static DEVICE_ATTR_RO(timing_mode);

static ssize_t timing_state_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_work_mutex);
	if (stpinfo_valid())
		ret = sprintf(buf, "%i\n", stp_info.tst);
	mutex_unlock(&stp_work_mutex);
	return ret;
}

static DEVICE_ATTR_RO(timing_state);

static ssize_t online_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	return sprintf(buf, "%i\n", stp_online);
}

static ssize_t online_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	unsigned int value;

	value = simple_strtoul(buf, NULL, 0);
	if (value != 0 && value != 1)
		return -EINVAL;
	if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
		return -EOPNOTSUPP;
	mutex_lock(&clock_sync_mutex);
	stp_online = value;
	if (stp_online)
		set_bit(CLOCK_SYNC_STP, &clock_sync_flags);
	else
		clear_bit(CLOCK_SYNC_STP, &clock_sync_flags);
	queue_work(time_sync_wq, &stp_work);
	mutex_unlock(&clock_sync_mutex);
	return count;
}

/*
 * Can't use DEVICE_ATTR because the attribute should be named
 * stp/online but dev_attr_online already exists in this file ..
 */
static DEVICE_ATTR_RW(online);

static struct device_attribute *stp_attributes[] = {
	&dev_attr_ctn_id,
	&dev_attr_ctn_type,
	&dev_attr_dst_offset,
	&dev_attr_leap_seconds,
	&dev_attr_online,
	&dev_attr_stratum,
	&dev_attr_time_offset,
	&dev_attr_time_zone_offset,
	&dev_attr_timing_mode,
	&dev_attr_timing_state,
	NULL
};

static int __init stp_init_sysfs(void)
{
	struct device_attribute **attr;
	int rc;

	rc = subsys_system_register(&stp_subsys, NULL);
	if (rc)
		goto out;
	for (attr = stp_attributes; *attr; attr++) {
		rc = device_create_file(stp_subsys.dev_root, *attr);
		if (rc)
			goto out_unreg;
	}
	return 0;
out_unreg:
	for (; attr >= stp_attributes; attr--)
		device_remove_file(stp_subsys.dev_root, *attr);
	bus_unregister(&stp_subsys);
out:
	return rc;
}

device_initcall(stp_init_sysfs);
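
/*
 * The attributes registered above appear under /sys/devices/system/stp/,
 * e.g. /sys/devices/system/stp/online to enable or disable STP and
 * /sys/devices/system/stp/timing_state to read the current timing state.
 */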