| File: | kern/kern_ntptime.c |
| Warning: | line 310, column 10 Copies out a struct with uncleared padding (>= 4 bytes) |
| 1 | /*- |
| 2 | *********************************************************************** |
| 3 | * * |
| 4 | * Copyright (c) David L. Mills 1993-2001 * |
| 5 | * * |
| 6 | * Permission to use, copy, modify, and distribute this software and * |
| 7 | * its documentation for any purpose and without fee is hereby * |
| 8 | * granted, provided that the above copyright notice appears in all * |
| 9 | * copies and that both the copyright notice and this permission * |
| 10 | * notice appear in supporting documentation, and that the name * |
| 11 | * University of Delaware not be used in advertising or publicity * |
| 12 | * pertaining to distribution of the software without specific, * |
| 13 | * written prior permission. The University of Delaware makes no * |
| 14 | * representations about the suitability this software for any * |
| 15 | * purpose. It is provided "as is" without express or implied * |
| 16 | * warranty. * |
| 17 | * * |
| 18 | **********************************************************************/ |
| 19 | |
| 20 | /* |
| 21 | * Adapted from the original sources for FreeBSD and timecounters by: |
| 22 | * Poul-Henning Kamp <[email protected]>. |
| 23 | * |
| 24 | * The 32bit version of the "LP" macros seems a bit past its "sell by" |
| 25 | * date so I have retained only the 64bit version and included it directly |
| 26 | * in this file. |
| 27 | * |
| 28 | * Only minor changes done to interface with the timecounters over in |
| 29 | * sys/kern/kern_clock.c. Some of the comments below may be (even more) |
| 30 | * confusing and/or plain wrong in that context. |
| 31 | */ |
| 32 | |
| 33 | #include <sys/cdefs.h> |
| 34 | __FBSDID("$FreeBSD: releng/11.0/sys/kern/kern_ntptime.c 302252 2016-06-28 16:43:23Z kib $")__asm__(".ident\t\"" "$FreeBSD: releng/11.0/sys/kern/kern_ntptime.c 302252 2016-06-28 16:43:23Z kib $" "\""); |
| 35 | |
| 36 | #include "opt_ntp.h" |
| 37 | |
| 38 | #include <sys/param.h> |
| 39 | #include <sys/systm.h> |
| 40 | #include <sys/sysproto.h> |
| 41 | #include <sys/eventhandler.h> |
| 42 | #include <sys/kernel.h> |
| 43 | #include <sys/priv.h> |
| 44 | #include <sys/proc.h> |
| 45 | #include <sys/lock.h> |
| 46 | #include <sys/mutex.h> |
| 47 | #include <sys/time.h> |
| 48 | #include <sys/timex.h> |
| 49 | #include <sys/timetc.h> |
| 50 | #include <sys/timepps.h> |
| 51 | #include <sys/syscallsubr.h> |
| 52 | #include <sys/sysctl.h> |
| 53 | |
| 54 | #ifdef PPS_SYNC |
| 55 | FEATURE(pps_sync, "Support usage of external PPS signal by kernel PLL")static struct sysctl_oid sysctl___kern_features_pps_sync = { . oid_parent = ((&(&sysctl___kern_features)->oid_children )), .oid_children = { ((void *)0) }, .oid_number = ((-1)), .oid_kind = (2 | 0x00040000 | (0x80000000 | 0x00008000)), .oid_arg1 = ( ((int *)((void *)0))), .oid_arg2 = (1), .oid_name = ("pps_sync" ), .oid_handler = (sysctl_handle_int), .oid_fmt = ("I"), .oid_descr = "Support usage of external PPS signal by kernel PLL" }; __asm__ (".globl " "__start_set_sysctl_set"); __asm__(".globl " "__stop_set_sysctl_set" ); static void const * const __set_sysctl_set_sym_sysctl___kern_features_pps_sync __attribute__((__section__("set_" "sysctl_set"))) __attribute__ ((__used__)) = &(sysctl___kern_features_pps_sync); _Static_assert ((((0x80000000 | 0x00008000) & 0xf) == 0 || ((0x80000000 | 0x00008000) & 0) == 2) && sizeof(int) == sizeof( *(((int *)((void *)0)))), "compile-time assertion failed"); |
| 56 | #endif |
| 57 | |
| 58 | /* |
| 59 | * Single-precision macros for 64-bit machines |
| 60 | */ |
| 61 | typedef int64_t l_fp; |
| 62 | #define L_ADD(v, u)((v) += (u)) ((v) += (u)) |
| 63 | #define L_SUB(v, u)((v) -= (u)) ((v) -= (u)) |
| 64 | #define L_ADDHI(v, a)((v) += (int64_t)(a) << 32) ((v) += (int64_t)(a) << 32) |
| 65 | #define L_NEG(v)((v) = -(v)) ((v) = -(v)) |
| 66 | #define L_RSHIFT(v, n)do { if ((v) < 0) (v) = -(-(v) >> (n)); else (v) = ( v) >> (n); } while (0) \ |
| 67 | do { \ |
| 68 | if ((v) < 0) \ |
| 69 | (v) = -(-(v) >> (n)); \ |
| 70 | else \ |
| 71 | (v) = (v) >> (n); \ |
| 72 | } while (0) |
| 73 | #define L_MPY(v, a)((v) *= (a)) ((v) *= (a)) |
| 74 | #define L_CLR(v)((v) = 0) ((v) = 0) |
| 75 | #define L_ISNEG(v)((v) < 0) ((v) < 0) |
| 76 | #define L_LINT(v, a)((v) = (int64_t)(a) << 32) ((v) = (int64_t)(a) << 32) |
| 77 | #define L_GINT(v)((v) < 0 ? -(-(v) >> 32) : (v) >> 32) ((v) < 0 ? -(-(v) >> 32) : (v) >> 32) |
| 78 | |
| 79 | /* |
| 80 | * Generic NTP kernel interface |
| 81 | * |
| 82 | * These routines constitute the Network Time Protocol (NTP) interfaces |
| 83 | * for user and daemon application programs. The ntp_gettime() routine |
| 84 | * provides the time, maximum error (synch distance) and estimated error |
| 85 | * (dispersion) to client user application programs. The ntp_adjtime() |
| 86 | * routine is used by the NTP daemon to adjust the system clock to an |
| 87 | * externally derived time. The time offset and related variables set by |
| 88 | * this routine are used by other routines in this module to adjust the |
| 89 | * phase and frequency of the clock discipline loop which controls the |
| 90 | * system clock. |
| 91 | * |
| 92 | * When the kernel time is reckoned directly in nanoseconds (NTP_NANO |
| 93 | * defined), the time at each tick interrupt is derived directly from |
| 94 | * the kernel time variable. When the kernel time is reckoned in |
| 95 | * microseconds, (NTP_NANO undefined), the time is derived from the |
| 96 | * kernel time variable together with a variable representing the |
| 97 | * leftover nanoseconds at the last tick interrupt. In either case, the |
| 98 | * current nanosecond time is reckoned from these values plus an |
| 99 | * interpolated value derived by the clock routines in another |
| 100 | * architecture-specific module. The interpolation can use either a |
| 101 | * dedicated counter or a processor cycle counter (PCC) implemented in |
| 102 | * some architectures. |
| 103 | * |
| 104 | * Note that all routines must run at priority splclock or higher. |
| 105 | */ |
| 106 | /* |
| 107 | * Phase/frequency-lock loop (PLL/FLL) definitions |
| 108 | * |
| 109 | * The nanosecond clock discipline uses two variable types, time |
| 110 | * variables and frequency variables. Both types are represented as 64- |
| 111 | * bit fixed-point quantities with the decimal point between two 32-bit |
| 112 | * halves. On a 32-bit machine, each half is represented as a single |
| 113 | * word and mathematical operations are done using multiple-precision |
| 114 | * arithmetic. On a 64-bit machine, ordinary computer arithmetic is |
| 115 | * used. |
| 116 | * |
| 117 | * A time variable is a signed 64-bit fixed-point number in ns and |
| 118 | * fraction. It represents the remaining time offset to be amortized |
| 119 | * over succeeding tick interrupts. The maximum time offset is about |
| 120 | * 0.5 s and the resolution is about 2.3e-10 ns. |
| 121 | * |
| 122 | * 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3 |
| 123 | * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 |
| 124 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| 125 | * |s s s| ns | |
| 126 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| 127 | * | fraction | |
| 128 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| 129 | * |
| 130 | * A frequency variable is a signed 64-bit fixed-point number in ns/s |
| 131 | * and fraction. It represents the ns and fraction to be added to the |
| 132 | * kernel time variable at each second. The maximum frequency offset is |
| 133 | * about +-500000 ns/s and the resolution is about 2.3e-10 ns/s. |
| 134 | * |
| 135 | * 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3 |
| 136 | * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 |
| 137 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| 138 | * |s s s s s s s s s s s s s| ns/s | |
| 139 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| 140 | * | fraction | |
| 141 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| 142 | */ |
| 143 | /* |
| 144 | * The following variables establish the state of the PLL/FLL and the |
| 145 | * residual time and frequency offset of the local clock. |
| 146 | */ |
| 147 | #define SHIFT_PLL4 4 /* PLL loop gain (shift) */ |
| 148 | #define SHIFT_FLL2 2 /* FLL loop gain (shift) */ |
| 149 | |
| 150 | static int time_state = TIME_OK0; /* clock state */ |
| 151 | int time_status = STA_UNSYNC0x0040; /* clock status bits */ |
| 152 | static long time_tai; /* TAI offset (s) */ |
| 153 | static long time_monitor; /* last time offset scaled (ns) */ |
| 154 | static long time_constant; /* poll interval (shift) (s) */ |
| 155 | static long time_precision = 1; /* clock precision (ns) */ |
| 156 | static long time_maxerror = MAXPHASE500000000L / 1000; /* maximum error (us) */ |
| 157 | long time_esterror = MAXPHASE500000000L / 1000; /* estimated error (us) */ |
| 158 | static long time_reftime; /* uptime at last adjustment (s) */ |
| 159 | static l_fp time_offset; /* time offset (ns) */ |
| 160 | static l_fp time_freq; /* frequency offset (ns/s) */ |
| 161 | static l_fp time_adj; /* tick adjust (ns/s) */ |
| 162 | |
| 163 | static int64_t time_adjtime; /* correction from adjtime(2) (usec) */ |
| 164 | |
| 165 | static struct mtx ntpadj_lock; |
| 166 | MTX_SYSINIT(ntpadj, &ntpadj_lock, "ntpadj",static struct mtx_args ntpadj_args = { (&ntpadj_lock), ("ntpadj" ), (ifdef PPS_SYNC 0x00000001 else 0x00000000 endif) }; static struct sysinit ntpadj_mtx_sysinit_sys_init = { SI_SUB_LOCK, SI_ORDER_MIDDLE , (sysinit_cfunc_t)(sysinit_nfunc_t)mtx_sysinit, ((void *)(& ntpadj_args)) }; __asm__(".globl " "__start_set_sysinit_set") ; __asm__(".globl " "__stop_set_sysinit_set"); static void const * const __set_sysinit_set_sym_ntpadj_mtx_sysinit_sys_init __attribute__ ((__section__("set_" "sysinit_set"))) __attribute__((__used__ )) = &(ntpadj_mtx_sysinit_sys_init); static struct sysinit ntpadj_mtx_sysuninit_sys_uninit = { SI_SUB_LOCK, SI_ORDER_MIDDLE , (sysinit_cfunc_t)(sysinit_nfunc_t)_mtx_destroy, ((void *)(( (void *)(__uintptr_t)(volatile void *)(&(&ntpadj_lock )->mtx_lock)))) }; __asm__(".globl " "__start_set_sysuninit_set" ); __asm__(".globl " "__stop_set_sysuninit_set"); static void const * const __set_sysuninit_set_sym_ntpadj_mtx_sysuninit_sys_uninit __attribute__((__section__("set_" "sysuninit_set"))) __attribute__ ((__used__)) = &(ntpadj_mtx_sysuninit_sys_uninit) |
| 167 | #ifdef PPS_SYNCstatic struct mtx_args ntpadj_args = { (&ntpadj_lock), ("ntpadj" ), (ifdef PPS_SYNC 0x00000001 else 0x00000000 endif) }; static struct sysinit ntpadj_mtx_sysinit_sys_init = { SI_SUB_LOCK, SI_ORDER_MIDDLE , (sysinit_cfunc_t)(sysinit_nfunc_t)mtx_sysinit, ((void *)(& ntpadj_args)) }; __asm__(".globl " "__start_set_sysinit_set") ; __asm__(".globl " "__stop_set_sysinit_set"); static void const * const __set_sysinit_set_sym_ntpadj_mtx_sysinit_sys_init __attribute__ ((__section__("set_" "sysinit_set"))) __attribute__((__used__ )) = &(ntpadj_mtx_sysinit_sys_init); static struct sysinit ntpadj_mtx_sysuninit_sys_uninit = { SI_SUB_LOCK, SI_ORDER_MIDDLE , (sysinit_cfunc_t)(sysinit_nfunc_t)_mtx_destroy, ((void *)(( (void *)(__uintptr_t)(volatile void *)(&(&ntpadj_lock )->mtx_lock)))) }; __asm__(".globl " "__start_set_sysuninit_set" ); __asm__(".globl " "__stop_set_sysuninit_set"); static void const * const __set_sysuninit_set_sym_ntpadj_mtx_sysuninit_sys_uninit __attribute__((__section__("set_" "sysuninit_set"))) __attribute__ ((__used__)) = &(ntpadj_mtx_sysuninit_sys_uninit) |
| 168 | MTX_SPINstatic struct mtx_args ntpadj_args = { (&ntpadj_lock), ("ntpadj" ), (ifdef PPS_SYNC 0x00000001 else 0x00000000 endif) }; static struct sysinit ntpadj_mtx_sysinit_sys_init = { SI_SUB_LOCK, SI_ORDER_MIDDLE , (sysinit_cfunc_t)(sysinit_nfunc_t)mtx_sysinit, ((void *)(& ntpadj_args)) }; __asm__(".globl " "__start_set_sysinit_set") ; __asm__(".globl " "__stop_set_sysinit_set"); static void const * const __set_sysinit_set_sym_ntpadj_mtx_sysinit_sys_init __attribute__ ((__section__("set_" "sysinit_set"))) __attribute__((__used__ )) = &(ntpadj_mtx_sysinit_sys_init); static struct sysinit ntpadj_mtx_sysuninit_sys_uninit = { SI_SUB_LOCK, SI_ORDER_MIDDLE , (sysinit_cfunc_t)(sysinit_nfunc_t)_mtx_destroy, ((void *)(( (void *)(__uintptr_t)(volatile void *)(&(&ntpadj_lock )->mtx_lock)))) }; __asm__(".globl " "__start_set_sysuninit_set" ); __asm__(".globl " "__stop_set_sysuninit_set"); static void const * const __set_sysuninit_set_sym_ntpadj_mtx_sysuninit_sys_uninit __attribute__((__section__("set_" "sysuninit_set"))) __attribute__ ((__used__)) = &(ntpadj_mtx_sysuninit_sys_uninit) |
| 169 | #elsestatic struct mtx_args ntpadj_args = { (&ntpadj_lock), ("ntpadj" ), (ifdef PPS_SYNC 0x00000001 else 0x00000000 endif) }; static struct sysinit ntpadj_mtx_sysinit_sys_init = { SI_SUB_LOCK, SI_ORDER_MIDDLE , (sysinit_cfunc_t)(sysinit_nfunc_t)mtx_sysinit, ((void *)(& ntpadj_args)) }; __asm__(".globl " "__start_set_sysinit_set") ; __asm__(".globl " "__stop_set_sysinit_set"); static void const * const __set_sysinit_set_sym_ntpadj_mtx_sysinit_sys_init __attribute__ ((__section__("set_" "sysinit_set"))) __attribute__((__used__ )) = &(ntpadj_mtx_sysinit_sys_init); static struct sysinit ntpadj_mtx_sysuninit_sys_uninit = { SI_SUB_LOCK, SI_ORDER_MIDDLE , (sysinit_cfunc_t)(sysinit_nfunc_t)_mtx_destroy, ((void *)(( (void *)(__uintptr_t)(volatile void *)(&(&ntpadj_lock )->mtx_lock)))) }; __asm__(".globl " "__start_set_sysuninit_set" ); __asm__(".globl " "__stop_set_sysuninit_set"); static void const * const __set_sysuninit_set_sym_ntpadj_mtx_sysuninit_sys_uninit __attribute__((__section__("set_" "sysuninit_set"))) __attribute__ ((__used__)) = &(ntpadj_mtx_sysuninit_sys_uninit) |
| 170 | MTX_DEFstatic struct mtx_args ntpadj_args = { (&ntpadj_lock), ("ntpadj" ), (ifdef PPS_SYNC 0x00000001 else 0x00000000 endif) }; static struct sysinit ntpadj_mtx_sysinit_sys_init = { SI_SUB_LOCK, SI_ORDER_MIDDLE , (sysinit_cfunc_t)(sysinit_nfunc_t)mtx_sysinit, ((void *)(& ntpadj_args)) }; __asm__(".globl " "__start_set_sysinit_set") ; __asm__(".globl " "__stop_set_sysinit_set"); static void const * const __set_sysinit_set_sym_ntpadj_mtx_sysinit_sys_init __attribute__ ((__section__("set_" "sysinit_set"))) __attribute__((__used__ )) = &(ntpadj_mtx_sysinit_sys_init); static struct sysinit ntpadj_mtx_sysuninit_sys_uninit = { SI_SUB_LOCK, SI_ORDER_MIDDLE , (sysinit_cfunc_t)(sysinit_nfunc_t)_mtx_destroy, ((void *)(( (void *)(__uintptr_t)(volatile void *)(&(&ntpadj_lock )->mtx_lock)))) }; __asm__(".globl " "__start_set_sysuninit_set" ); __asm__(".globl " "__stop_set_sysuninit_set"); static void const * const __set_sysuninit_set_sym_ntpadj_mtx_sysuninit_sys_uninit __attribute__((__section__("set_" "sysuninit_set"))) __attribute__ ((__used__)) = &(ntpadj_mtx_sysuninit_sys_uninit) |
| 171 | #endifstatic struct mtx_args ntpadj_args = { (&ntpadj_lock), ("ntpadj" ), (ifdef PPS_SYNC 0x00000001 else 0x00000000 endif) }; static struct sysinit ntpadj_mtx_sysinit_sys_init = { SI_SUB_LOCK, SI_ORDER_MIDDLE , (sysinit_cfunc_t)(sysinit_nfunc_t)mtx_sysinit, ((void *)(& ntpadj_args)) }; __asm__(".globl " "__start_set_sysinit_set") ; __asm__(".globl " "__stop_set_sysinit_set"); static void const * const __set_sysinit_set_sym_ntpadj_mtx_sysinit_sys_init __attribute__ ((__section__("set_" "sysinit_set"))) __attribute__((__used__ )) = &(ntpadj_mtx_sysinit_sys_init); static struct sysinit ntpadj_mtx_sysuninit_sys_uninit = { SI_SUB_LOCK, SI_ORDER_MIDDLE , (sysinit_cfunc_t)(sysinit_nfunc_t)_mtx_destroy, ((void *)(( (void *)(__uintptr_t)(volatile void *)(&(&ntpadj_lock )->mtx_lock)))) }; __asm__(".globl " "__start_set_sysuninit_set" ); __asm__(".globl " "__stop_set_sysuninit_set"); static void const * const __set_sysuninit_set_sym_ntpadj_mtx_sysuninit_sys_uninit __attribute__((__section__("set_" "sysuninit_set"))) __attribute__ ((__used__)) = &(ntpadj_mtx_sysuninit_sys_uninit) |
| 172 | )static struct mtx_args ntpadj_args = { (&ntpadj_lock), ("ntpadj" ), (ifdef PPS_SYNC 0x00000001 else 0x00000000 endif) }; static struct sysinit ntpadj_mtx_sysinit_sys_init = { SI_SUB_LOCK, SI_ORDER_MIDDLE , (sysinit_cfunc_t)(sysinit_nfunc_t)mtx_sysinit, ((void *)(& ntpadj_args)) }; __asm__(".globl " "__start_set_sysinit_set") ; __asm__(".globl " "__stop_set_sysinit_set"); static void const * const __set_sysinit_set_sym_ntpadj_mtx_sysinit_sys_init __attribute__ ((__section__("set_" "sysinit_set"))) __attribute__((__used__ )) = &(ntpadj_mtx_sysinit_sys_init); static struct sysinit ntpadj_mtx_sysuninit_sys_uninit = { SI_SUB_LOCK, SI_ORDER_MIDDLE , (sysinit_cfunc_t)(sysinit_nfunc_t)_mtx_destroy, ((void *)(( (void *)(__uintptr_t)(volatile void *)(&(&ntpadj_lock )->mtx_lock)))) }; __asm__(".globl " "__start_set_sysuninit_set" ); __asm__(".globl " "__stop_set_sysuninit_set"); static void const * const __set_sysuninit_set_sym_ntpadj_mtx_sysuninit_sys_uninit __attribute__((__section__("set_" "sysuninit_set"))) __attribute__ ((__used__)) = &(ntpadj_mtx_sysuninit_sys_uninit); |
| 173 | |
| 174 | /* |
| 175 | * When PPS_SYNC is defined, hardpps() function is provided which can |
| 176 | * be legitimately called from interrupt filters. Due to this, use |
| 177 | * spinlock for ntptime state protection, otherwise sleepable mutex is |
| 178 | * adequate. |
| 179 | */ |
| 180 | #ifdef PPS_SYNC |
| 181 | #define NTPADJ_LOCK()do { uintptr_t _tid = (uintptr_t)((__curthread())); if (((((( &ntpadj_lock))))->mtx_lock != 0x00000004 || !atomic_cmpset_long (&(((((&ntpadj_lock)))))->mtx_lock, 0x00000004, (_tid )))) __mtx_lock_sleep(&(((((&ntpadj_lock)))))->mtx_lock , _tid, (((0))), ((((void *)0))), ((0))); else do { (void)0; do { if (__builtin_expect((sdt_lockstat___adaptive__acquire-> id), 0)) (*sdt_probe_func)(sdt_lockstat___adaptive__acquire-> id, (uintptr_t) (((&ntpadj_lock))), (uintptr_t) 0, (uintptr_t ) 0, (uintptr_t) 0, (uintptr_t) 0); } while (0); } while (0); } while (0) mtx_lock_spin(&ntpadj_lock)do { uintptr_t _tid = (uintptr_t)((__curthread())); spinlock_enter (); if ((((((&ntpadj_lock))))->mtx_lock != 0x00000004 || !atomic_cmpset_long(&(((((&ntpadj_lock)))))->mtx_lock , 0x00000004, (_tid)))) { if (((((&ntpadj_lock))))->mtx_lock == _tid) ((((&ntpadj_lock))))->lock_object.lo_data++; else _mtx_lock_spin_cookie(&(((((&ntpadj_lock)))))-> mtx_lock, _tid, (((0))), ((((void *)0))), ((0))); } else do { (void)0; do { if (__builtin_expect((sdt_lockstat___spin__acquire ->id), 0)) (*sdt_probe_func)(sdt_lockstat___spin__acquire-> id, (uintptr_t) (((&ntpadj_lock))), (uintptr_t) 0, (uintptr_t ) 0, (uintptr_t) 0, (uintptr_t) 0); } while (0); } while (0); } while (0) |
| 182 | #define NTPADJ_UNLOCK()do { uintptr_t _tid = (uintptr_t)((__curthread())); if (((((& ntpadj_lock))))->lock_object.lo_data == 0) do { (void)0; do { if (__builtin_expect((sdt_lockstat___adaptive__release-> id), 0)) (*sdt_probe_func)(sdt_lockstat___adaptive__release-> id, (uintptr_t) (((&ntpadj_lock))), (uintptr_t) 0, (uintptr_t ) 0, (uintptr_t) 0, (uintptr_t) 0); } while (0); } while (0); if (((((&ntpadj_lock))))->mtx_lock != _tid || !atomic_cmpset_long (&(((((&ntpadj_lock)))))->mtx_lock, (_tid), 0x00000004 )) __mtx_unlock_sleep(&(((((&ntpadj_lock)))))->mtx_lock , (((0))), ((((void *)0))), ((0))); } while (0) mtx_unlock_spin(&ntpadj_lock)do { if (((((((&ntpadj_lock)))))->lock_object.lo_data != 0)) ((((&ntpadj_lock))))->lock_object.lo_data--; else { do { (void)0; do { if (__builtin_expect((sdt_lockstat___spin__release ->id), 0)) (*sdt_probe_func)(sdt_lockstat___spin__release-> id, (uintptr_t) (((&ntpadj_lock))), (uintptr_t) 0, (uintptr_t ) 0, (uintptr_t) 0, (uintptr_t) 0); } while (0); } while (0); atomic_store_rel_long(&(((((&ntpadj_lock)))))->mtx_lock , 0x00000004); } spinlock_exit(); } while (0) |
| 183 | #else |
| 184 | #define NTPADJ_LOCK()do { uintptr_t _tid = (uintptr_t)((__curthread())); if (((((( &ntpadj_lock))))->mtx_lock != 0x00000004 || !atomic_cmpset_long (&(((((&ntpadj_lock)))))->mtx_lock, 0x00000004, (_tid )))) __mtx_lock_sleep(&(((((&ntpadj_lock)))))->mtx_lock , _tid, (((0))), ((((void *)0))), ((0))); else do { (void)0; do { if (__builtin_expect((sdt_lockstat___adaptive__acquire-> id), 0)) (*sdt_probe_func)(sdt_lockstat___adaptive__acquire-> id, (uintptr_t) (((&ntpadj_lock))), (uintptr_t) 0, (uintptr_t ) 0, (uintptr_t) 0, (uintptr_t) 0); } while (0); } while (0); } while (0) mtx_lock(&ntpadj_lock)do { uintptr_t _tid = (uintptr_t)((__curthread())); if (((((( &ntpadj_lock))))->mtx_lock != 0x00000004 || !atomic_cmpset_long (&(((((&ntpadj_lock)))))->mtx_lock, 0x00000004, (_tid )))) __mtx_lock_sleep(&(((((&ntpadj_lock)))))->mtx_lock , _tid, (((0))), ((((void *)0))), ((0))); else do { (void)0; do { if (__builtin_expect((sdt_lockstat___adaptive__acquire-> id), 0)) (*sdt_probe_func)(sdt_lockstat___adaptive__acquire-> id, (uintptr_t) (((&ntpadj_lock))), (uintptr_t) 0, (uintptr_t ) 0, (uintptr_t) 0, (uintptr_t) 0); } while (0); } while (0); } while (0) |
| 185 | #define NTPADJ_UNLOCK()do { uintptr_t _tid = (uintptr_t)((__curthread())); if (((((& ntpadj_lock))))->lock_object.lo_data == 0) do { (void)0; do { if (__builtin_expect((sdt_lockstat___adaptive__release-> id), 0)) (*sdt_probe_func)(sdt_lockstat___adaptive__release-> id, (uintptr_t) (((&ntpadj_lock))), (uintptr_t) 0, (uintptr_t ) 0, (uintptr_t) 0, (uintptr_t) 0); } while (0); } while (0); if (((((&ntpadj_lock))))->mtx_lock != _tid || !atomic_cmpset_long (&(((((&ntpadj_lock)))))->mtx_lock, (_tid), 0x00000004 )) __mtx_unlock_sleep(&(((((&ntpadj_lock)))))->mtx_lock , (((0))), ((((void *)0))), ((0))); } while (0) mtx_unlock(&ntpadj_lock)do { uintptr_t _tid = (uintptr_t)((__curthread())); if (((((& ntpadj_lock))))->lock_object.lo_data == 0) do { (void)0; do { if (__builtin_expect((sdt_lockstat___adaptive__release-> id), 0)) (*sdt_probe_func)(sdt_lockstat___adaptive__release-> id, (uintptr_t) (((&ntpadj_lock))), (uintptr_t) 0, (uintptr_t ) 0, (uintptr_t) 0, (uintptr_t) 0); } while (0); } while (0); if (((((&ntpadj_lock))))->mtx_lock != _tid || !atomic_cmpset_long (&(((((&ntpadj_lock)))))->mtx_lock, (_tid), 0x00000004 )) __mtx_unlock_sleep(&(((((&ntpadj_lock)))))->mtx_lock , (((0))), ((((void *)0))), ((0))); } while (0) |
| 186 | #endif |
| 187 | #define NTPADJ_ASSERT_LOCKED()(void)0 mtx_assert(&ntpadj_lock, MA_OWNED)(void)0 |
| 188 | |
| 189 | #ifdef PPS_SYNC |
| 190 | /* |
| 191 | * The following variables are used when a pulse-per-second (PPS) signal |
| 192 | * is available and connected via a modem control lead. They establish |
| 193 | * the engineering parameters of the clock discipline loop when |
| 194 | * controlled by the PPS signal. |
| 195 | */ |
| 196 | #define PPS_FAVG 2 /* min freq avg interval (s) (shift) */ |
| 197 | #define PPS_FAVGDEF 8 /* default freq avg int (s) (shift) */ |
| 198 | #define PPS_FAVGMAX 15 /* max freq avg interval (s) (shift) */ |
| 199 | #define PPS_PAVG 4 /* phase avg interval (s) (shift) */ |
| 200 | #define PPS_VALID 120 /* PPS signal watchdog max (s) */ |
| 201 | #define PPS_MAXWANDER 100000 /* max PPS wander (ns/s) */ |
| 202 | #define PPS_POPCORN 2 /* popcorn spike threshold (shift) */ |
| 203 | |
| 204 | static struct timespec pps_tf[3]; /* phase median filter */ |
| 205 | static l_fp pps_freq; /* scaled frequency offset (ns/s) */ |
| 206 | static long pps_fcount; /* frequency accumulator */ |
| 207 | static long pps_jitter; /* nominal jitter (ns) */ |
| 208 | static long pps_stabil; /* nominal stability (scaled ns/s) */ |
| 209 | static long pps_lastsec; /* time at last calibration (s) */ |
| 210 | static int pps_valid; /* signal watchdog counter */ |
| 211 | static int pps_shift = PPS_FAVG; /* interval duration (s) (shift) */ |
| 212 | static int pps_shiftmax = PPS_FAVGDEF; /* max interval duration (s) (shift) */ |
| 213 | static int pps_intcnt; /* wander counter */ |
| 214 | |
| 215 | /* |
| 216 | * PPS signal quality monitors |
| 217 | */ |
| 218 | static long pps_calcnt; /* calibration intervals */ |
| 219 | static long pps_jitcnt; /* jitter limit exceeded */ |
| 220 | static long pps_stbcnt; /* stability limit exceeded */ |
| 221 | static long pps_errcnt; /* calibration errors */ |
| 222 | #endif /* PPS_SYNC */ |
| 223 | /* |
| 224 | * End of phase/frequency-lock loop (PLL/FLL) definitions |
| 225 | */ |
| 226 | |
| 227 | static void ntp_init(void); |
| 228 | static void hardupdate(long offset); |
| 229 | static void ntp_gettime1(struct ntptimeval *ntvp); |
| 230 | static bool ntp_is_time_error(int tsl); |
| 231 | |
| 232 | static bool |
| 233 | ntp_is_time_error(int tsl) |
| 234 | { |
| 235 | |
| 236 | /* |
| 237 | * Status word error decode. If any of these conditions occur, |
| 238 | * an error is returned, instead of the status word. Most |
| 239 | * applications will care only about the fact the system clock |
| 240 | * may not be trusted, not about the details. |
| 241 | * |
| 242 | * Hardware or software error |
| 243 | */ |
| 244 | if ((tsl & (STA_UNSYNC0x0040 | STA_CLOCKERR0x1000)) || |
| 245 | |
| 246 | /* |
| 247 | * PPS signal lost when either time or frequency synchronization |
| 248 | * requested |
| 249 | */ |
| 250 | (tsl & (STA_PPSFREQ0x0002 | STA_PPSTIME0x0004) && |
| 251 | !(tsl & STA_PPSSIGNAL0x0100)) || |
| 252 | |
| 253 | /* |
| 254 | * PPS jitter exceeded when time synchronization requested |
| 255 | */ |
| 256 | (tsl & STA_PPSTIME0x0004 && tsl & STA_PPSJITTER0x0200) || |
| 257 | |
| 258 | /* |
| 259 | * PPS wander exceeded or calibration error when frequency |
| 260 | * synchronization requested |
| 261 | */ |
| 262 | (tsl & STA_PPSFREQ0x0002 && |
| 263 | tsl & (STA_PPSWANDER0x0400 | STA_PPSERROR0x0800))) |
| 264 | return (true1); |
| 265 | |
| 266 | return (false0); |
| 267 | } |
| 268 | |
| 269 | static void |
| 270 | ntp_gettime1(struct ntptimeval *ntvp) |
| 271 | { |
| 272 | struct timespec atv; /* nanosecond time */ |
| 273 | |
| 274 | NTPADJ_ASSERT_LOCKED()(void)0; |
| 275 | |
| 276 | nanotime(&atv); |
| 277 | ntvp->time.tv_sec = atv.tv_sec; |
| 278 | ntvp->time.tv_nsec = atv.tv_nsec; |
| 279 | ntvp->maxerror = time_maxerror; |
| 280 | ntvp->esterror = time_esterror; |
| 281 | ntvp->tai = time_tai; |
| 282 | ntvp->time_state = time_state; |
| 283 | |
| 284 | if (ntp_is_time_error(time_status)) |
| 285 | ntvp->time_state = TIME_ERROR5; |
| 286 | } |
| 287 | |
| 288 | /* |
| 289 | * ntp_gettime() - NTP user application interface |
| 290 | * |
| 291 | * See the timex.h header file for synopsis and API description. Note that |
| 292 | * the TAI offset is returned in the ntvtimeval.tai structure member. |
| 293 | */ |
| 294 | #ifndef _SYS_SYSPROTO_H_ |
| 295 | struct ntp_gettime_args { |
| 296 | struct ntptimeval *ntvp; |
| 297 | }; |
| 298 | #endif |
| 299 | /* ARGSUSED */ |
| 300 | int |
| 301 | sys_ntp_gettime(struct thread *td, struct ntp_gettime_args *uap) |
| 302 | { |
| 303 | struct ntptimeval ntv; |
| 304 | |
| 305 | NTPADJ_LOCK()do { uintptr_t _tid = (uintptr_t)((__curthread())); if (((((( &ntpadj_lock))))->mtx_lock != 0x00000004 || !atomic_cmpset_long (&(((((&ntpadj_lock)))))->mtx_lock, 0x00000004, (_tid )))) __mtx_lock_sleep(&(((((&ntpadj_lock)))))->mtx_lock , _tid, (((0))), ((((void *)0))), ((0))); else do { (void)0; do { if (__builtin_expect((sdt_lockstat___adaptive__acquire-> id), 0)) (*sdt_probe_func)(sdt_lockstat___adaptive__acquire-> id, (uintptr_t) (((&ntpadj_lock))), (uintptr_t) 0, (uintptr_t ) 0, (uintptr_t) 0, (uintptr_t) 0); } while (0); } while (0); } while (0); |
| 306 | ntp_gettime1(&ntv); |
| 307 | NTPADJ_UNLOCK()do { uintptr_t _tid = (uintptr_t)((__curthread())); if (((((& ntpadj_lock))))->lock_object.lo_data == 0) do { (void)0; do { if (__builtin_expect((sdt_lockstat___adaptive__release-> id), 0)) (*sdt_probe_func)(sdt_lockstat___adaptive__release-> id, (uintptr_t) (((&ntpadj_lock))), (uintptr_t) 0, (uintptr_t ) 0, (uintptr_t) 0, (uintptr_t) 0); } while (0); } while (0); if (((((&ntpadj_lock))))->mtx_lock != _tid || !atomic_cmpset_long (&(((((&ntpadj_lock)))))->mtx_lock, (_tid), 0x00000004 )) __mtx_unlock_sleep(&(((((&ntpadj_lock)))))->mtx_lock , (((0))), ((((void *)0))), ((0))); } while (0); |
| 308 | |
| 309 | td->td_retvaltd_uretoff.tdu_retval[0] = ntv.time_state; |
| 310 | return (copyout(&ntv, uap->ntvp, sizeof(ntv))); |
Copies out a struct with uncleared padding (>= 4 bytes) | |
| 311 | } |
| 312 | |
| 313 | static int |
| 314 | ntp_sysctl(SYSCTL_HANDLER_ARGSstruct sysctl_oid *oidp, void *arg1, intmax_t arg2, struct sysctl_req *req) |
| 315 | { |
| 316 | struct ntptimeval ntv; /* temporary structure */ |
| 317 | |
| 318 | NTPADJ_LOCK()do { uintptr_t _tid = (uintptr_t)((__curthread())); if (((((( &ntpadj_lock))))->mtx_lock != 0x00000004 || !atomic_cmpset_long (&(((((&ntpadj_lock)))))->mtx_lock, 0x00000004, (_tid )))) __mtx_lock_sleep(&(((((&ntpadj_lock)))))->mtx_lock , _tid, (((0))), ((((void *)0))), ((0))); else do { (void)0; do { if (__builtin_expect((sdt_lockstat___adaptive__acquire-> id), 0)) (*sdt_probe_func)(sdt_lockstat___adaptive__acquire-> id, (uintptr_t) (((&ntpadj_lock))), (uintptr_t) 0, (uintptr_t ) 0, (uintptr_t) 0, (uintptr_t) 0); } while (0); } while (0); } while (0); |
| 319 | ntp_gettime1(&ntv); |
| 320 | NTPADJ_UNLOCK()do { uintptr_t _tid = (uintptr_t)((__curthread())); if (((((& ntpadj_lock))))->lock_object.lo_data == 0) do { (void)0; do { if (__builtin_expect((sdt_lockstat___adaptive__release-> id), 0)) (*sdt_probe_func)(sdt_lockstat___adaptive__release-> id, (uintptr_t) (((&ntpadj_lock))), (uintptr_t) 0, (uintptr_t ) 0, (uintptr_t) 0, (uintptr_t) 0); } while (0); } while (0); if (((((&ntpadj_lock))))->mtx_lock != _tid || !atomic_cmpset_long (&(((((&ntpadj_lock)))))->mtx_lock, (_tid), 0x00000004 )) __mtx_unlock_sleep(&(((((&ntpadj_lock)))))->mtx_lock , (((0))), ((((void *)0))), ((0))); } while (0); |
| 321 | |
| 322 | return (sysctl_handle_opaque(oidp, &ntv, sizeof(ntv), req)); |
| 323 | } |
| 324 | |
| 325 | SYSCTL_NODE(_kern, OID_AUTO, ntp_pll, CTLFLAG_RW, 0, "")struct sysctl_oid sysctl___kern_ntp_pll = { .oid_parent = ((& (&sysctl___kern)->oid_children)), .oid_children = { (( void *)0) }, .oid_number = ((-1)), .oid_kind = (1|((0x80000000 |0x40000000))), .oid_arg1 = (((void *)0)), .oid_arg2 = (0), . oid_name = ("ntp_pll"), .oid_handler = (0), .oid_fmt = ("N"), .oid_descr = "" }; __asm__(".globl " "__start_set_sysctl_set" ); __asm__(".globl " "__stop_set_sysctl_set"); static void const * const __set_sysctl_set_sym_sysctl___kern_ntp_pll __attribute__ ((__section__("set_" "sysctl_set"))) __attribute__((__used__) ) = &(sysctl___kern_ntp_pll); _Static_assert((((0x80000000 |0x40000000)) & 0xf) == 0 || (((0x80000000|0x40000000)) & 0) == 1, "compile-time assertion failed"); |
| 326 | SYSCTL_PROC(_kern_ntp_pll, OID_AUTO, gettime, CTLTYPE_OPAQUE | CTLFLAG_RD |static struct sysctl_oid sysctl___kern_ntp_pll_gettime = { .oid_parent = ((&(&sysctl___kern_ntp_pll)->oid_children)), .oid_children = { ((void *)0) }, .oid_number = ((-1)), .oid_kind = ((5 | 0x80000000 | 0x00040000)), .oid_arg1 = (0), .oid_arg2 = (sizeof(struct ntptimeval )), .oid_name = ("gettime"), .oid_handler = (ntp_sysctl), .oid_fmt = ("S,ntptimeval"), .oid_descr = "" }; __asm__(".globl " "__start_set_sysctl_set" ); __asm__(".globl " "__stop_set_sysctl_set"); static void const * const __set_sysctl_set_sym_sysctl___kern_ntp_pll_gettime __attribute__ ((__section__("set_" "sysctl_set"))) __attribute__((__used__) ) = &(sysctl___kern_ntp_pll_gettime); _Static_assert(((5 | 0x80000000 | 0x00040000) & 0xf) != 0, "compile-time assertion failed" ) |
| 327 | CTLFLAG_MPSAFE, 0, sizeof(struct ntptimeval) , ntp_sysctl, "S,ntptimeval",static struct sysctl_oid sysctl___kern_ntp_pll_gettime = { .oid_parent = ((&(&sysctl___kern_ntp_pll)->oid_children)), .oid_children = { ((void *)0) }, .oid_number = ((-1)), .oid_kind = ((5 | 0x80000000 | 0x00040000)), .oid_arg1 = (0), .oid_arg2 = (sizeof(struct ntptimeval )), .oid_name = ("gettime"), .oid_handler = (ntp_sysctl), .oid_fmt = ("S,ntptimeval"), .oid_descr = "" }; __asm__(".globl " "__start_set_sysctl_set" ); __asm__(".globl " "__stop_set_sysctl_set"); static void const * const __set_sysctl_set_sym_sysctl___kern_ntp_pll_gettime __attribute__ ((__section__("set_" "sysctl_set"))) __attribute__((__used__) ) = &(sysctl___kern_ntp_pll_gettime); _Static_assert(((5 | 0x80000000 | 0x00040000) & 0xf) != 0, "compile-time assertion failed" ) |
| 328 | "")static struct sysctl_oid sysctl___kern_ntp_pll_gettime = { .oid_parent = ((&(&sysctl___kern_ntp_pll)->oid_children)), .oid_children = { ((void *)0) }, .oid_number = ((-1)), .oid_kind = ((5 | 0x80000000 | 0x00040000)), .oid_arg1 = (0), .oid_arg2 = (sizeof(struct ntptimeval )), .oid_name = ("gettime"), .oid_handler = (ntp_sysctl), .oid_fmt = ("S,ntptimeval"), .oid_descr = "" }; __asm__(".globl " "__start_set_sysctl_set" ); __asm__(".globl " "__stop_set_sysctl_set"); static void const * const __set_sysctl_set_sym_sysctl___kern_ntp_pll_gettime __attribute__ ((__section__("set_" "sysctl_set"))) __attribute__((__used__) ) = &(sysctl___kern_ntp_pll_gettime); _Static_assert(((5 | 0x80000000 | 0x00040000) & 0xf) != 0, "compile-time assertion failed" ); |
| 329 | |
| 330 | #ifdef PPS_SYNC |
| 331 | SYSCTL_INT(_kern_ntp_pll, OID_AUTO, pps_shiftmax, CTLFLAG_RW,static struct sysctl_oid sysctl___kern_ntp_pll_pps_shiftmax = { .oid_parent = ((&(&sysctl___kern_ntp_pll)->oid_children )), .oid_children = { ((void *)0) }, .oid_number = ((-1)), .oid_kind = (2 | 0x00040000 | ((0x80000000|0x40000000))), .oid_arg1 = ( &pps_shiftmax), .oid_arg2 = (0), .oid_name = ("pps_shiftmax" ), .oid_handler = (sysctl_handle_int), .oid_fmt = ("I"), .oid_descr = "Max interval duration (sec) (shift)" }; __asm__(".globl " "__start_set_sysctl_set"); __asm__(".globl " "__stop_set_sysctl_set" ); static void const * const __set_sysctl_set_sym_sysctl___kern_ntp_pll_pps_shiftmax __attribute__((__section__("set_" "sysctl_set"))) __attribute__ ((__used__)) = &(sysctl___kern_ntp_pll_pps_shiftmax); _Static_assert (((((0x80000000|0x40000000)) & 0xf) == 0 || (((0x80000000 |0x40000000)) & 0) == 2) && sizeof(int) == sizeof (*(&pps_shiftmax)), "compile-time assertion failed") |
| 332 | &pps_shiftmax, 0, "Max interval duration (sec) (shift)")static struct sysctl_oid sysctl___kern_ntp_pll_pps_shiftmax = { .oid_parent = ((&(&sysctl___kern_ntp_pll)->oid_children )), .oid_children = { ((void *)0) }, .oid_number = ((-1)), .oid_kind = (2 | 0x00040000 | ((0x80000000|0x40000000))), .oid_arg1 = ( &pps_shiftmax), .oid_arg2 = (0), .oid_name = ("pps_shiftmax" ), .oid_handler = (sysctl_handle_int), .oid_fmt = ("I"), .oid_descr = "Max interval duration (sec) (shift)" }; __asm__(".globl " "__start_set_sysctl_set"); __asm__(".globl " "__stop_set_sysctl_set" ); static void const * const __set_sysctl_set_sym_sysctl___kern_ntp_pll_pps_shiftmax __attribute__((__section__("set_" "sysctl_set"))) __attribute__ ((__used__)) = &(sysctl___kern_ntp_pll_pps_shiftmax); _Static_assert (((((0x80000000|0x40000000)) & 0xf) == 0 || (((0x80000000 |0x40000000)) & 0) == 2) && sizeof(int) == sizeof (*(&pps_shiftmax)), "compile-time assertion failed"); |
| 333 | SYSCTL_INT(_kern_ntp_pll, OID_AUTO, pps_shift, CTLFLAG_RW,static struct sysctl_oid sysctl___kern_ntp_pll_pps_shift = { . oid_parent = ((&(&sysctl___kern_ntp_pll)->oid_children )), .oid_children = { ((void *)0) }, .oid_number = ((-1)), .oid_kind = (2 | 0x00040000 | ((0x80000000|0x40000000))), .oid_arg1 = ( &pps_shift), .oid_arg2 = (0), .oid_name = ("pps_shift"), . oid_handler = (sysctl_handle_int), .oid_fmt = ("I"), .oid_descr = "Interval duration (sec) (shift)" }; __asm__(".globl " "__start_set_sysctl_set" ); __asm__(".globl " "__stop_set_sysctl_set"); static void const * const __set_sysctl_set_sym_sysctl___kern_ntp_pll_pps_shift __attribute__((__section__("set_" "sysctl_set"))) __attribute__ ((__used__)) = &(sysctl___kern_ntp_pll_pps_shift); _Static_assert (((((0x80000000|0x40000000)) & 0xf) == 0 || (((0x80000000 |0x40000000)) & 0) == 2) && sizeof(int) == sizeof (*(&pps_shift)), "compile-time assertion failed") |
| 334 | &pps_shift, 0, "Interval duration (sec) (shift)")static struct sysctl_oid sysctl___kern_ntp_pll_pps_shift = { . oid_parent = ((&(&sysctl___kern_ntp_pll)->oid_children )), .oid_children = { ((void *)0) }, .oid_number = ((-1)), .oid_kind = (2 | 0x00040000 | ((0x80000000|0x40000000))), .oid_arg1 = ( &pps_shift), .oid_arg2 = (0), .oid_name = ("pps_shift"), . oid_handler = (sysctl_handle_int), .oid_fmt = ("I"), .oid_descr = "Interval duration (sec) (shift)" }; __asm__(".globl " "__start_set_sysctl_set" ); __asm__(".globl " "__stop_set_sysctl_set"); static void const * const __set_sysctl_set_sym_sysctl___kern_ntp_pll_pps_shift __attribute__((__section__("set_" "sysctl_set"))) __attribute__ ((__used__)) = &(sysctl___kern_ntp_pll_pps_shift); _Static_assert (((((0x80000000|0x40000000)) & 0xf) == 0 || (((0x80000000 |0x40000000)) & 0) == 2) && sizeof(int) == sizeof (*(&pps_shift)), "compile-time assertion failed"); |
| 335 | SYSCTL_LONG(_kern_ntp_pll, OID_AUTO, time_monitor, CTLFLAG_RD,static struct sysctl_oid sysctl___kern_ntp_pll_time_monitor = { .oid_parent = ((&(&sysctl___kern_ntp_pll)->oid_children )), .oid_children = { ((void *)0) }, .oid_number = ((-1)), .oid_kind = (7 | 0x00040000 | (0x80000000)), .oid_arg1 = (&time_monitor ), .oid_arg2 = (0), .oid_name = ("time_monitor"), .oid_handler = (sysctl_handle_long), .oid_fmt = ("L"), .oid_descr = "Last time offset scaled (ns)" }; __asm__(".globl " "__start_set_sysctl_set"); __asm__(".globl " "__stop_set_sysctl_set"); static void const * const __set_sysctl_set_sym_sysctl___kern_ntp_pll_time_monitor __attribute__((__section__("set_" "sysctl_set"))) __attribute__ ((__used__)) = &(sysctl___kern_ntp_pll_time_monitor); _Static_assert ((((0x80000000) & 0xf) == 0 || ((0x80000000) & 0) == 7 ) && sizeof(long) == sizeof(*(&time_monitor)), "compile-time assertion failed" ) |
| 336 | &time_monitor, 0, "Last time offset scaled (ns)")static struct sysctl_oid sysctl___kern_ntp_pll_time_monitor = { .oid_parent = ((&(&sysctl___kern_ntp_pll)->oid_children )), .oid_children = { ((void *)0) }, .oid_number = ((-1)), .oid_kind = (7 | 0x00040000 | (0x80000000)), .oid_arg1 = (&time_monitor ), .oid_arg2 = (0), .oid_name = ("time_monitor"), .oid_handler = (sysctl_handle_long), .oid_fmt = ("L"), .oid_descr = "Last time offset scaled (ns)" }; __asm__(".globl " "__start_set_sysctl_set"); __asm__(".globl " "__stop_set_sysctl_set"); static void const * const __set_sysctl_set_sym_sysctl___kern_ntp_pll_time_monitor __attribute__((__section__("set_" "sysctl_set"))) __attribute__ ((__used__)) = &(sysctl___kern_ntp_pll_time_monitor); _Static_assert ((((0x80000000) & 0xf) == 0 || ((0x80000000) & 0) == 7 ) && sizeof(long) == sizeof(*(&time_monitor)), "compile-time assertion failed" ); |
| 337 | |
| 338 | SYSCTL_S64(_kern_ntp_pll, OID_AUTO, pps_freq, CTLFLAG_RD | CTLFLAG_MPSAFE,static struct sysctl_oid sysctl___kern_ntp_pll_pps_freq = { . oid_parent = ((&(&sysctl___kern_ntp_pll)->oid_children )), .oid_children = { ((void *)0) }, .oid_number = ((-1)), .oid_kind = (4 | 0x00040000 | (0x80000000 | 0x00040000)), .oid_arg1 = ( &pps_freq), .oid_arg2 = (0), .oid_name = ("pps_freq"), .oid_handler = (sysctl_handle_64), .oid_fmt = ("Q"), .oid_descr = "Scaled frequency offset (ns/sec)" }; __asm__(".globl " "__start_set_sysctl_set"); __asm__(".globl " "__stop_set_sysctl_set"); static void const * const __set_sysctl_set_sym_sysctl___kern_ntp_pll_pps_freq __attribute__((__section__("set_" "sysctl_set"))) __attribute__ ((__used__)) = &(sysctl___kern_ntp_pll_pps_freq); _Static_assert ((((0x80000000 | 0x00040000) & 0xf) == 0 || ((0x80000000 | 0x00040000) & 0) == 4) && sizeof(int64_t) == sizeof (*(&pps_freq)), "compile-time assertion failed") |
| 339 | &pps_freq, 0,static struct sysctl_oid sysctl___kern_ntp_pll_pps_freq = { . oid_parent = ((&(&sysctl___kern_ntp_pll)->oid_children )), .oid_children = { ((void *)0) }, .oid_number = ((-1)), .oid_kind = (4 | 0x00040000 | (0x80000000 | 0x00040000)), .oid_arg1 = ( &pps_freq), .oid_arg2 = (0), .oid_name = ("pps_freq"), .oid_handler = (sysctl_handle_64), .oid_fmt = ("Q"), .oid_descr = "Scaled frequency offset (ns/sec)" }; __asm__(".globl " "__start_set_sysctl_set"); __asm__(".globl " "__stop_set_sysctl_set"); static void const * const __set_sysctl_set_sym_sysctl___kern_ntp_pll_pps_freq __attribute__((__section__("set_" "sysctl_set"))) __attribute__ ((__used__)) = &(sysctl___kern_ntp_pll_pps_freq); _Static_assert ((((0x80000000 | 0x00040000) & 0xf) == 0 || ((0x80000000 | 0x00040000) & 0) == 4) && sizeof(int64_t) == sizeof (*(&pps_freq)), "compile-time assertion failed") |
| 340 | "Scaled frequency offset (ns/sec)")static struct sysctl_oid sysctl___kern_ntp_pll_pps_freq = { . oid_parent = ((&(&sysctl___kern_ntp_pll)->oid_children )), .oid_children = { ((void *)0) }, .oid_number = ((-1)), .oid_kind = (4 | 0x00040000 | (0x80000000 | 0x00040000)), .oid_arg1 = ( &pps_freq), .oid_arg2 = (0), .oid_name = ("pps_freq"), .oid_handler = (sysctl_handle_64), .oid_fmt = ("Q"), .oid_descr = "Scaled frequency offset (ns/sec)" }; __asm__(".globl " "__start_set_sysctl_set"); __asm__(".globl " "__stop_set_sysctl_set"); static void const * const __set_sysctl_set_sym_sysctl___kern_ntp_pll_pps_freq __attribute__((__section__("set_" "sysctl_set"))) __attribute__ ((__used__)) = &(sysctl___kern_ntp_pll_pps_freq); _Static_assert ((((0x80000000 | 0x00040000) & 0xf) == 0 || ((0x80000000 | 0x00040000) & 0) == 4) && sizeof(int64_t) == sizeof (*(&pps_freq)), "compile-time assertion failed"); |
| 341 | SYSCTL_S64(_kern_ntp_pll, OID_AUTO, time_freq, CTLFLAG_RD | CTLFLAG_MPSAFE,static struct sysctl_oid sysctl___kern_ntp_pll_time_freq = { . oid_parent = ((&(&sysctl___kern_ntp_pll)->oid_children )), .oid_children = { ((void *)0) }, .oid_number = ((-1)), .oid_kind = (4 | 0x00040000 | (0x80000000 | 0x00040000)), .oid_arg1 = ( &time_freq), .oid_arg2 = (0), .oid_name = ("time_freq"), . oid_handler = (sysctl_handle_64), .oid_fmt = ("Q"), .oid_descr = "Frequency offset (ns/sec)" }; __asm__(".globl " "__start_set_sysctl_set" ); __asm__(".globl " "__stop_set_sysctl_set"); static void const * const __set_sysctl_set_sym_sysctl___kern_ntp_pll_time_freq __attribute__((__section__("set_" "sysctl_set"))) __attribute__ ((__used__)) = &(sysctl___kern_ntp_pll_time_freq); _Static_assert ((((0x80000000 | 0x00040000) & 0xf) == 0 || ((0x80000000 | 0x00040000) & 0) == 4) && sizeof(int64_t) == sizeof (*(&time_freq)), "compile-time assertion failed") |
| 342 | &time_freq, 0,static struct sysctl_oid sysctl___kern_ntp_pll_time_freq = { . oid_parent = ((&(&sysctl___kern_ntp_pll)->oid_children )), .oid_children = { ((void *)0) }, .oid_number = ((-1)), .oid_kind = (4 | 0x00040000 | (0x80000000 | 0x00040000)), .oid_arg1 = ( &time_freq), .oid_arg2 = (0), .oid_name = ("time_freq"), . oid_handler = (sysctl_handle_64), .oid_fmt = ("Q"), .oid_descr = "Frequency offset (ns/sec)" }; __asm__(".globl " "__start_set_sysctl_set" ); __asm__(".globl " "__stop_set_sysctl_set"); static void const * const __set_sysctl_set_sym_sysctl___kern_ntp_pll_time_freq __attribute__((__section__("set_" "sysctl_set"))) __attribute__ ((__used__)) = &(sysctl___kern_ntp_pll_time_freq); _Static_assert ((((0x80000000 | 0x00040000) & 0xf) == 0 || ((0x80000000 | 0x00040000) & 0) == 4) && sizeof(int64_t) == sizeof (*(&time_freq)), "compile-time assertion failed") |
| 343 | "Frequency offset (ns/sec)")static struct sysctl_oid sysctl___kern_ntp_pll_time_freq = { . oid_parent = ((&(&sysctl___kern_ntp_pll)->oid_children )), .oid_children = { ((void *)0) }, .oid_number = ((-1)), .oid_kind = (4 | 0x00040000 | (0x80000000 | 0x00040000)), .oid_arg1 = ( &time_freq), .oid_arg2 = (0), .oid_name = ("time_freq"), . oid_handler = (sysctl_handle_64), .oid_fmt = ("Q"), .oid_descr = "Frequency offset (ns/sec)" }; __asm__(".globl " "__start_set_sysctl_set" ); __asm__(".globl " "__stop_set_sysctl_set"); static void const * const __set_sysctl_set_sym_sysctl___kern_ntp_pll_time_freq __attribute__((__section__("set_" "sysctl_set"))) __attribute__ ((__used__)) = &(sysctl___kern_ntp_pll_time_freq); _Static_assert ((((0x80000000 | 0x00040000) & 0xf) == 0 || ((0x80000000 | 0x00040000) & 0) == 4) && sizeof(int64_t) == sizeof (*(&time_freq)), "compile-time assertion failed"); |
| 344 | #endif |
| 345 | |
| 346 | /* |
| 347 | * ntp_adjtime() - NTP daemon application interface |
| 348 | * |
| 349 | * See the timex.h header file for synopsis and API description. Note that |
| 350 | * the timex.constant structure member has a dual purpose to set the time |
| 351 | * constant and to set the TAI offset. |
| 352 | */ |
| 353 | #ifndef _SYS_SYSPROTO_H_ |
| 354 | struct ntp_adjtime_args { |
| 355 | struct timex *tp; |
| 356 | }; |
| 357 | #endif |
| 358 | |
int
sys_ntp_adjtime(struct thread *td, struct ntp_adjtime_args *uap)
{
	struct timex ntv;	/* temporary structure */
	long freq;		/* frequency (ns/s) */
	int modes;		/* mode bits from structure */
	int error, retval;

	/*
	 * Snapshot the caller's struct timex. Every byte of ntv --
	 * padding included -- is filled from the caller's own buffer
	 * here, which matters for the copyout at the end.
	 */
	error = copyin((caddr_t)uap->tp, (caddr_t)&ntv, sizeof(ntv));
	if (error)
		return (error);

	/*
	 * Update selected clock variables - only the superuser can
	 * change anything. Note that there is no error checking here on
	 * the assumption the superuser should know what it is doing.
	 * Note that either the time constant or TAI offset are loaded
	 * from the ntv.constant member, depending on the mode bits. If
	 * the STA_PLL bit in the status word is cleared, the state and
	 * status words are reset to the initial values at boot.
	 */
	modes = ntv.modes;
	/* Only privileged callers may modify anything; reads are free. */
	if (modes)
		error = priv_check(td, PRIV_NTP_ADJTIME16);
	if (error != 0)
		return (error);
	/* All clock-variable updates and reads below run under the lock. */
	NTPADJ_LOCK()do { uintptr_t _tid = (uintptr_t)((__curthread())); if ((((((&ntpadj_lock))))->mtx_lock != 0x00000004 || !atomic_cmpset_long(&(((((&ntpadj_lock)))))->mtx_lock, 0x00000004, (_tid)))) __mtx_lock_sleep(&(((((&ntpadj_lock)))))->mtx_lock, _tid, (((0))), ((((void *)0))), ((0))); else do { (void)0; do { if (__builtin_expect((sdt_lockstat___adaptive__acquire->id), 0)) (*sdt_probe_func)(sdt_lockstat___adaptive__acquire->id, (uintptr_t) (((&ntpadj_lock))), (uintptr_t) 0, (uintptr_t) 0, (uintptr_t) 0, (uintptr_t) 0); } while (0); } while (0); } while (0);
	if (modes & MOD_MAXERROR0x0004)
		time_maxerror = ntv.maxerror;
	if (modes & MOD_ESTERROR0x0008)
		time_esterror = ntv.esterror;
	if (modes & MOD_STATUS0x0010) {
		/* Dropping STA_PLL resets the discipline to boot state. */
		if (time_status & STA_PLL0x0001 && !(ntv.status & STA_PLL0x0001)) {
			time_state = TIME_OK0;
			time_status = STA_UNSYNC0x0040;
#ifdef PPS_SYNC
			pps_shift = PPS_FAVG;
#endif /* PPS_SYNC */
		}
		/* Read-only status bits are preserved; the rest come from the caller. */
		time_status &= STA_RONLY(0x0100 | 0x0200 | 0x0400 | 0x0800 | 0x1000 | 0x2000 | 0x4000 | 0x8000);
		time_status |= ntv.status & ~STA_RONLY(0x0100 | 0x0200 | 0x0400 | 0x0800 | 0x1000 | 0x2000 | 0x4000 | 0x8000);
	}
	if (modes & MOD_TIMECONST0x0020) {
		/* Clamp the PLL time constant to [0, MAXTC]. */
		if (ntv.constant < 0)
			time_constant = 0;
		else if (ntv.constant > MAXTC10)
			time_constant = MAXTC10;
		else
			time_constant = ntv.constant;
	}
	if (modes & MOD_TAI0x0080) {
		if (ntv.constant > 0) /* XXX zero & negative numbers ? */
			time_tai = ntv.constant;
	}
#ifdef PPS_SYNC
	if (modes & MOD_PPSMAX0x0040) {
		/* Clamp the PPS averaging interval shift to [PPS_FAVG, PPS_FAVGMAX]. */
		if (ntv.shift < PPS_FAVG)
			pps_shiftmax = PPS_FAVG;
		else if (ntv.shift > PPS_FAVGMAX)
			pps_shiftmax = PPS_FAVGMAX;
		else
			pps_shiftmax = ntv.shift;
	}
#endif /* PPS_SYNC */
	if (modes & MOD_NANO0x2000)
		time_status |= STA_NANO0x2000;
	if (modes & MOD_MICRO0x1000)
		time_status &= ~STA_NANO0x2000;
	if (modes & MOD_CLKB0x4000)
		time_status |= STA_CLK0x8000;
	if (modes & MOD_CLKA0x8000)
		time_status &= ~STA_CLK0x8000;
	if (modes & MOD_FREQUENCY0x0002) {
		/* Convert [us/s * 2^16] to ns/s before range-checking. */
		freq = (ntv.freq * 1000LL) >> 16;
		if (freq > MAXFREQ500000L)
			L_LINT(time_freq, MAXFREQ)((time_freq) = (int64_t)(500000L) << 32);
		else if (freq < -MAXFREQ500000L)
			L_LINT(time_freq, -MAXFREQ)((time_freq) = (int64_t)(-500000L) << 32);
		else {
			/*
			 * ntv.freq is [PPM * 2^16] = [us/s * 2^16]
			 * time_freq is [ns/s * 2^32]
			 */
			time_freq = ntv.freq * 1000LL * 65536LL;
		}
#ifdef PPS_SYNC
		pps_freq = time_freq;
#endif /* PPS_SYNC */
	}
	if (modes & MOD_OFFSET0x0001) {
		/* Offset is ns with STA_NANO set, us otherwise. */
		if (time_status & STA_NANO0x2000)
			hardupdate(ntv.offset);
		else
			hardupdate(ntv.offset * 1000);
	}

	/*
	 * Retrieve all clock variables. Note that the TAI offset is
	 * returned only by ntp_gettime();
	 *
	 * Without PPS_SYNC the pps-related members of ntv are not
	 * written here and are returned exactly as copied in from the
	 * caller above.
	 */
	if (time_status & STA_NANO0x2000)
		ntv.offset = L_GINT(time_offset)((time_offset) < 0 ? -(-(time_offset) >> 32) : (time_offset) >> 32);
	else
		ntv.offset = L_GINT(time_offset)((time_offset) < 0 ? -(-(time_offset) >> 32) : (time_offset) >> 32) / 1000; /* XXX rounding ? */
	ntv.freq = L_GINT((time_freq / 1000LL) << 16)(((time_freq / 1000LL) << 16) < 0 ? -(-((time_freq / 1000LL) << 16) >> 32) : ((time_freq / 1000LL) << 16) >> 32);
	ntv.maxerror = time_maxerror;
	ntv.esterror = time_esterror;
	ntv.status = time_status;
	ntv.constant = time_constant;
	if (time_status & STA_NANO0x2000)
		ntv.precision = time_precision;
	else
		ntv.precision = time_precision / 1000;
	ntv.tolerance = MAXFREQ500000L * SCALE_PPM(65536 / 1000);
#ifdef PPS_SYNC
	ntv.shift = pps_shift;
	ntv.ppsfreq = L_GINT((pps_freq / 1000LL) << 16)(((pps_freq / 1000LL) << 16) < 0 ? -(-((pps_freq / 1000LL) << 16) >> 32) : ((pps_freq / 1000LL) << 16) >> 32);
	if (time_status & STA_NANO0x2000)
		ntv.jitter = pps_jitter;
	else
		ntv.jitter = pps_jitter / 1000;
	ntv.stabil = pps_stabil;
	ntv.calcnt = pps_calcnt;
	ntv.errcnt = pps_errcnt;
	ntv.jitcnt = pps_jitcnt;
	ntv.stbcnt = pps_stbcnt;
#endif /* PPS_SYNC */
	retval = ntp_is_time_error(time_status) ? TIME_ERROR5 : time_state;
	NTPADJ_UNLOCK()do { uintptr_t _tid = (uintptr_t)((__curthread())); if (((((&ntpadj_lock))))->lock_object.lo_data == 0) do { (void)0; do { if (__builtin_expect((sdt_lockstat___adaptive__release->id), 0)) (*sdt_probe_func)(sdt_lockstat___adaptive__release->id, (uintptr_t) (((&ntpadj_lock))), (uintptr_t) 0, (uintptr_t) 0, (uintptr_t) 0, (uintptr_t) 0); } while (0); } while (0); if (((((&ntpadj_lock))))->mtx_lock != _tid || !atomic_cmpset_long(&(((((&ntpadj_lock)))))->mtx_lock, (_tid), 0x00000004)) __mtx_unlock_sleep(&(((((&ntpadj_lock)))))->mtx_lock, (((0))), ((((void *)0))), ((0))); } while (0);

	/*
	 * NOTE(review): analyzers flag copyout() of a struct whose
	 * padding was never cleared (struct timex presumably has
	 * alignment holes on LP64 -- confirm layout). Here every byte
	 * of ntv, padding included, was copyin()'d from the caller's
	 * own buffer above, so no kernel stack data can leak; still, a
	 * bzero of ntv before use would be the defensive choice, as was
	 * done for the flagged ntp_gettime path.
	 */
	error = copyout((caddr_t)&ntv, (caddr_t)uap->tp, sizeof(ntv));
	if (error == 0)
		td->td_retvaltd_uretoff.tdu_retval[0] = retval;
	return (error);
}
| 494 | |
| 495 | /* |
| 496 | * second_overflow() - called after ntp_tick_adjust() |
| 497 | * |
| 498 | * This routine is ordinarily called immediately following the above |
| 499 | * routine ntp_tick_adjust(). While these two routines are normally |
| 500 | * combined, they are separated here only for the purposes of |
| 501 | * simulation. |
| 502 | */ |
/*
 * Crank the leap-second state machine and compute the clock adjustment
 * for the next second.  *newsec is the upcoming second (may be moved
 * +-1 s across a leap); *adjustment receives the total slew in l_fp
 * (ns scaled by 2^32) format.
 */
void
ntp_update_second(int64_t *adjustment, time_t *newsec)
{
	int tickrate;
	l_fp ftemp;		/* 32/64-bit temporary */

	/*
	 * On rollover of the second both the nanosecond and microsecond
	 * clocks are updated and the state machine cranked as
	 * necessary. The phase adjustment to be used for the next
	 * second is calculated and the maximum error is increased by
	 * the tolerance.
	 */
	time_maxerror += MAXFREQ500000L / 1000;

	/*
	 * Leap second processing. If in leap-insert state at
	 * the end of the day, the system clock is set back one
	 * second; if in leap-delete state, the system clock is
	 * set ahead one second. The nano_time() routine or
	 * external clock driver will insure that reported time
	 * is always monotonic.
	 */
	switch (time_state) {

		/*
		 * No warning.
		 */
	case TIME_OK0:
		if (time_status & STA_INS0x0010)
			time_state = TIME_INS1;
		else if (time_status & STA_DEL0x0020)
			time_state = TIME_DEL2;
		break;

		/*
		 * Insert second 23:59:60 following second
		 * 23:59:59.
		 */
	case TIME_INS1:
		if (!(time_status & STA_INS0x0010))
			time_state = TIME_OK0;
		else if ((*newsec) % 86400 == 0) {
			/* Midnight UTC: repeat the last second and bump TAI. */
			(*newsec)--;
			time_state = TIME_OOP3;
			time_tai++;
		}
		break;

		/*
		 * Delete second 23:59:59.
		 */
	case TIME_DEL2:
		if (!(time_status & STA_DEL0x0020))
			time_state = TIME_OK0;
		else if (((*newsec) + 1) % 86400 == 0) {
			/* Skip straight past 23:59:59 and reduce TAI. */
			(*newsec)++;
			time_tai--;
			time_state = TIME_WAIT4;
		}
		break;

		/*
		 * Insert second in progress.
		 */
	case TIME_OOP3:
		time_state = TIME_WAIT4;
		break;

		/*
		 * Wait for status bits to clear.
		 */
	case TIME_WAIT4:
		if (!(time_status & (STA_INS0x0010 | STA_DEL0x0020)))
			time_state = TIME_OK0;
	}

	/*
	 * Compute the total time adjustment for the next second
	 * in ns. The offset is reduced by a factor depending on
	 * whether the PPS signal is operating. Note that the
	 * value is in effect scaled by the clock frequency,
	 * since the adjustment is added at each tick interrupt.
	 */
	ftemp = time_offset;
#ifdef PPS_SYNC
	/* XXX even if PPS signal dies we should finish adjustment ? */
	if (time_status & STA_PPSTIME0x0004 && time_status &
	    STA_PPSSIGNAL0x0100)
		L_RSHIFT(ftemp, pps_shift)do { if ((ftemp) < 0) (ftemp) = -(-(ftemp) >> (pps_shift)); else (ftemp) = (ftemp) >> (pps_shift); } while (0);
	else
		L_RSHIFT(ftemp, SHIFT_PLL + time_constant)do { if ((ftemp) < 0) (ftemp) = -(-(ftemp) >> (4 + time_constant)); else (ftemp) = (ftemp) >> (4 + time_constant); } while (0);
#else
	L_RSHIFT(ftemp, SHIFT_PLL + time_constant)do { if ((ftemp) < 0) (ftemp) = -(-(ftemp) >> (4 + time_constant)); else (ftemp) = (ftemp) >> (4 + time_constant); } while (0);
#endif /* PPS_SYNC */
	time_adj = ftemp;
	/* Consume the slewed portion of the offset; add the frequency term. */
	L_SUB(time_offset, ftemp)((time_offset) -= (ftemp));
	L_ADD(time_adj, time_freq)((time_adj) += (time_freq));

	/*
	 * Apply any correction from adjtime(2). If more than one second
	 * off we slew at a rate of 5ms/s (5000 PPM) else 500us/s (500PPM)
	 * until the last second is slewed the final < 500 usecs.
	 */
	if (time_adjtime != 0) {
		if (time_adjtime > 1000000)
			tickrate = 5000;
		else if (time_adjtime < -1000000)
			tickrate = -5000;
		else if (time_adjtime > 500)
			tickrate = 500;
		else if (time_adjtime < -500)
			tickrate = -500;
		else
			/* |time_adjtime| <= 500 here, so the narrowing to int is safe. */
			tickrate = time_adjtime;
		time_adjtime -= tickrate;
		/* tickrate is us/s; convert to ns and fold into the adjustment. */
		L_LINT(ftemp, tickrate * 1000)((ftemp) = (int64_t)(tickrate * 1000) << 32);
		L_ADD(time_adj, ftemp)((time_adj) += (ftemp));
	}
	*adjustment = time_adj;

#ifdef PPS_SYNC
	/* Age the PPS signal; declare it dead once pps_valid hits zero. */
	if (pps_valid > 0)
		pps_valid--;
	else
		time_status &= ~STA_PPSSIGNAL0x0100;
#endif /* PPS_SYNC */
}
| 631 | |
| 632 | /* |
| 633 | * ntp_init() - initialize variables and structures |
| 634 | * |
| 635 | * This routine must be called after the kernel variables hz and tick |
| 636 | * are set or changed and before the next tick interrupt. In this |
| 637 | * particular implementation, these values are assumed set elsewhere in |
| 638 | * the kernel. The design allows the clock frequency and tick interval |
| 639 | * to be changed while the system is running. So, this routine should |
| 640 | * probably be integrated with the code that does that. |
| 641 | */ |
static void
ntp_init(void)
{

	/*
	 * The following variables are initialized only at startup. Only
	 * those structures not cleared by the compiler need to be
	 * initialized, and these only in the simulator. In the actual
	 * kernel, any nonzero values here will quickly evaporate.
	 */
	/* Clear the PLL phase and frequency accumulators (l_fp zero). */
	L_CLR(time_offset)((time_offset) = 0);
	L_CLR(time_freq)((time_freq) = 0);
#ifdef PPS_SYNC
	/* Reset the PPS three-stage median filter and frequency state. */
	pps_tf[0].tv_sec = pps_tf[0].tv_nsec = 0;
	pps_tf[1].tv_sec = pps_tf[1].tv_nsec = 0;
	pps_tf[2].tv_sec = pps_tf[2].tv_nsec = 0;
	pps_fcount = 0;
	L_CLR(pps_freq)((pps_freq) = 0);
#endif /* PPS_SYNC */
}
| 662 | |
| 663 | SYSINIT(ntpclocks, SI_SUB_CLOCKS, SI_ORDER_MIDDLE, ntp_init, NULL)static struct sysinit ntpclocks_sys_init = { SI_SUB_CLOCKS, SI_ORDER_MIDDLE , (sysinit_cfunc_t)(sysinit_nfunc_t)ntp_init, ((void *)(((void *)0))) }; __asm__(".globl " "__start_set_sysinit_set"); __asm__ (".globl " "__stop_set_sysinit_set"); static void const * const __set_sysinit_set_sym_ntpclocks_sys_init __attribute__((__section__ ("set_" "sysinit_set"))) __attribute__((__used__)) = &(ntpclocks_sys_init ); |
| 664 | |
| 665 | /* |
| 666 | * hardupdate() - local clock update |
| 667 | * |
| 668 | * This routine is called by ntp_adjtime() to update the local clock |
| 669 | * phase and frequency. The implementation is of an adaptive-parameter, |
| 670 | * hybrid phase/frequency-lock loop (PLL/FLL). The routine computes new |
| 671 | * time and frequency offset estimates for each call. If the kernel PPS |
| 672 | * discipline code is configured (PPS_SYNC), the PPS signal itself |
| 673 | * determines the new time offset, instead of the calling argument. |
| 674 | * Presumably, calls to ntp_adjtime() occur only when the caller |
| 675 | * believes the local clock is valid within some bound (+-128 ms with |
| 676 | * NTP). If the caller's time is far different than the PPS time, an |
| 677 | * argument will ensue, and it's not clear who will lose. |
| 678 | * |
| 679 | * For uncompensated quartz crystal oscillators and nominal update |
| 680 | * intervals less than 256 s, operation should be in phase-lock mode, |
| 681 | * where the loop is disciplined to phase. For update intervals greater |
| 682 | * than 1024 s, operation should be in frequency-lock mode, where the |
| 683 | * loop is disciplined to frequency. Between 256 s and 1024 s, the mode |
| 684 | * is selected by the STA_MODE status bit. |
| 685 | */ |
| 686 | static void |
| 687 | hardupdate(offset) |
| 688 | long offset; /* clock offset (ns) */ |
| 689 | { |
| 690 | long mtemp; |
| 691 | l_fp ftemp; |
| 692 | |
| 693 | NTPADJ_ASSERT_LOCKED()(void)0; |
| 694 | |
| 695 | /* |
| 696 | * Select how the phase is to be controlled and from which |
| 697 | * source. If the PPS signal is present and enabled to |
| 698 | * discipline the time, the PPS offset is used; otherwise, the |
| 699 | * argument offset is used. |
| 700 | */ |
| 701 | if (!(time_status & STA_PLL0x0001)) |
| 702 | return; |
| 703 | if (!(time_status & STA_PPSTIME0x0004 && time_status & |
| 704 | STA_PPSSIGNAL0x0100)) { |
| 705 | if (offset > MAXPHASE500000000L) |
| 706 | time_monitor = MAXPHASE500000000L; |
| 707 | else if (offset < -MAXPHASE500000000L) |
| 708 | time_monitor = -MAXPHASE500000000L; |
| 709 | else |
| 710 | time_monitor = offset; |
| 711 | L_LINT(time_offset, time_monitor)((time_offset) = (int64_t)(time_monitor) << 32); |
| 712 | } |
| 713 | |
| 714 | /* |
| 715 | * Select how the frequency is to be controlled and in which |
| 716 | * mode (PLL or FLL). If the PPS signal is present and enabled |
| 717 | * to discipline the frequency, the PPS frequency is used; |
| 718 | * otherwise, the argument offset is used to compute it. |
| 719 | */ |
| 720 | if (time_status & STA_PPSFREQ0x0002 && time_status & STA_PPSSIGNAL0x0100) { |
| 721 | time_reftime = time_uptime; |
| 722 | return; |
| 723 | } |
| 724 | if (time_status & STA_FREQHOLD0x0080 || time_reftime == 0) |
| 725 | time_reftime = time_uptime; |
| 726 | mtemp = time_uptime - time_reftime; |
| 727 | L_LINT(ftemp, time_monitor)((ftemp) = (int64_t)(time_monitor) << 32); |
| 728 | L_RSHIFT(ftemp, (SHIFT_PLL + 2 + time_constant) << 1)do { if ((ftemp) < 0) (ftemp) = -(-(ftemp) >> ((4 + 2 + time_constant) << 1)); else (ftemp) = (ftemp) >> ((4 + 2 + time_constant) << 1); } while (0); |
| 729 | L_MPY(ftemp, mtemp)((ftemp) *= (mtemp)); |
| 730 | L_ADD(time_freq, ftemp)((time_freq) += (ftemp)); |
| 731 | time_status &= ~STA_MODE0x4000; |
| 732 | if (mtemp >= MINSEC256 && (time_status & STA_FLL0x0008 || mtemp > |
| 733 | MAXSEC2048)) { |
| 734 | L_LINT(ftemp, (time_monitor << 4) / mtemp)((ftemp) = (int64_t)((time_monitor << 4) / mtemp) << 32); |
| 735 | L_RSHIFT(ftemp, SHIFT_FLL + 4)do { if ((ftemp) < 0) (ftemp) = -(-(ftemp) >> (2 + 4 )); else (ftemp) = (ftemp) >> (2 + 4); } while (0); |
| 736 | L_ADD(time_freq, ftemp)((time_freq) += (ftemp)); |
| 737 | time_status |= STA_MODE0x4000; |
| 738 | } |
| 739 | time_reftime = time_uptime; |
| 740 | if (L_GINT(time_freq)((time_freq) < 0 ? -(-(time_freq) >> 32) : (time_freq ) >> 32) > MAXFREQ500000L) |
| 741 | L_LINT(time_freq, MAXFREQ)((time_freq) = (int64_t)(500000L) << 32); |
| 742 | else if (L_GINT(time_freq)((time_freq) < 0 ? -(-(time_freq) >> 32) : (time_freq ) >> 32) < -MAXFREQ500000L) |
| 743 | L_LINT(time_freq, -MAXFREQ)((time_freq) = (int64_t)(-500000L) << 32); |
| 744 | } |
| 745 | |
| 746 | #ifdef PPS_SYNC |
| 747 | /* |
| 748 | * hardpps() - discipline CPU clock oscillator to external PPS signal |
| 749 | * |
| 750 | * This routine is called at each PPS interrupt in order to discipline |
| 751 | * the CPU clock oscillator to the PPS signal. There are two independent |
| 752 | * first-order feedback loops, one for the phase, the other for the |
| 753 | * frequency. The phase loop measures and grooms the PPS phase offset |
| 754 | * and leaves it in a handy spot for the seconds overflow routine. The |
| 755 | * frequency loop averages successive PPS phase differences and |
| 756 | * calculates the PPS frequency offset, which is also processed by the |
| 757 | * seconds overflow routine. The code requires the caller to capture the |
| 758 | * time and architecture-dependent hardware counter values in |
| 759 | * nanoseconds at the on-time PPS signal transition. |
| 760 | * |
| 761 | * Note that, on some Unix systems this routine runs at an interrupt |
| 762 | * priority level higher than the timer interrupt routine hardclock(). |
| 763 | * Therefore, the variables used are distinct from the hardclock() |
| 764 | * variables, except for the actual time and frequency variables, which |
| 765 | * are determined by this routine and updated atomically. |
| 766 | */ |
/*
 * NOTE(review): analyzer-rendered source — macro invocations below are shown
 * fused with their preprocessor expansions; all original tokens are preserved
 * verbatim.  hardpps() disciplines the clock to an external PPS pulse: it
 * range-gates the sample, median-filters three timestamps, derives jitter and
 * frequency offsets, and adapts the calibration interval (pps_shift).
 * Runs with ntpadj_lock held for the whole body; all exits go through "out:".
 */
| 767 | void
| 768 | hardpps(tsp, nsec)
| 769 | struct timespec *tsp; /* time at PPS */
| 770 | long nsec; /* hardware counter at PPS */
| 771 | {
| 772 | long u_sec, u_nsec, v_nsec; /* temps */
| 773 | l_fp ftemp;
| 774 | 
| 775 | NTPADJ_LOCK()do { uintptr_t _tid = (uintptr_t)((__curthread())); if (((((( &ntpadj_lock))))->mtx_lock != 0x00000004 || !atomic_cmpset_long (&(((((&ntpadj_lock)))))->mtx_lock, 0x00000004, (_tid )))) __mtx_lock_sleep(&(((((&ntpadj_lock)))))->mtx_lock , _tid, (((0))), ((((void *)0))), ((0))); else do { (void)0; do { if (__builtin_expect((sdt_lockstat___adaptive__acquire-> id), 0)) (*sdt_probe_func)(sdt_lockstat___adaptive__acquire-> id, (uintptr_t) (((&ntpadj_lock))), (uintptr_t) 0, (uintptr_t ) 0, (uintptr_t) 0, (uintptr_t) 0); } while (0); } while (0); } while (0);
| 776 | 
| 777 | /*
| 778 | * The signal is first processed by a range gate and frequency
| 779 | * discriminator. The range gate rejects noise spikes outside
| 780 | * the range +-500 us. The frequency discriminator rejects input
| 781 | * signals with apparent frequency outside the range 1 +-500
| 782 | * PPM. If two hits occur in the same second, we ignore the
| 783 | * later hit; if not and a hit occurs outside the range gate,
| 784 | * keep the later hit for later comparison, but do not process
| 785 | * it.
| 786 | */
| 787 | time_status |= STA_PPSSIGNAL0x0100 | STA_PPSJITTER0x0200;
| 788 | time_status &= ~(STA_PPSWANDER0x0400 | STA_PPSERROR0x0800);
/* presumably re-arms the PPS signal-present watchdog (PPS_VALID defined earlier in file) — confirm */
| 789 | pps_valid = PPS_VALID;
| 790 | u_sec = tsp->tv_sec;
| 791 | u_nsec = tsp->tv_nsec;
/* Fold nanoseconds into the range (-0.5 s, +0.5 s] around the nearest second. */
| 792 | if (u_nsec >= (NANOSECOND1000000000L >> 1)) {
| 793 | u_nsec -= NANOSECOND1000000000L;
| 794 | u_sec++;
| 795 | }
| 796 | v_nsec = u_nsec - pps_tf[0].tv_nsec;
| 797 | if (u_sec == pps_tf[0].tv_sec && v_nsec < NANOSECOND1000000000L - MAXFREQ500000L)
| 798 | goto out;
/* Shift the 3-entry sample history; newest sample lands in pps_tf[0]. */
| 799 | pps_tf[2] = pps_tf[1];
| 800 | pps_tf[1] = pps_tf[0];
| 801 | pps_tf[0].tv_sec = u_sec;
| 802 | pps_tf[0].tv_nsec = u_nsec;
| 803 | 
| 804 | /*
| 805 | * Compute the difference between the current and previous
| 806 | * counter values. If the difference exceeds 0.5 s, assume it
| 807 | * has wrapped around, so correct 1.0 s. If the result exceeds
| 808 | * the tick interval, the sample point has crossed a tick
| 809 | * boundary during the last second, so correct the tick. Very
| 810 | * intricate.
| 811 | */
| 812 | u_nsec = nsec;
| 813 | if (u_nsec > (NANOSECOND1000000000L >> 1))
| 814 | u_nsec -= NANOSECOND1000000000L;
| 815 | else if (u_nsec < -(NANOSECOND1000000000L >> 1))
| 816 | u_nsec += NANOSECOND1000000000L;
| 817 | pps_fcount += u_nsec;
/* Range gate: discard samples whose phase step exceeds +-MAXFREQ (500 ppm). */
| 818 | if (v_nsec > MAXFREQ500000L || v_nsec < -MAXFREQ500000L)
| 819 | goto out;
| 820 | time_status &= ~STA_PPSJITTER0x0200;
| 821 | 
| 822 | /*
| 823 | * A three-stage median filter is used to help denoise the PPS
| 824 | * time. The median sample becomes the time offset estimate; the
| 825 | * difference between the other two samples becomes the time
| 826 | * dispersion (jitter) estimate.
| 827 | */
| 828 | if (pps_tf[0].tv_nsec > pps_tf[1].tv_nsec) {
| 829 | if (pps_tf[1].tv_nsec > pps_tf[2].tv_nsec) {
| 830 | v_nsec = pps_tf[1].tv_nsec; /* 0 1 2 */
| 831 | u_nsec = pps_tf[0].tv_nsec - pps_tf[2].tv_nsec;
| 832 | } else if (pps_tf[2].tv_nsec > pps_tf[0].tv_nsec) {
| 833 | v_nsec = pps_tf[0].tv_nsec; /* 2 0 1 */
| 834 | u_nsec = pps_tf[2].tv_nsec - pps_tf[1].tv_nsec;
| 835 | } else {
| 836 | v_nsec = pps_tf[2].tv_nsec; /* 0 2 1 */
| 837 | u_nsec = pps_tf[0].tv_nsec - pps_tf[1].tv_nsec;
| 838 | }
| 839 | } else {
| 840 | if (pps_tf[1].tv_nsec < pps_tf[2].tv_nsec) {
| 841 | v_nsec = pps_tf[1].tv_nsec; /* 2 1 0 */
| 842 | u_nsec = pps_tf[2].tv_nsec - pps_tf[0].tv_nsec;
| 843 | } else if (pps_tf[2].tv_nsec < pps_tf[0].tv_nsec) {
| 844 | v_nsec = pps_tf[0].tv_nsec; /* 1 0 2 */
| 845 | u_nsec = pps_tf[1].tv_nsec - pps_tf[2].tv_nsec;
| 846 | } else {
| 847 | v_nsec = pps_tf[2].tv_nsec; /* 1 2 0 */
| 848 | u_nsec = pps_tf[1].tv_nsec - pps_tf[0].tv_nsec;
| 849 | }
| 850 | }
| 851 | 
| 852 | /*
| 853 | * Nominal jitter is due to PPS signal noise and interrupt
| 854 | * latency. If it exceeds the popcorn threshold, the sample is
| 855 | * discarded. otherwise, if so enabled, the time offset is
| 856 | * updated. We can tolerate a modest loss of data here without
| 857 | * much degrading time accuracy.
| 858 | *
| 859 | * The measurements being checked here were made with the system
| 860 | * timecounter, so the popcorn threshold is not allowed to fall below
| 861 | * the number of nanoseconds in two ticks of the timecounter. For a
| 862 | * timecounter running faster than 1 GHz the lower bound is 2ns, just
| 863 | * to avoid a nonsensical threshold of zero.
| 864 | */
| 865 | if (u_nsec > lmax(pps_jitter << PPS_POPCORN,
| 866 | 2 * (NANOSECOND1000000000L / (long)qmin(NANOSECOND1000000000L, tc_getfrequency())))) {
| 867 | time_status |= STA_PPSJITTER0x0200;
| 868 | pps_jitcnt++;
| 869 | } else if (time_status & STA_PPSTIME0x0004) {
| 870 | time_monitor = -v_nsec;
| 871 | L_LINT(time_offset, time_monitor)((time_offset) = (int64_t)(time_monitor) << 32);
| 872 | }
/* Exponential average of jitter with time constant 2^PPS_FAVG. */
| 873 | pps_jitter += (u_nsec - pps_jitter) >> PPS_FAVG;
| 874 | u_sec = pps_tf[0].tv_sec - pps_lastsec;
/* Calibration interval (2^pps_shift seconds) not yet elapsed — done for now. */
| 875 | if (u_sec < (1 << pps_shift))
| 876 | goto out;
| 877 | 
| 878 | /*
| 879 | * At the end of the calibration interval the difference between
| 880 | * the first and last counter values becomes the scaled
| 881 | * frequency. It will later be divided by the length of the
| 882 | * interval to determine the frequency update. If the frequency
| 883 | * exceeds a sanity threshold, or if the actual calibration
| 884 | * interval is not equal to the expected length, the data are
| 885 | * discarded. We can tolerate a modest loss of data here without
| 886 | * much degrading frequency accuracy.
| 887 | */
| 888 | pps_calcnt++;
| 889 | v_nsec = -pps_fcount;
| 890 | pps_lastsec = pps_tf[0].tv_sec;
| 891 | pps_fcount = 0;
| 892 | u_nsec = MAXFREQ500000L << pps_shift;
| 893 | if (v_nsec > u_nsec || v_nsec < -u_nsec || u_sec != (1 << pps_shift)) {
| 894 | time_status |= STA_PPSERROR0x0800;
| 895 | pps_errcnt++;
| 896 | goto out;
| 897 | }
| 898 | 
| 899 | /*
| 900 | * Here the raw frequency offset and wander (stability) is
| 901 | * calculated. If the wander is less than the wander threshold
| 902 | * for four consecutive averaging intervals, the interval is
| 903 | * doubled; if it is greater than the threshold for four
| 904 | * consecutive intervals, the interval is halved. The scaled
| 905 | * frequency offset is converted to frequency offset. The
| 906 | * stability metric is calculated as the average of recent
| 907 | * frequency changes, but is used only for performance
| 908 | * monitoring.
| 909 | */
| 910 | L_LINT(ftemp, v_nsec)((ftemp) = (int64_t)(v_nsec) << 32);
| 911 | L_RSHIFT(ftemp, pps_shift)do { if ((ftemp) < 0) (ftemp) = -(-(ftemp) >> (pps_shift )); else (ftemp) = (ftemp) >> (pps_shift); } while (0);
| 912 | L_SUB(ftemp, pps_freq)((ftemp) -= (pps_freq));
| 913 | u_nsec = L_GINT(ftemp)((ftemp) < 0 ? -(-(ftemp) >> 32) : (ftemp) >> 32 );
| 914 | if (u_nsec > PPS_MAXWANDER) {
| 915 | L_LINT(ftemp, PPS_MAXWANDER)((ftemp) = (int64_t)(PPS_MAXWANDER) << 32);
| 916 | pps_intcnt--;
| 917 | time_status |= STA_PPSWANDER0x0400;
| 918 | pps_stbcnt++;
| 919 | } else if (u_nsec < -PPS_MAXWANDER) {
| 920 | L_LINT(ftemp, -PPS_MAXWANDER)((ftemp) = (int64_t)(-PPS_MAXWANDER) << 32);
| 921 | pps_intcnt--;
| 922 | time_status |= STA_PPSWANDER0x0400;
| 923 | pps_stbcnt++;
| 924 | } else {
| 925 | pps_intcnt++;
| 926 | }
/* pps_intcnt saturates at +-4; four good/bad intervals widen/narrow 2^pps_shift. */
| 927 | if (pps_intcnt >= 4) {
| 928 | pps_intcnt = 4;
| 929 | if (pps_shift < pps_shiftmax) {
| 930 | pps_shift++;
| 931 | pps_intcnt = 0;
| 932 | }
| 933 | } else if (pps_intcnt <= -4 || pps_shift > pps_shiftmax) {
| 934 | pps_intcnt = -4;
| 935 | if (pps_shift > PPS_FAVG) {
| 936 | pps_shift--;
| 937 | pps_intcnt = 0;
| 938 | }
| 939 | }
| 940 | if (u_nsec < 0)
| 941 | u_nsec = -u_nsec;
| 942 | pps_stabil += (u_nsec * SCALE_PPM(65536 / 1000) - pps_stabil) >> PPS_FAVG;
| 943 | 
| 944 | /*
| 945 | * The PPS frequency is recalculated and clamped to the maximum
| 946 | * MAXFREQ. If enabled, the system clock frequency is updated as
| 947 | * well.
| 948 | */
| 949 | L_ADD(pps_freq, ftemp)((pps_freq) += (ftemp));
| 950 | u_nsec = L_GINT(pps_freq)((pps_freq) < 0 ? -(-(pps_freq) >> 32) : (pps_freq) >> 32);
| 951 | if (u_nsec > MAXFREQ500000L)
| 952 | L_LINT(pps_freq, MAXFREQ)((pps_freq) = (int64_t)(500000L) << 32);
| 953 | else if (u_nsec < -MAXFREQ500000L)
| 954 | L_LINT(pps_freq, -MAXFREQ)((pps_freq) = (int64_t)(-500000L) << 32);
| 955 | if (time_status & STA_PPSFREQ0x0002)
| 956 | time_freq = pps_freq;
| 957 | 
/* Single exit point: every path funnels here to release ntpadj_lock. */
| 958 | out:
| 959 | NTPADJ_UNLOCK()do { uintptr_t _tid = (uintptr_t)((__curthread())); if (((((& ntpadj_lock))))->lock_object.lo_data == 0) do { (void)0; do { if (__builtin_expect((sdt_lockstat___adaptive__release-> id), 0)) (*sdt_probe_func)(sdt_lockstat___adaptive__release-> id, (uintptr_t) (((&ntpadj_lock))), (uintptr_t) 0, (uintptr_t ) 0, (uintptr_t) 0, (uintptr_t) 0); } while (0); } while (0); if (((((&ntpadj_lock))))->mtx_lock != _tid || !atomic_cmpset_long (&(((((&ntpadj_lock)))))->mtx_lock, (_tid), 0x00000004 )) __mtx_unlock_sleep(&(((((&ntpadj_lock)))))->mtx_lock , (((0))), ((((void *)0))), ((0))); } while (0);
| 960 | }
| 961 | #endif /* PPS_SYNC */ |
| 962 | |
| 963 | #ifndef _SYS_SYSPROTO_H_
/*
 * Fallback declaration of the adjtime(2) argument layout; normally this
 * struct comes from the generated <sys/sysproto.h>.  Both members are
 * userland pointers and must only be accessed via copyin()/copyout().
 */
| 964 | struct adjtime_args {
| 965 | struct timeval *delta;
| 966 | struct timeval *olddelta;
| 967 | };
| 968 | #endif
| 969 | /* ARGSUSED */
/*
 * adjtime(2) system call glue: copy the caller's delta in (when supplied),
 * hand it to kern_adjtime(), and copy the previously pending adjustment
 * back out when the caller asked for it.  Returns 0 or an errno value
 * from copyin/copyout/kern_adjtime.
 */
| 970 | int
| 971 | sys_adjtime(struct thread *td, struct adjtime_args *uap)
| 972 | {
| 973 | struct timeval delta, olddelta, *deltap;
| 974 | int error;
| 975 | 
| 976 | if (uap->delta) {
| 977 | error = copyin(uap->delta, &delta, sizeof(delta));
| 978 | if (error)
| 979 | return (error);
/* NOTE(review): "δ" below is HTML-entity damage in this rendering for "&delta;" (address of the local). */
| 980 | deltap = δ
| 981 | } else
| 982 | deltap = NULL((void *)0);
| 983 | error = kern_adjtime(td, deltap, &olddelta);
/* Copy the old delta out only on success; kern_adjtime() fully writes olddelta. */
| 984 | if (uap->olddelta && error == 0)
| 985 | error = copyout(&olddelta, uap->olddelta, sizeof(olddelta));
| 986 | return (error);
| 987 | }
| 988 | |
/*
 * Kernel side of adjtime(2).  Atomically (under ntpadj_lock) reads the
 * pending slew time_adjtime (microseconds) and, when delta is non-NULL,
 * replaces it with the caller's request after a PRIV_ADJTIME privilege
 * check.  The previous pending value is normalized into *olddelta when
 * requested.  Note ltw is assigned and read only under the same
 * delta != NULL guard, so it is never used uninitialized.
 */
| 989 | int
| 990 | kern_adjtime(struct thread *td, struct timeval *delta, struct timeval *olddelta)
| 991 | {
| 992 | struct timeval atv;
| 993 | int64_t ltr, ltw;
| 994 | int error;
| 995 | 
| 996 | if (delta != NULL((void *)0)) {
| 997 | error = priv_check(td, PRIV_ADJTIME15);
| 998 | if (error != 0)
| 999 | return (error);
/* Convert the request to whole microseconds in 64 bits before taking the lock. */
| 1000 | ltw = (int64_t)delta->tv_sec * 1000000 + delta->tv_usec;
| 1001 | }
| 1002 | NTPADJ_LOCK()do { uintptr_t _tid = (uintptr_t)((__curthread())); if (((((( &ntpadj_lock))))->mtx_lock != 0x00000004 || !atomic_cmpset_long (&(((((&ntpadj_lock)))))->mtx_lock, 0x00000004, (_tid )))) __mtx_lock_sleep(&(((((&ntpadj_lock)))))->mtx_lock , _tid, (((0))), ((((void *)0))), ((0))); else do { (void)0; do { if (__builtin_expect((sdt_lockstat___adaptive__acquire-> id), 0)) (*sdt_probe_func)(sdt_lockstat___adaptive__acquire-> id, (uintptr_t) (((&ntpadj_lock))), (uintptr_t) 0, (uintptr_t ) 0, (uintptr_t) 0, (uintptr_t) 0); } while (0); } while (0); } while (0);
| 1003 | ltr = time_adjtime;
| 1004 | if (delta != NULL((void *)0))
| 1005 | time_adjtime = ltw;
| 1006 | NTPADJ_UNLOCK()do { uintptr_t _tid = (uintptr_t)((__curthread())); if (((((& ntpadj_lock))))->lock_object.lo_data == 0) do { (void)0; do { if (__builtin_expect((sdt_lockstat___adaptive__release-> id), 0)) (*sdt_probe_func)(sdt_lockstat___adaptive__release-> id, (uintptr_t) (((&ntpadj_lock))), (uintptr_t) 0, (uintptr_t ) 0, (uintptr_t) 0, (uintptr_t) 0); } while (0); } while (0); if (((((&ntpadj_lock))))->mtx_lock != _tid || !atomic_cmpset_long (&(((((&ntpadj_lock)))))->mtx_lock, (_tid), 0x00000004 )) __mtx_unlock_sleep(&(((((&ntpadj_lock)))))->mtx_lock , (((0))), ((((void *)0))), ((0))); } while (0);
| 1007 | if (olddelta != NULL((void *)0)) {
/* Split microseconds back into sec/usec; C truncates toward zero, so a
 * negative remainder is normalized to 0 <= tv_usec < 1000000. */
| 1008 | atv.tv_sec = ltr / 1000000;
| 1009 | atv.tv_usec = ltr % 1000000;
| 1010 | if (atv.tv_usec < 0) {
| 1011 | atv.tv_usec += 1000000;
| 1012 | atv.tv_sec--;
| 1013 | }
| 1014 | *olddelta = atv;
| 1015 | }
| 1016 | return (0);
| 1017 | }
| 1018 | |
| 1019 | static struct callout resettodr_callout; /* drives the periodic RTC writeback below */
| 1020 | static int resettodr_period = 1800; /* seconds between RTC saves; <= 0 disables rescheduling */
| 1021 | |
/*
 * Callout handler: periodically push system time back into the RTC via
 * resettodr(), skipping the save while NTP reports the clock is in error,
 * and re-arm itself while resettodr_period stays positive.
 */
| 1022 | static void
| 1023 | periodic_resettodr(void *arg __unused__attribute__((__unused__)))
| 1024 | {
| 1025 | 
| 1026 | /*
| 1027 | * Read of time_status is lock-less, which is fine since
| 1028 | * ntp_is_time_error() operates on the consistent read value.
| 1029 | */
| 1030 | if (!ntp_is_time_error(time_status))
| 1031 | resettodr();
| 1032 | if (resettodr_period > 0)
| 1033 | callout_schedule(&resettodr_callout, resettodr_period * hz);
| 1034 | }
| 1035 | |
/*
 * shutdown_pre_sync event handler: stop the periodic callout (draining any
 * in-flight invocation) and do one final RTC save, provided the feature is
 * enabled and NTP does not flag a time error.
 */
| 1036 | static void
| 1037 | shutdown_resettodr(void *arg __unused__attribute__((__unused__)), int howto __unused__attribute__((__unused__)))
| 1038 | {
| 1039 | 
| 1040 | callout_drain(&resettodr_callout)_callout_stop_safe(&resettodr_callout, 0x0001, ((void *)0 ));
| 1041 | /* Another unlocked read of time_status */
| 1042 | if (resettodr_period > 0 && !ntp_is_time_error(time_status))
| 1043 | resettodr();
| 1044 | }
| 1045 | |
/*
 * Sysctl handler for machdep.rtc_save_period: accept the new integer value
 * (written into resettodr_period via oidp->oid_arg1 by sysctl_handle_int),
 * then reprogram or stop the callout to match.  During early boot (cold)
 * only the variable is updated; start_periodic_resettodr() arms the callout
 * later.  Returns 0 or an errno from sysctl_handle_int().
 */
| 1046 | static int
| 1047 | sysctl_resettodr_period(SYSCTL_HANDLER_ARGSstruct sysctl_oid *oidp, void *arg1, intmax_t arg2, struct sysctl_req *req)
| 1048 | {
| 1049 | int error;
| 1050 | 
| 1051 | error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
/* No new value supplied (read-only access) or copy error: nothing to reprogram. */
| 1052 | if (error || !req->newptr)
| 1053 | return (error);
| 1054 | if (cold)
| 1055 | goto done;
| 1056 | if (resettodr_period == 0)
| 1057 | callout_stop(&resettodr_callout)_callout_stop_safe(&resettodr_callout, 0, ((void *)0));
| 1058 | else
| 1059 | callout_reset(&resettodr_callout, resettodr_period * hz,callout_reset_sbt_on(((&resettodr_callout)), tick_sbt * ( (resettodr_period * hz)), 0, ((periodic_resettodr)), ((((void *)0))), (-1), 0x0100)
| 1060 | periodic_resettodr, NULL)callout_reset_sbt_on(((&resettodr_callout)), tick_sbt * ( (resettodr_period * hz)), 0, ((periodic_resettodr)), ((((void *)0))), (-1), 0x0100);
| 1061 | done:
| 1062 | return (0);
| 1063 | }
| 1064 | |
/*
 * SYSCTL_PROC expansion registering the machdep.rtc_save_period tunable
 * (int, RWTUN | MPSAFE), backed by resettodr_period with default 1800 and
 * handled by sysctl_resettodr_period() above.
 */
| 1065 | SYSCTL_PROC(_machdep, OID_AUTO, rtc_save_period, CTLTYPE_INT | CTLFLAG_RWTUN |static struct sysctl_oid sysctl___machdep_rtc_save_period = { .oid_parent = ((&(&sysctl___machdep)->oid_children )), .oid_children = { ((void *)0) }, .oid_number = ((-1)), .oid_kind = ((2 | ((0x80000000|0x40000000)|0x00080000) | 0x00040000)), .oid_arg1 = (&resettodr_period), .oid_arg2 = (1800), .oid_name = ("rtc_save_period"), .oid_handler = (sysctl_resettodr_period ), .oid_fmt = ("I"), .oid_descr = "Save system time to RTC with this period (in seconds)" }; __asm__(".globl " "__start_set_sysctl_set"); __asm__(".globl " "__stop_set_sysctl_set"); static void const * const __set_sysctl_set_sym_sysctl___machdep_rtc_save_period __attribute__((__section__("set_" "sysctl_set"))) __attribute__ ((__used__)) = &(sysctl___machdep_rtc_save_period); _Static_assert (((2 | ((0x80000000|0x40000000)|0x00080000) | 0x00040000) & 0xf) != 0, "compile-time assertion failed")
| 1066 | CTLFLAG_MPSAFE, &resettodr_period, 1800, sysctl_resettodr_period, "I",static struct sysctl_oid sysctl___machdep_rtc_save_period = { .oid_parent = ((&(&sysctl___machdep)->oid_children )), .oid_children = { ((void *)0) }, .oid_number = ((-1)), .oid_kind = ((2 | ((0x80000000|0x40000000)|0x00080000) | 0x00040000)), .oid_arg1 = (&resettodr_period), .oid_arg2 = (1800), .oid_name = ("rtc_save_period"), .oid_handler = (sysctl_resettodr_period ), .oid_fmt = ("I"), .oid_descr = "Save system time to RTC with this period (in seconds)" }; __asm__(".globl " "__start_set_sysctl_set"); __asm__(".globl " "__stop_set_sysctl_set"); static void const * const __set_sysctl_set_sym_sysctl___machdep_rtc_save_period __attribute__((__section__("set_" "sysctl_set"))) __attribute__ ((__used__)) = &(sysctl___machdep_rtc_save_period); _Static_assert (((2 | ((0x80000000|0x40000000)|0x00080000) | 0x00040000) & 0xf) != 0, "compile-time assertion failed")
| 1067 | "Save system time to RTC with this period (in seconds)")static struct sysctl_oid sysctl___machdep_rtc_save_period = { .oid_parent = ((&(&sysctl___machdep)->oid_children )), .oid_children = { ((void *)0) }, .oid_number = ((-1)), .oid_kind = ((2 | ((0x80000000|0x40000000)|0x00080000) | 0x00040000)), .oid_arg1 = (&resettodr_period), .oid_arg2 = (1800), .oid_name = ("rtc_save_period"), .oid_handler = (sysctl_resettodr_period ), .oid_fmt = ("I"), .oid_descr = "Save system time to RTC with this period (in seconds)" }; __asm__(".globl " "__start_set_sysctl_set"); __asm__(".globl " "__stop_set_sysctl_set"); static void const * const __set_sysctl_set_sym_sysctl___machdep_rtc_save_period __attribute__((__section__("set_" "sysctl_set"))) __attribute__ ((__used__)) = &(sysctl___machdep_rtc_save_period); _Static_assert (((2 | ((0x80000000|0x40000000)|0x00080000) | 0x00040000) & 0xf) != 0, "compile-time assertion failed");
| 1068 | |
/*
 * SYSINIT hook (SI_SUB_LAST): register the pre-sync shutdown handler,
 * initialize the callout as MPSAFE, and arm the first periodic RTC save
 * unless the feature was tuned off (resettodr_period == 0).
 */
| 1069 | static void
| 1070 | start_periodic_resettodr(void *arg __unused__attribute__((__unused__)))
| 1071 | {
| 1072 | 
| 1073 | EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_resettodr, NULL,eventhandler_register(((void *)0), "shutdown_pre_sync", shutdown_resettodr , ((void *)0), 0)
| 1074 | SHUTDOWN_PRI_FIRST)eventhandler_register(((void *)0), "shutdown_pre_sync", shutdown_resettodr , ((void *)0), 0);
| 1075 | callout_init(&resettodr_callout, 1);
| 1076 | if (resettodr_period == 0)
| 1077 | return;
| 1078 | callout_reset(&resettodr_callout, resettodr_period * hz,callout_reset_sbt_on(((&resettodr_callout)), tick_sbt * ( (resettodr_period * hz)), 0, ((periodic_resettodr)), ((((void *)0))), (-1), 0x0100)
| 1079 | periodic_resettodr, NULL)callout_reset_sbt_on(((&resettodr_callout)), tick_sbt * ( (resettodr_period * hz)), 0, ((periodic_resettodr)), ((((void *)0))), (-1), 0x0100);
| 1080 | }
| 1081 | |
/* SYSINIT expansion: run start_periodic_resettodr() at SI_SUB_LAST/SI_ORDER_MIDDLE during boot. */
| 1082 | SYSINIT(periodic_resettodr, SI_SUB_LAST, SI_ORDER_MIDDLE,static struct sysinit periodic_resettodr_sys_init = { SI_SUB_LAST , SI_ORDER_MIDDLE, (sysinit_cfunc_t)(sysinit_nfunc_t)start_periodic_resettodr , ((void *)(((void *)0))) }; __asm__(".globl " "__start_set_sysinit_set" ); __asm__(".globl " "__stop_set_sysinit_set"); static void const * const __set_sysinit_set_sym_periodic_resettodr_sys_init __attribute__ ((__section__("set_" "sysinit_set"))) __attribute__((__used__ )) = &(periodic_resettodr_sys_init)
| 1083 | start_periodic_resettodr, NULL)static struct sysinit periodic_resettodr_sys_init = { SI_SUB_LAST , SI_ORDER_MIDDLE, (sysinit_cfunc_t)(sysinit_nfunc_t)start_periodic_resettodr , ((void *)(((void *)0))) }; __asm__(".globl " "__start_set_sysinit_set" ); __asm__(".globl " "__stop_set_sysinit_set"); static void const * const __set_sysinit_set_sym_periodic_resettodr_sys_init __attribute__ ((__section__("set_" "sysinit_set"))) __attribute__((__used__ )) = &(periodic_resettodr_sys_init);