sys: convert Linux driver to use generic offset functions

Strip all slewing code (adjtime(), frequency-locked nano PLL, fast tick
slewing) from the Linux driver and use the new generic frequency-only
slewing instead. The advantages include stable clock control with very
short update intervals, good control of the slewing frequency, cheap
cooking of raw time stamps and an unlimited frequency offset.
This commit is contained in:
Miroslav Lichvar 2014-05-14 17:08:00 +02:00
parent fc235a3f16
commit ec4542bbe4
4 changed files with 42 additions and 800 deletions

2
configure vendored
View file

@ -326,7 +326,7 @@ case $SYSTEM in
esac
;;
Linux* )
EXTRA_OBJECTS="sys_linux.o wrap_adjtimex.o"
EXTRA_OBJECTS="sys_generic.o sys_linux.o wrap_adjtimex.o"
try_linuxcaps=1
try_rtc=1
try_setsched=1

View file

@ -53,22 +53,19 @@ int LockAll = 0;
#endif
#include "localp.h"
#include "sys_generic.h"
#include "sys_linux.h"
#include "sched.h"
#include "util.h"
#include "conf.h"
#include "logging.h"
#include "wrap_adjtimex.h"
static long current_tick;
/* This is the value of tick, in seconds, including the current vernier
frequency term */
static double current_total_tick;
/* This is the uncompensated system tick value */
static int nominal_tick;
/* The maximum amount by which 'tick' can be biased away from 'nominal_tick'
(sys_adjtimex() in the kernel bounds this to 10%) */
static int max_tick_bias;
/* This is the scaling required to go between absolute ppm and the
scaled ppm used as an argument to adjtimex. Because chronyd is to an extent
'closed loop' maybe it doesn't matter if this is wrongly determined, UNLESS
@ -87,27 +84,12 @@ static double freq_scale;
static int hz;
static double dhz; /* And dbl prec version of same for arithmetic */
/* ================================================== */
/* Flag indicating whether adjtimex() returns the remaining time adjustment
or not. If not we have to read the outstanding adjustment by setting it to
zero, examining the return value and setting the outstanding adjustment back
again. */
static int have_readonly_adjtime;
/* Flag indicating whether kernel supports PLL in nanosecond resolution.
If supported, it will be used instead of adjtime() for very small
adjustments. */
static int have_nanopll;
/* Flag indicating whether adjtimex() can step the clock */
static int have_setoffset;
/* ================================================== */
static void handle_end_of_slew(void *anything);
/* The assumed rate at which the effective frequency and tick values are
updated in the kernel */
static int tick_update_hz;
/* ================================================== */
@ -122,521 +104,6 @@ our_round(double x) {
return y;
}
/* ================================================== */
/* Amount of outstanding offset to process */
static double offset_register;
/* Flag set true if an adjtime slew was started and still may be running */
static int slow_slewing;
/* Flag set true if a PLL nano slew was started and still may be running */
static int nano_slewing;
/* Flag set true if a fast slew (one done by altering tick) is being
run at the moment */
static int fast_slewing;
/* The amount by which the fast slew was supposed to slew the clock */
static double fast_slew_wanted;
/* The value programmed into the kernel's 'tick' variable whilst
slewing a large offset */
static long slewing_tick;
/* The timeval (raw) at which a fast slew was started. We need to
know this for two reasons. First, if we want to change the
frequency midway through, we will want to abort the slew and return
the unprocessed portion to the offset register to start again
later. Second, when the end of the slew expires, we need to know
precisely how long we have been slewing for, so that we can negate
the excess and slew it back the other way. */
static struct timeval slew_start_tv;
/* This is the ID returned to use by the scheduler's timeout handler.
We need this if we subsequently wish to abort a slew, because we will have to
dequeue the timeout */
static SCH_TimeoutID slew_timeout_id;
/* The adjustment that we apply to 'tick', in seconds, whilst applying
a fast slew */
static double delta_total_tick;
/* Maximum length of one fast slew */
#define MAX_FASTSLEW_TIMEOUT (3600 * 24 * 7)
/* Max amount of time that we wish to slew by using adjtime (or its
equivalent). If more than this is outstanding, we alter the value
of tick instead, for a set period. Set this according to the
amount of time that a dial-up clock might need to be shifted
assuming it is resync'ed about once per day. (TBC) */
#define MAX_ADJUST_WITH_ADJTIME (0.2)
/* Max amount of time that should be adjusted by kernel PLL */
#define MAX_ADJUST_WITH_NANOPLL (0.5)
/* The amount by which we alter 'tick' when doing a large slew */
static int slew_delta_tick;
/* The maximum amount by which 'tick' can be biased away from 'nominal_tick'
(sys_adjtimex() in the kernel bounds this to 10%) */
static int max_tick_bias;
/* The latest time at which system clock may still be slewed by previous
adjtime() call and maximum offset correction error it can cause */
static struct timeval slow_slew_error_end;
static int slow_slew_error;
/* Timeval at which the latest nano PLL adjustment was started and maximum
offset correction error it can cause */
static struct timeval nano_slew_error_start;
static int nano_slew_error;
/* The latest time at which 'tick' in kernel may be actually updated
and maximum offset correction error it can cause */
static struct timeval fast_slew_error_end;
static double fast_slew_error;
/* The rate at which frequency and tick values are updated in kernel. */
static int tick_update_hz;
#define MIN_PLL_TIME_CONSTANT 0
#define MAX_PLL_TIME_CONSTANT 10
/* PLL time constant used when adjusting offset by PLL */
static long pll_time_constant;
/* Suggested offset correction rate (correction time * offset) */
static double correction_rate;
/* Kernel time constant shift */
static int shift_pll;
/* ================================================== */
/* These routines are used to estimate maximum error in offset correction */
/* Record the maximum offset correction error that a started, updated or
   finished adjtime() slew may cause.  'offset' is the slew offset in
   microseconds; 0 means the slew was cancelled or has completed. */
static void
update_slow_slew_error(int offset)
{
struct timeval now, newend;
/* Nothing to do if no error is pending and no new slew was started */
if (offset == 0 && slow_slew_error == 0)
return;
if (gettimeofday(&now, NULL) < 0) {
LOG_FATAL(LOGF_SysLinux, "gettimeofday() failed");
}
if (offset < 0)
offset = -offset;
/* assume 500ppm rate and one sec delay, plus 10 percent for fast slewing */
UTI_AddDoubleToTimeval(&now, (offset + 999) / 500 * 1.1, &newend);
/* The error itself is bounded by the assumed 500 ppm slewing rate */
if (offset > 500)
offset = 500;
/* If a previous, larger error may still be in effect, keep it (and its
   end time, when the new slew is a cancellation) instead of the new one */
if (slow_slew_error > offset) {
double previous_left;
UTI_DiffTimevalsToDouble(&previous_left, &slow_slew_error_end, &now);
if (previous_left > 0.0) {
if (offset == 0)
newend = slow_slew_error_end;
offset = slow_slew_error;
}
}
slow_slew_error = offset;
slow_slew_error_end = newend;
}
/* Return the maximum offset correction error (in seconds) that a
   still-running adjtime() slew may cause at time *now, or 0.0 when no
   error is pending or it has already expired. */
static double
get_slow_slew_error(struct timeval *now)
{
  double remaining;

  if (!slow_slew_error)
    return 0.0;

  UTI_DiffTimevalsToDouble(&remaining, &slow_slew_error_end, now);
  if (remaining <= 0.0)
    return 0.0;

  return slow_slew_error / 1e6;
}
/* Record the maximum offset correction error that a nanosecond PLL
   adjustment may cause.  'offset' is in nanoseconds; 'new' is non-zero
   when a PLL offset was just written, zero when re-reading the offset
   remaining from an earlier adjustment. */
static void
update_nano_slew_error(long offset, int new)
{
struct timeval now;
double ago;
/* Nothing to do if no error is pending and no new adjustment was made */
if (offset == 0 && nano_slew_error == 0)
return;
/* maximum error in offset reported by adjtimex */
offset /= (1 << (shift_pll + pll_time_constant)) - (new ? 0 : 1);
if (offset < 0)
offset = -offset;
/* A time stamp is only needed when setting a new offset, or when the
   start time of the previous adjustment is still recorded */
if (new || nano_slew_error_start.tv_sec > 0) {
if (gettimeofday(&now, NULL) < 0) {
LOG_FATAL(LOGF_SysLinux, "gettimeofday() failed");
}
}
/* When PLL offset is newly set, use the maximum of the old and new error.
Otherwise use the minimum, but only when the last update is older than
1.1 seconds to be sure the previous adjustment is already gone. */
if (!new) {
if (nano_slew_error > offset) {
if (nano_slew_error_start.tv_sec == 0) {
nano_slew_error = offset;
} else {
UTI_DiffTimevalsToDouble(&ago, &now, &nano_slew_error_start);
if (ago > 1.1) {
nano_slew_error_start.tv_sec = 0;
nano_slew_error = offset;
}
}
}
} else {
if (nano_slew_error < offset)
nano_slew_error = offset;
nano_slew_error_start = now;
}
}
/* Return the maximum offset correction error (in seconds) that a
   pending nanosecond PLL adjustment may cause. */
static double
get_nano_slew_error(void)
{
  return nano_slew_error ? nano_slew_error / 1e9 : 0.0;
}
/* Record the maximum offset correction error that the current fast
   (tick-based) slew may cause, and the time until which a previously
   written tick value may still be in effect in the kernel. */
static void
update_fast_slew_error(struct timeval *now)
{
  double worst_tick = current_total_tick;

  /* The old tick may apply for up to one kernel tick update period */
  if (delta_total_tick > 0.0)
    worst_tick += delta_total_tick;

  UTI_AddDoubleToTimeval(now, 1e6 * worst_tick / nominal_tick / tick_update_hz,
                         &fast_slew_error_end);

  fast_slew_error = fabs(1e6 * delta_total_tick / nominal_tick / tick_update_hz);
}
/* Return the maximum offset correction error (in seconds) that the
   fast (tick-based) slew may cause at time *now.  The stored error is
   dropped once it has been expired for more than 10 seconds. */
static double
get_fast_slew_error(struct timeval *now)
{
  double remaining;

  if (fast_slew_error == 0.0)
    return 0.0;

  UTI_DiffTimevalsToDouble(&remaining, &fast_slew_error_end, now);

  /* Forget the error when it is long past its end time */
  if (remaining < -10.0)
    fast_slew_error = 0.0;

  if (remaining > 0.0)
    return fast_slew_error;
  return 0.0;
}
/* ================================================== */
/* Select PLL time constant according to the suggested correction rate. */
/* Select the PLL time constant for the given offset (in seconds) so
   that the implied correction time approximates the suggested
   correction rate. */
static long
get_pll_constant(double offset)
{
  double corr_time;
  long constant = MIN_PLL_TIME_CONSTANT;

  /* Guard against dividing by a (near) zero offset */
  if (offset < 1e-9)
    return MIN_PLL_TIME_CONSTANT;

  corr_time = correction_rate / offset;

  /* Pick the smallest constant whose correction time covers corr_time,
     capped at the maximum allowed constant */
  while (constant < MAX_PLL_TIME_CONSTANT &&
         corr_time >= 1 << (constant + 1 + shift_pll))
    constant++;

  return constant;
}
/* ================================================== */
/* This routine stops a fast slew, determines how long the slew has
been running for, and consequently how much adjustment has actually
been applied. It can be used both when a slew finishes naturally
due to a timeout, and when a slew is being aborted.  Any part of the
adjustment that was not achieved is returned to the offset register. */
static void
stop_fast_slew(void)
{
struct timeval T1;
double fast_slew_done;
double slew_duration;
/* Should never get here unless this is true */
assert(fast_slewing);
/* Now set the thing off */
if (gettimeofday(&T1, NULL) < 0) {
LOG_FATAL(LOGF_SysLinux, "gettimeofday() failed");
}
/* Restore the unbiased tick value in the kernel */
if (TMX_SetTick(current_tick) < 0) {
LOG_FATAL(LOGF_SysLinux, "adjtimex() failed");
}
fast_slewing = 0;
UTI_DiffTimevalsToDouble(&slew_duration, &T1, &slew_start_tv);
/* Compute the dispersion we have introduced by changing tick this
way. We handle this by adding dispersion to all statistics held
at higher levels in the system. */
update_fast_slew_error(&T1);
lcl_InvokeDispersionNotifyHandlers(fast_slew_error);
/* Work out how much of the wanted adjustment was actually applied
during the slew and accrue the unprocessed remainder */
fast_slew_done = delta_total_tick * slew_duration /
(current_total_tick + delta_total_tick);
offset_register += (fast_slew_wanted + fast_slew_done);
}
/* ================================================== */
/* This routine reschedules the fast slew timeout according
to the current frequency and offset.  'old_tick' and 'old_delta_tick'
are the total tick and tick bias values that were in effect while the
slew was running so far. */
static void
adjust_fast_slew(double old_tick, double old_delta_tick)
{
struct timeval tv, end_of_slew;
double fast_slew_done, slew_duration, dseconds;
assert(fast_slewing);
if (gettimeofday(&tv, NULL) < 0) {
LOG_FATAL(LOGF_SysLinux, "gettimeofday() failed");
}
/* Accrue the part of the adjustment achieved so far back into the
register, using the old tick values */
UTI_DiffTimevalsToDouble(&slew_duration, &tv, &slew_start_tv);
fast_slew_done = old_delta_tick * slew_duration / (old_tick + old_delta_tick);
offset_register += fast_slew_wanted + fast_slew_done;
/* Duration needed to slew the whole remaining offset at the current rate */
dseconds = -offset_register * (current_total_tick + delta_total_tick) / delta_total_tick;
if (dseconds > MAX_FASTSLEW_TIMEOUT)
dseconds = MAX_FASTSLEW_TIMEOUT;
UTI_AddDoubleToTimeval(&tv, dseconds, &end_of_slew);
/* Restart the slew from now with the full remaining offset */
slew_start_tv = tv;
fast_slew_wanted = offset_register;
offset_register = 0.0;
SCH_RemoveTimeout(slew_timeout_id);
slew_timeout_id = SCH_AddTimeout(&end_of_slew, handle_end_of_slew, NULL);
}
/* ================================================== */
/* This routine is called to start a clock offset adjustment.  Depending
on the size of the outstanding offset it uses the kernel nano PLL,
adjtime(), or a temporary bias of the kernel 'tick' value (fast slew). */
static void
initiate_slew(void)
{
double dseconds;
long tick_adjust;
long offset;
struct timeval T0;
struct timeval end_of_slew;
/* Don't want to get here if we already have an adjust on the go! */
assert(!fast_slewing);
if (offset_register == 0.0) {
return;
}
/* Cancel any slewing that is running */
if (slow_slewing) {
/* Setting a zero offset returns the remaining adjtime() offset */
offset = 0;
if (TMX_ApplyOffset(&offset) < 0) {
LOG_FATAL(LOGF_SysLinux, "adjtimex() failed");
}
offset_register -= (double) offset / 1.0e6;
slow_slewing = 0;
update_slow_slew_error(0);
} else if (nano_slewing) {
/* Read the remaining PLL offset, then reset it to zero */
if (TMX_GetPLLOffsetLeft(&offset) < 0) {
LOG_FATAL(LOGF_SysLinux, "adjtimex() failed");
}
offset_register -= (double) offset / 1.0e9;
update_nano_slew_error(offset, 0);
offset = 0;
if (TMX_ApplyPLLOffset(offset, MIN_PLL_TIME_CONSTANT) < 0) {
LOG_FATAL(LOGF_SysLinux, "adjtimex() failed");
}
nano_slewing = 0;
update_nano_slew_error(offset, 1);
}
/* Choose the slewing method according to the size of the offset */
if (have_nanopll && fabs(offset_register) < MAX_ADJUST_WITH_NANOPLL) {
/* Use the PLL with fixed frequency to do the shift. Until the kernel has a
support for linear offset adjustments with programmable rate this is the
best we can do. */
offset = 1.0e9 * -offset_register;
/* First adjustment after accrue_offset() sets the PLL time constant */
if (pll_time_constant < 0) {
pll_time_constant = get_pll_constant(fabs(offset_register));
}
assert(pll_time_constant >= MIN_PLL_TIME_CONSTANT &&
pll_time_constant <= MAX_PLL_TIME_CONSTANT);
if (TMX_ApplyPLLOffset(offset, pll_time_constant) < 0) {
LOG_FATAL(LOGF_SysLinux, "adjtimex() failed");
}
offset_register = 0.0; /* Don't keep the sub-nanosecond leftover */
nano_slewing = 1;
update_nano_slew_error(offset, 1);
} else if (fabs(offset_register) < MAX_ADJUST_WITH_ADJTIME) {
/* Use adjtime to do the shift */
offset = our_round(1.0e6 * -offset_register);
/* Keep the sub-microsecond remainder in the register */
offset_register += offset / 1.0e6;
if (offset != 0) {
if (TMX_ApplyOffset(&offset) < 0) {
LOG_FATAL(LOGF_SysLinux, "adjtimex() failed");
}
slow_slewing = 1;
update_slow_slew_error(offset);
}
} else {
/* If the system clock has a high drift rate, the combination of
current_tick + slew_delta_tick could be outside the range that adjtimex
will accept. To prevent this, the tick adjustment that is used to slew
an error off the clock is clamped according to what tick_adjust is.
*/
long min_allowed_tick, max_allowed_tick;
min_allowed_tick = nominal_tick - max_tick_bias;
max_allowed_tick = nominal_tick + max_tick_bias;
/* Bias tick downwards for a positive offset (clock fast), upwards
for a negative one, clamping to the allowed range; give up if the
tick is already at the limit */
if (offset_register > 0) {
if (current_tick <= min_allowed_tick) {
return;
}
slewing_tick = current_tick - slew_delta_tick;
if (slewing_tick < min_allowed_tick) {
slewing_tick = min_allowed_tick;
}
} else {
if (current_tick >= max_allowed_tick) {
return;
}
slewing_tick = current_tick + slew_delta_tick;
if (slewing_tick > max_allowed_tick) {
slewing_tick = max_allowed_tick;
}
}
tick_adjust = slewing_tick - current_tick;
delta_total_tick = (double) tick_adjust / 1.0e6;
/* How long the biased tick must run to slew the whole offset */
dseconds = - offset_register * (current_total_tick + delta_total_tick) / delta_total_tick;
assert(dseconds > 0.0);
/* Now set the thing off */
if (gettimeofday(&T0, NULL) < 0) {
LOG_FATAL(LOGF_SysLinux, "gettimeofday() failed");
}
if (TMX_SetTick(slewing_tick) < 0) {
LOG_FATAL(LOGF_SysLinux, "adjtimex() failed");
}
/* Compute the dispersion we have introduced by changing tick this
way. We handle this by adding dispersion to all statistics held
at higher levels in the system. */
update_fast_slew_error(&T0);
lcl_InvokeDispersionNotifyHandlers(fast_slew_error);
fast_slewing = 1;
slew_start_tv = T0;
if (dseconds > MAX_FASTSLEW_TIMEOUT)
dseconds = MAX_FASTSLEW_TIMEOUT;
UTI_AddDoubleToTimeval(&T0, dseconds, &end_of_slew);
slew_timeout_id = SCH_AddTimeout(&end_of_slew, handle_end_of_slew, NULL);
fast_slew_wanted = offset_register;
offset_register = 0.0;
}
}
/* ================================================== */
/* This is the callback routine invoked by the scheduler at the end of
a slew. */
/* Scheduler callback invoked when a fast slew's timeout expires:
   finish the slew, then start a new adjustment for whatever offset is
   still outstanding. */
static void
handle_end_of_slew(void *anything)
{
  stop_fast_slew();

  /* To do any fine trimming required */
  initiate_slew();
}
/* ================================================== */
/* This routine is used to abort a slew that is in progress, if any */
/* Abort a fast slew that is in progress, if any; safe to call when no
   slew is running. */
static void
abort_slew(void)
{
  if (!fast_slewing)
    return;

  stop_fast_slew();
  SCH_RemoveTimeout(slew_timeout_id);
}
/* ================================================== */
/* This routine accrues an offset into the offset register, and starts
a slew if required.
The offset argument is measured in seconds. Positive means the
clock needs to be slewed backwards (i.e. is currently fast of true
time) */
/* Accumulate a clock offset (in seconds; positive means the clock is
   currently fast of true time and must be slewed backwards) and start
   or reschedule a slew to correct it.  'corr_rate' is the suggested
   correction rate (correction time * offset). */
static void
accrue_offset(double offset, double corr_rate)
{
  offset_register += offset;
  correction_rate = corr_rate;

  /* Force selection of a new PLL time constant on the next adjustment */
  pll_time_constant = -1;

  if (fast_slewing) {
    /* A tick-based slew is already running - just reschedule it */
    adjust_fast_slew(current_total_tick, delta_total_tick);
  } else {
    initiate_slew();
  }
}
/* ================================================== */
/* Positive means currently fast of true time, i.e. jump backwards */
@ -646,10 +113,6 @@ apply_step_offset(double offset)
struct timeval old_time, new_time;
double err;
if (fast_slewing) {
abort_slew();
}
if (have_setoffset) {
if (TMX_ApplyStepOffset(-offset) < 0) {
LOG_FATAL(LOGF_SysLinux, "adjtimex() failed");
@ -672,9 +135,6 @@ apply_step_offset(double offset)
UTI_DiffTimevalsToDouble(&err, &old_time, &new_time);
lcl_InvokeDispersionNotifyHandlers(fabs(err));
}
initiate_slew();
}
/* ================================================== */
@ -688,10 +148,8 @@ static double
set_frequency(double freq_ppm)
{
long required_tick;
long min_allowed_tick, max_allowed_tick;
double required_freq; /* what we use */
double scaled_freq; /* what adjtimex & the kernel use */
double old_total_tick;
int required_delta_tick;
required_delta_tick = our_round(freq_ppm / dhz);
@ -699,42 +157,12 @@ set_frequency(double freq_ppm)
required_tick = nominal_tick - required_delta_tick;
scaled_freq = freq_scale * required_freq;
min_allowed_tick = nominal_tick - max_tick_bias;
max_allowed_tick = nominal_tick + max_tick_bias;
if (required_tick < min_allowed_tick || required_tick > max_allowed_tick) {
LOG(LOGS_WARN, LOGF_SysLinux, "Required tick %ld outside allowed range (%ld .. %ld)", required_tick, min_allowed_tick, max_allowed_tick);
if (required_tick < min_allowed_tick) {
required_tick = min_allowed_tick;
} else {
required_tick = max_allowed_tick;
}
}
current_tick = required_tick;
old_total_tick = current_total_tick;
current_total_tick = ((double)current_tick + required_freq/dhz) / 1.0e6 ;
/* Don't change tick if we are fast slewing, just reschedule the timeout */
if (fast_slewing) {
required_tick = slewing_tick;
}
if (TMX_SetFrequency(&scaled_freq, required_tick) < 0) {
LOG_FATAL(LOGF_SysLinux, "adjtimex failed for set_frequency, freq_ppm=%10.4e scaled_freq=%10.4e required_tick=%ld",
freq_ppm, scaled_freq, required_tick);
}
if (fast_slewing) {
double old_delta_tick;
old_delta_tick = delta_total_tick;
delta_total_tick = ((double)slewing_tick + required_freq/dhz) / 1.0e6 -
current_total_tick;
adjust_fast_slew(old_total_tick, old_delta_tick);
}
return dhz * (nominal_tick - current_tick) - scaled_freq / freq_scale;
return dhz * (nominal_tick - required_tick) - scaled_freq / freq_scale;
}
/* ================================================== */
@ -752,10 +180,6 @@ read_frequency(void)
LOG_FATAL(LOGF_SysLinux, "adjtimex() failed");
}
if (fast_slewing) {
tick -= slewing_tick - current_tick;
}
tick_term = dhz * (double)(nominal_tick - tick);
freq_term = unscaled_freq / freq_scale;
@ -765,82 +189,6 @@ read_frequency(void)
#endif
return tick_term - freq_term;
}
/* ================================================== */
/* Given a raw time, determine the correction in seconds to generate
   the 'cooked' time.  The correction has to be added to the raw time.
   If 'err' is non-NULL, the maximum error of the correction is also
   returned.  The correction is given by:
     1. any value in the offset register,
     2. the amount of fast (tick-based) slew remaining,
     3. any amount of adjtime() correction remaining in the kernel,
     4. any amount of nano PLL correction remaining in the kernel. */
static void
get_offset_correction(struct timeval *raw,
                      double *corr, double *err)
{
  double fast_slew_duration;
  double fast_slew_achieved;
  double fast_slew_remaining;
  long offset, noffset, toffset;

  if (!slow_slewing) {
    offset = 0;
  } else {
    if (have_readonly_adjtime) {
      /* The kernel can report the remaining offset without changing it */
      if (TMX_GetOffsetLeft(&offset) < 0) {
        LOG_FATAL(LOGF_SysLinux, "adjtimex() failed");
      }
    } else {
      /* No read-only support: cancel the adjustment to read the
         remaining offset, then immediately restore it */
      toffset = 0;
      if (TMX_ApplyOffset(&toffset) < 0) {
        LOG_FATAL(LOGF_SysLinux, "adjtimex() failed");
      }
      offset = toffset;
      if (TMX_ApplyOffset(&toffset) < 0) {
        LOG_FATAL(LOGF_SysLinux, "adjtimex() failed");
      }
    }
    if (offset == 0) {
      /* adjtime slew has finished */
      slow_slewing = 0;
    }
  }

  if (!nano_slewing) {
    noffset = 0;
  } else {
    if (TMX_GetPLLOffsetLeft(&noffset) < 0) {
      LOG_FATAL(LOGF_SysLinux, "adjtimex() failed");
    }
    if (noffset == 0) {
      /* PLL slew has finished */
      nano_slewing = 0;
    }
  }

  if (fast_slewing) {
    /* Estimate how much of the fast slew has been achieved so far */
    UTI_DiffTimevalsToDouble(&fast_slew_duration, raw, &slew_start_tv);
    fast_slew_achieved = delta_total_tick * fast_slew_duration /
      (current_total_tick + delta_total_tick);
    fast_slew_remaining = fast_slew_wanted + fast_slew_achieved;
  } else {
    fast_slew_remaining = 0.0;
  }

  *corr = - (offset_register + fast_slew_remaining) + offset / 1.0e6 + noffset / 1.0e9;

  if (err) {
    update_slow_slew_error(offset);
    update_nano_slew_error(noffset, 0);
    *err = get_slow_slew_error(raw) + get_fast_slew_error(raw) + get_nano_slew_error();
  }
}
/* ================================================== */
@ -972,7 +320,6 @@ get_version_specific_details(void)
dshift_hz = (double)(1UL << shift_hz);
basic_freq_scale = dshift_hz / dhz;
nominal_tick = (1000000L + (hz/2))/hz; /* Mirror declaration in kernel */
slew_delta_tick = nominal_tick / 12;
max_tick_bias = nominal_tick / 10;
tick_update_hz = hz;
@ -1031,21 +378,6 @@ get_version_specific_details(void)
}
}
/* ADJ_OFFSET_SS_READ support. It's available since 2.6.24,
but was buggy until 2.6.28. */
if (kernelvercmp(major, minor, patch, 2, 6, 28) < 0) {
have_readonly_adjtime = 0;
} else {
have_readonly_adjtime = 1;
}
/* ADJ_NANO support */
if (kernelvercmp(major, minor, patch, 2, 6, 27) < 0) {
have_nanopll = 0;
} else {
have_nanopll = 1;
}
/* ADJ_SETOFFSET support */
if (kernelvercmp(major, minor, patch, 2, 6, 39) < 0) {
have_setoffset = 0;
@ -1053,21 +385,14 @@ get_version_specific_details(void)
have_setoffset = 1;
}
/* PLL time constant changed in 2.6.31 */
if (kernelvercmp(major, minor, patch, 2, 6, 31) < 0) {
shift_pll = 4;
} else {
shift_pll = 2;
}
/* Override freq_scale if it appears in conf file */
CNF_GetLinuxFreqScale(&set_config_freq_scale, &config_freq_scale);
if (set_config_freq_scale) {
freq_scale = config_freq_scale;
}
DEBUG_LOG(LOGF_SysLinux, "hz=%d shift_hz=%d freq_scale=%.8f nominal_tick=%d slew_delta_tick=%d max_tick_bias=%d shift_pll=%d",
hz, shift_hz, freq_scale, nominal_tick, slew_delta_tick, max_tick_bias, shift_pll);
DEBUG_LOG(LOGF_SysLinux, "hz=%d shift_hz=%d freq_scale=%.8f nominal_tick=%d max_tick_bias=%d",
hz, shift_hz, freq_scale, nominal_tick, max_tick_bias);
}
/* ================================================== */
@ -1076,29 +401,12 @@ get_version_specific_details(void)
void
SYS_Linux_Initialise(void)
{
long offset;
double freq;
offset_register = 0.0;
fast_slewing = 0;
get_version_specific_details();
offset = 0;
if (TMX_ApplyOffset(&offset) < 0) {
if (TMX_ResetOffset() < 0) {
LOG_FATAL(LOGF_SysLinux, "adjtimex() failed");
}
if (have_readonly_adjtime && (TMX_GetOffsetLeft(&offset) < 0 || offset)) {
LOG(LOGS_INFO, LOGF_SysLinux, "adjtimex() doesn't support ADJ_OFFSET_SS_READ");
have_readonly_adjtime = 0;
}
if (have_nanopll && TMX_EnableNanoPLL() < 0) {
LOG(LOGS_INFO, LOGF_SysLinux, "adjtimex() doesn't support nanosecond PLL");
have_nanopll = 0;
}
if (have_setoffset && TMX_TestStepOffset() < 0) {
LOG(LOGS_INFO, LOGF_SysLinux, "adjtimex() doesn't support ADJ_SETOFFSET");
have_setoffset = 0;
@ -1106,13 +414,10 @@ SYS_Linux_Initialise(void)
TMX_SetSync(CNF_GetRtcSync());
/* Read current kernel frequency */
TMX_GetFrequency(&freq, &current_tick);
current_total_tick = (current_tick + freq / freq_scale / dhz) / 1.0e6;
lcl_RegisterSystemDrivers(read_frequency, set_frequency,
accrue_offset, apply_step_offset,
get_offset_correction, set_leap);
SYS_Generic_CompleteFreqDriver(1.0e6 * max_tick_bias / nominal_tick,
1.0 / tick_update_hz,
read_frequency, set_frequency,
apply_step_offset, set_leap);
}
/* ================================================== */
@ -1121,9 +426,7 @@ SYS_Linux_Initialise(void)
void
SYS_Linux_Finalise(void)
{
/* Must *NOT* leave a fast slew running - clock would drift way off
if the daemon is not restarted */
abort_slew();
SYS_Generic_Finalise();
}
/* ================================================== */

View file

@ -37,26 +37,30 @@
static int status = 0;
int
TMX_SetTick(long tick)
TMX_ResetOffset(void)
{
struct timex txc;
txc.modes = ADJ_TICK;
txc.tick = tick;
return adjtimex(&txc);
}
int
TMX_ApplyOffset(long *offset)
{
struct timex txc;
int result;
/* Reset adjtime() offset */
txc.modes = ADJ_OFFSET_SINGLESHOT;
txc.offset = *offset;
result = adjtimex(&txc);
*offset = txc.offset;
return result;
txc.offset = 0;
if (adjtimex(&txc) < 0)
return -1;
/* Reset PLL offset */
txc.modes = ADJ_OFFSET | ADJ_STATUS;
txc.status = STA_PLL;
txc.offset = 0;
if (adjtimex(&txc) < 0)
return -1;
/* Set status back */
txc.modes = ADJ_STATUS;
txc.modes = status;
if (adjtimex(&txc) < 0)
return -1;
return 0;
}
int
@ -93,17 +97,6 @@ TMX_GetFrequency(double *freq, long *tick)
return result;
}
int
TMX_GetOffsetLeft(long *offset)
{
struct timex txc;
int result;
txc.modes = ADJ_OFFSET_SS_READ;
result = adjtimex(&txc);
*offset = txc.offset;
return result;
}
int
TMX_ReadCurrentParams(struct tmx_params *params)
{
@ -184,47 +177,6 @@ int TMX_SetSync(int sync)
return adjtimex(&txc);
}
int
TMX_EnableNanoPLL(void)
{
struct timex txc;
int result;
txc.modes = ADJ_STATUS | ADJ_OFFSET | ADJ_TIMECONST | ADJ_NANO;
txc.status = STA_PLL | STA_FREQHOLD;
txc.offset = 0;
txc.constant = 0;
result = adjtimex(&txc);
if (result < 0 || !(txc.status & STA_NANO) || txc.offset || txc.constant)
return -1;
status |= STA_PLL | STA_FREQHOLD;
return result;
}
int
TMX_ApplyPLLOffset(long offset, long constant)
{
struct timex txc;
txc.modes = ADJ_OFFSET | ADJ_TIMECONST | ADJ_NANO;
txc.offset = offset;
txc.constant = constant;
return adjtimex(&txc);
}
int
TMX_GetPLLOffsetLeft(long *offset)
{
struct timex txc;
int result;
txc.modes = 0;
result = adjtimex(&txc);
*offset = txc.offset;
return result;
}
int
TMX_TestStepOffset(void)
{
@ -240,7 +192,7 @@ TMX_TestStepOffset(void)
if (adjtimex(&txc) < 0 || txc.maxerror != 0)
return -1;
txc.modes = ADJ_SETOFFSET;
txc.modes = ADJ_SETOFFSET | ADJ_NANO;
txc.time.tv_sec = 0;
txc.time.tv_usec = 0;
if (adjtimex(&txc) < 0 || txc.maxerror < 100000)
@ -254,21 +206,13 @@ TMX_ApplyStepOffset(double offset)
{
struct timex txc;
txc.modes = ADJ_SETOFFSET;
txc.modes = ADJ_SETOFFSET | ADJ_NANO;
if (offset >= 0) {
txc.time.tv_sec = offset;
} else {
txc.time.tv_sec = offset - 1;
}
/* ADJ_NANO changes the status even with ADJ_SETOFFSET, use it only when
STA_NANO is already enabled */
if (status & STA_PLL) {
txc.modes |= ADJ_NANO;
txc.time.tv_usec = 1e9 * (offset - txc.time.tv_sec);
} else {
txc.time.tv_usec = 1e6 * (offset - txc.time.tv_sec);
}
txc.time.tv_usec = 1.0e9 * (offset - txc.time.tv_sec);
return adjtimex(&txc);
}

View file

@ -64,17 +64,12 @@ struct tmx_params {
long stbcnt;
};
int TMX_SetTick(long tick);
int TMX_ApplyOffset(long *offset);
int TMX_ResetOffset(void);
int TMX_SetFrequency(double *freq, long tick);
int TMX_GetFrequency(double *freq, long *tick);
int TMX_GetOffsetLeft(long *offset);
int TMX_ReadCurrentParams(struct tmx_params *params);
int TMX_SetLeap(int leap);
int TMX_SetSync(int sync);
int TMX_EnableNanoPLL(void);
int TMX_ApplyPLLOffset(long offset, long constant);
int TMX_GetPLLOffsetLeft(long *offset);
int TMX_TestStepOffset(void);
int TMX_ApplyStepOffset(double offset);