TIMER: Timing corrections and enhancements

- Add support to query remaining usecs on pending events.
- Generalized the sim_interval adjustment in sim_idle to allow more than
   a single decrement.
- More overhead reduction when idling.
- Carry usec values as double throughout to fully support long wall clock
   delays.
This commit is contained in:
Mark Pizzolato 2016-12-30 10:26:59 -08:00
parent e52ac6d57f
commit c9276407e6
6 changed files with 150 additions and 57 deletions

Binary file not shown.

46
scp.c
View file

@ -8927,7 +8927,7 @@ do {
sim_debug (SIM_DBG_EVENT, sim_dflt_dev, "Processing Event for %s\n", sim_uname (uptr));
AIO_EVENT_BEGIN(uptr);
if (uptr->usecs_remaining)
reason = sim_timer_activate_after_d (uptr, uptr->usecs_remaining);
reason = sim_timer_activate_after (uptr, uptr->usecs_remaining);
else {
if (uptr->action != NULL)
reason = uptr->action (uptr);
@ -9058,23 +9058,33 @@ t_stat sim_activate_after_abs (UNIT *uptr, uint32 event_time)
return _sim_activate_after_abs (uptr, event_time);
}
t_stat _sim_activate_after_abs (UNIT *uptr, uint32 event_time)
/* sim_activate_after_abs_d - schedule an event for a unit after an absolute
   wall clock delay given in microseconds as a double (wide-range variant of
   sim_activate_after_abs; a double avoids overflowing very long delays).
   Delegates to _sim_activate_after_abs, which cancels any pending event for
   the unit first. */
t_stat sim_activate_after_abs_d (UNIT *uptr, double event_time)
{
AIO_ACTIVATE (_sim_activate_after_abs, uptr, event_time); /* NOTE(review): macro may hand the call to the asynch I/O path and return from here - confirm AIO_ACTIVATE semantics */
return _sim_activate_after_abs (uptr, event_time);
}
/* _sim_activate_after_abs - worker for the absolute-delay activations.
   Microsecond delay is carried as a double to support long wall clock delays.
   "abs" semantics: any already-pending event for the unit is cancelled, so
   the new delay replaces (rather than coexists with) the old schedule. */
t_stat _sim_activate_after_abs (UNIT *uptr, double event_time)
{
AIO_VALIDATE; /* Can't call asynchronously */
sim_cancel (uptr); /* drop any pending event before rescheduling */
return _sim_activate_after (uptr, event_time);
}
/* sim_activate_after - schedule an event for a unit after a wall clock
   delay given in whole microseconds.  Thin compatibility wrapper: widens
   the delay to a double and defers to the common worker. */
t_stat sim_activate_after (UNIT *uptr, uint32 usec_delay)
{
double usecs = (double)usec_delay;

return _sim_activate_after (uptr, usecs);
}
/* sim_activate_after_d - schedule an event for a unit after a wall clock
   delay given in microseconds as a double (supports delays too long for
   a 32-bit count).  Defers directly to the common worker. */
t_stat sim_activate_after_d (UNIT *uptr, double usec_delay)
{
t_stat status;

status = _sim_activate_after (uptr, usec_delay);
return status;
}
t_stat _sim_activate_after (UNIT *uptr, uint32 usec_delay)
/* _sim_activate_after - common worker: schedule an event for a unit after a
   wall clock delay expressed in microseconds (carried as a double so very
   long delays don't overflow a 32-bit value).
   If the unit already has a pending event it is left untouched and SCPE_OK
   is returned. */
t_stat _sim_activate_after (UNIT *uptr, double usec_delay)
{
AIO_VALIDATE; /* Can't call asynchronously */
if (sim_is_active (uptr)) /* already active? */
return SCPE_OK; /* keep the existing schedule */
AIO_ACTIVATE (_sim_activate_after, uptr, usec_delay); /* NOTE(review): macro may dispatch to the asynch path and return - confirm against AIO_ACTIVATE definition */
return sim_timer_activate_after (uptr, usec_delay);
}
@ -9176,11 +9186,35 @@ for (cptr = sim_clock_queue; cptr != QUEUE_LIST_END; cptr = cptr->next) {
else
accum = accum + cptr->time;
if (cptr == uptr)
return accum + 1;
return accum + 1 + (int32)((uptr->usecs_remaining * sim_timer_inst_per_sec ()) / 1000000.0);
}
return 0;
}
/* sim_activate_time_usecs - report the wall clock time, in microseconds,
   until the next event for a unit fires.
   The timer subsystem is consulted first; when the unit is not scheduled
   there, the ordinary clock queue is walked, accumulating the instruction
   counts ahead of the unit, and the total is converted to microseconds
   (plus any usecs_remaining carried on the unit, biased by 1.0 so a
   scheduled unit never reports zero).  Returns 0.0 when the unit has no
   pending event. */
double sim_activate_time_usecs (UNIT *uptr)
{
UNIT *qptr;
int32 insts = 0;
double usecs;

AIO_VALIDATE;
usecs = sim_timer_activate_time_usecs (uptr);
if (usecs >= 0)                             /* timer subsystem knows this unit? */
    return usecs;
for (qptr = sim_clock_queue; qptr != QUEUE_LIST_END; qptr = qptr->next) {
    if (qptr == sim_clock_queue) {          /* head entry: remaining time is sim_interval */
        if (sim_interval > 0)
            insts += sim_interval;
        }
    else
        insts += qptr->time;                /* later entries add their deltas */
    if (qptr == uptr)
        return 1.0 + uptr->usecs_remaining + ((1000000.0 * insts) / sim_timer_inst_per_sec ());
    }
return 0.0;                                 /* not queued anywhere */
}
/* sim_gtime - return global time
sim_grtime - return global time with rollover

7
scp.h
View file

@ -122,12 +122,15 @@ t_stat _sim_activate (UNIT *uptr, int32 interval);
t_stat sim_activate_abs (UNIT *uptr, int32 interval);
t_stat sim_activate_notbefore (UNIT *uptr, int32 rtime);
t_stat sim_activate_after (UNIT *uptr, uint32 usecs_walltime);
t_stat _sim_activate_after (UNIT *uptr, uint32 usecs_walltime);
t_stat sim_activate_after_d (UNIT *uptr, double usecs_walltime);
t_stat _sim_activate_after (UNIT *uptr, double usecs_walltime);
t_stat sim_activate_after_abs (UNIT *uptr, uint32 usecs_walltime);
t_stat _sim_activate_after_abs (UNIT *uptr, uint32 usecs_walltime);
t_stat sim_activate_after_abs_d (UNIT *uptr, double usecs_walltime);
t_stat _sim_activate_after_abs (UNIT *uptr, double usecs_walltime);
t_stat sim_cancel (UNIT *uptr);
t_bool sim_is_active (UNIT *uptr);
int32 sim_activate_time (UNIT *uptr);
double sim_activate_time_usecs (UNIT *uptr);
t_stat sim_run_boot_prep (int32 flag);
double sim_gtime (void);
uint32 sim_grtime (void);

View file

@ -748,6 +748,7 @@ static uint32 rtc_clock_time_idled_last[SIM_NTIMERS+1] = { 0 };/* total time idl
static uint32 rtc_clock_calib_skip_idle[SIM_NTIMERS+1] = { 0 };/* Calibrations skipped due to idling */
static uint32 rtc_clock_calib_gap2big[SIM_NTIMERS+1] = { 0 };/* Calibrations skipped Gap Too Big */
static uint32 rtc_clock_calib_backwards[SIM_NTIMERS+1] = { 0 };/* Calibrations skipped Clock Running Backwards */
static uint32 sim_idle_cyc_ms = 0; /* Cycles per millisecond while not idling */
UNIT sim_timer_units[SIM_NTIMERS+1]; /* Clock assist units */
/* one for each timer and one for an internal */
@ -946,6 +947,8 @@ if (last_idle_pct > (100 - sim_idle_calib_pct)) {
return rtc_currd[tmr]; /* avoid calibrating idle checks */
}
new_gtime = sim_gtime();
if ((last_idle_pct == 0) && (delta_rtime != 0))
sim_idle_cyc_ms = (uint32)((new_gtime - rtc_gtime[tmr]) / delta_rtime);
if (sim_asynch_timer) {
/* An asynchronous clock, merely needs to divide the number of */
/* instructions actually executed by the clock rate. */
@ -967,7 +970,8 @@ rtc_gtime[tmr] = new_gtime; /* save instruction time
/* instructions which was returned the last time it was called. */
if (delta_rtime == 0) /* gap too small? */
rtc_based[tmr] = rtc_based[tmr] * ticksper; /* slew wide */
else rtc_based[tmr] = (int32) (((double) rtc_based[tmr] * (double) rtc_nxintv[tmr]) /
else
rtc_based[tmr] = (int32) (((double) rtc_based[tmr] * (double) rtc_nxintv[tmr]) /
((double) delta_rtime));/* new base rate */
delta_vtime = rtc_vtime[tmr] - rtc_rtime[tmr]; /* gap */
if (delta_vtime > SIM_TMAX) /* limit gap */
@ -1119,6 +1123,8 @@ for (tmr=clocks=0; tmr<=SIM_NTIMERS; ++tmr) {
}
fprintf (st, " Current Insts Per Tick: %s\n", sim_fmt_numeric ((double)rtc_currd[tmr]));
fprintf (st, " Initializations: %d\n", rtc_calib_initializations[tmr]);
fprintf (st, " Ticks: %s\n", sim_fmt_numeric ((double)(rtc_clock_ticks[tmr])));
if (rtc_clock_ticks_tot[tmr]+rtc_clock_ticks[tmr] != rtc_clock_ticks[tmr])
fprintf (st, " Total Ticks: %s\n", sim_fmt_numeric ((double)(rtc_clock_ticks_tot[tmr]+rtc_clock_ticks[tmr])));
if (rtc_clock_skew_max[tmr] != 0.0)
fprintf (st, " Peak Clock Skew: %s%s\n", sim_fmt_secs (fabs(rtc_clock_skew_max[tmr])), (rtc_clock_skew_max[tmr] < 0) ? " fast" : " slow");
@ -1229,6 +1235,7 @@ return SCPE_OK;
}
REG sim_timer_reg[] = {
{ DRDATAD (IDLE_CYC_MS, sim_idle_cyc_ms, 32, "Cycles Per Millisecond"), PV_RSPC|REG_RO},
{ NULL }
};
@ -1389,17 +1396,15 @@ return SCPE_OK;
w = ms_to_wait / ms_per_wait
*/
t_bool sim_idle (uint32 tmr, t_bool sin_cyc)
t_bool sim_idle (uint32 tmr, int sin_cyc)
{
uint32 cyc_ms = 0;
uint32 w_ms, w_idle, act_ms;
int32 act_cyc;
if (rtc_clock_catchup_pending[tmr]) { /* Catchup clock tick pending? */
sim_debug (DBG_CAL, &sim_timer_dev, "sim_idle(tmr=%d, sin_cyc=%d) - accelerating pending catch-up tick before idling %s\n", tmr, sin_cyc, sim_uname (sim_clock_unit[tmr]));
sim_activate_abs (&sim_timer_units[tmr], 0);
if (sin_cyc)
sim_interval = sim_interval - 1;
sim_interval -= sin_cyc;
return FALSE;
}
if ((!sim_idle_enab) || /* idling disabled */
@ -1412,14 +1417,12 @@ if ((!sim_idle_enab) || /* idling disabled */
((rtc_elapsed[tmr] < sim_idle_stable) ? "not stable" :
((sim_clock_queue != QUEUE_LIST_END) ? sim_uname (sim_clock_queue) :
"")), rtc_elapsed[tmr], rtc_ticks[tmr]);
if (sin_cyc)
sim_interval = sim_interval - 1;
sim_interval -= sin_cyc;
return FALSE;
}
if (_rtcn_tick_catchup_check(tmr, 0)) {
sim_debug (DBG_CAL, &sim_timer_dev, "sim_idle(tmr=%d, sin_cyc=%d) - rescheduling catchup tick for %s\n", tmr, sin_cyc, sim_uname (sim_clock_unit[tmr]));
if (sin_cyc)
sim_interval = sim_interval - 1;
sim_interval -= sin_cyc;
return FALSE;
}
/*
@ -1446,14 +1449,14 @@ if (_rtcn_tick_catchup_check(tmr, 0)) {
means something, while not idling when it isn't enabled.
*/
sim_debug (DBG_TRC, &sim_timer_dev, "sim_idle(tmr=%d, sin_cyc=%d)\n", tmr, sin_cyc);
cyc_ms = (rtc_currd[tmr] * rtc_hz[tmr]) / 1000; /* cycles per msec */
if ((sim_idle_rate_ms == 0) || (cyc_ms == 0)) { /* not possible? */
if (sin_cyc)
sim_interval = sim_interval - 1;
sim_debug (DBG_IDL, &sim_timer_dev, "not possible idle_rate_ms=%d - cyc/ms=%d\n", sim_idle_rate_ms, cyc_ms);
if (sim_idle_cyc_ms == 0)
sim_idle_cyc_ms = (rtc_currd[tmr] * rtc_hz[tmr]) / 1000;/* cycles per msec */
if ((sim_idle_rate_ms == 0) || (sim_idle_cyc_ms == 0)) {/* not possible? */
sim_interval -= sin_cyc;
sim_debug (DBG_IDL, &sim_timer_dev, "not possible idle_rate_ms=%d - cyc/ms=%d\n", sim_idle_rate_ms, sim_idle_cyc_ms);
return FALSE;
}
w_ms = (uint32) sim_interval / cyc_ms; /* ms to wait */
w_ms = (uint32) sim_interval / sim_idle_cyc_ms; /* ms to wait */
/* When the host system has a clock tick which is less frequent than the */
/* simulated system's clock, idling will cause delays which will miss */
/* simulated clock ticks. To accommodate this, and still allow idling, if */
@ -1464,8 +1467,7 @@ if (rtc_clock_catchup_eligible[tmr])
else
w_idle = (w_ms * 1000) / sim_idle_rate_ms; /* 1000 * intervals to wait */
if (w_idle < 500) { /* shorter than 1/2 the interval? */
if (sin_cyc)
sim_interval = sim_interval - 1;
sim_interval -= sin_cyc;
sim_debug (DBG_IDL, &sim_timer_dev, "no wait\n");
return FALSE;
}
@ -1475,9 +1477,8 @@ else
sim_debug (DBG_IDL, &sim_timer_dev, "sleeping for %d ms - pending event on %s in %d instructions\n", w_ms, sim_uname(sim_clock_queue), sim_interval);
act_ms = sim_idle_ms_sleep (w_ms); /* wait */
rtc_clock_time_idled[tmr] += act_ms;
act_cyc = act_ms * cyc_ms;
if (act_ms < w_ms) /* awakened early? */
act_cyc += (cyc_ms * sim_idle_rate_ms) / 2; /* account for half an interval's worth of cycles */
act_cyc = act_ms * sim_idle_cyc_ms;
act_cyc += (sim_idle_cyc_ms * sim_idle_rate_ms) / 2; /* account for half an interval's worth of cycles */
if (sim_interval > act_cyc)
sim_interval = sim_interval - act_cyc; /* count down sim_interval */
else
@ -1813,7 +1814,7 @@ if (stat == SCPE_OK) {
cptr->usecs_remaining = 0;
sim_debug (DBG_QUE, &sim_timer_dev, " remnant: %.0f - next %s after cosched interval: %d ticks\n", usec_delay, (sim_clock_cosched_queue[tmr] != QUEUE_LIST_END) ? sim_uname (sim_clock_cosched_queue[tmr]) : "", sim_cosched_interval[tmr]);
sim_timer_activate_after_d (cptr, usec_delay);
sim_timer_activate_after (cptr, usec_delay);
}
else {
sim_debug (DBG_QUE, &sim_timer_dev, " - next %s after cosched interval: %d ticks\n", (sim_clock_cosched_queue[tmr] != QUEUE_LIST_END) ? sim_uname (sim_clock_cosched_queue[tmr]) : "", sim_cosched_interval[tmr]);
@ -1868,16 +1869,13 @@ clock_gettime (CLOCK_REALTIME, now);
static t_bool _rtcn_tick_catchup_check (int32 tmr, int32 time)
{
double tnow;
if ((!sim_catchup_ticks) ||
((tmr < 0) || (tmr >= SIM_NTIMERS)))
return FALSE;
tnow = sim_timenow_double();
if ((rtc_hz[tmr] > sim_os_tick_hz) && /* faster than host tick */
(!rtc_clock_catchup_eligible[tmr]) && /* not eligible yet? */
(time != -1)) { /* called from ack? */
rtc_clock_catchup_base_time[tmr] = tnow;
rtc_clock_catchup_base_time[tmr] = sim_timenow_double();
rtc_clock_ticks_tot[tmr] += rtc_clock_ticks[tmr];
rtc_clock_ticks[tmr] = 0;
rtc_calib_tick_time_tot[tmr] += rtc_calib_tick_time[tmr];
@ -1890,13 +1888,17 @@ if ((rtc_hz[tmr] > sim_os_tick_hz) && /* faster than host tick */
sim_debug (DBG_QUE, &sim_timer_dev, "_rtcn_tick_catchup_check() - Enabling catchup ticks for %s\n", sim_uname (sim_clock_unit[tmr]));
return TRUE;
}
if (rtc_clock_catchup_eligible[tmr] &&
(tnow > (rtc_clock_catchup_base_time[tmr] + (rtc_calib_tick_time[tmr] + rtc_clock_tick_size[tmr])))) {
if (rtc_clock_catchup_eligible[tmr])
{
double tnow = sim_timenow_double();
if (tnow > (rtc_clock_catchup_base_time[tmr] + (rtc_calib_tick_time[tmr] + rtc_clock_tick_size[tmr]))) {
sim_debug (DBG_QUE, &sim_timer_dev, "_rtcn_tick_catchup_check(%d) - scheduling catchup tick for %s which is behind %s\n", time, sim_uname (sim_clock_unit[tmr]), sim_fmt_secs (tnow > (rtc_clock_catchup_base_time[tmr] + (rtc_calib_tick_time[tmr] + rtc_clock_tick_size[tmr]))));
rtc_clock_catchup_pending[tmr] = TRUE;
sim_activate_abs (&sim_timer_units[tmr], (time < 0) ? 0 : time);
return TRUE;
}
}
return FALSE;
}
@ -2274,15 +2276,10 @@ return inst_per_sec;
t_stat sim_timer_activate (UNIT *uptr, int32 interval)
{
AIO_VALIDATE;
return sim_timer_activate_after (uptr, (uint32)((interval * 1000000.0) / sim_timer_inst_per_sec ()));
return sim_timer_activate_after (uptr, (double)((interval * 1000000.0) / sim_timer_inst_per_sec ()));
}
t_stat sim_timer_activate_after (UNIT *uptr, uint32 usec_delay)
{
return sim_timer_activate_after_d (uptr, (double)usec_delay);
}
t_stat sim_timer_activate_after_d (UNIT *uptr, double usec_delay)
t_stat sim_timer_activate_after (UNIT *uptr, double usec_delay)
{
int inst_delay, tmr;
double inst_delay_d, inst_per_usec;
@ -2715,3 +2712,62 @@ for (tmr=0; tmr<SIM_NTIMERS; tmr++)
return sim_activate_time (&sim_timer_units[tmr]);
return -1; /* Not found. */
}
/* sim_timer_activate_time_usecs - wall clock microseconds remaining until
   the pending timer event for uptr fires.

   Checks, in order:
     1. (SIM_ASYNCH_CLOCKS builds) the wall clock thread's in-flight entry
        and its queue, under sim_timer_lock,
     2. units co-scheduled against a calibrated clock,
     3. the clock units themselves.
   Returns -1.0 when the unit is not scheduled through the timer subsystem
   (callers such as sim_activate_time_usecs then fall back to the ordinary
   event queue).  Results in the asynch paths are biased by +1 so a
   scheduled unit never reports zero. */
double sim_timer_activate_time_usecs (UNIT *uptr)
{
UNIT *cptr;
int32 tmr;
#if defined(SIM_ASYNCH_CLOCKS)
if (uptr->a_is_active == &_sim_wallclock_is_active) {
double d_result;
pthread_mutex_lock (&sim_timer_lock);
if (uptr == sim_wallclock_entry) { /* unit currently being dispatched? */
d_result = uptr->a_due_gtime - sim_gtime (); /* instructions until due */
if (d_result < 0.0)
d_result = 0.0;
if (d_result > (double)0x7FFFFFFE) /* clamp to the int32-safe range */
d_result = (double)0x7FFFFFFE;
pthread_mutex_unlock (&sim_timer_lock);
return (1000000.0 * (d_result / sim_timer_inst_per_sec ())) + 1;
}
for (cptr = sim_wallclock_queue;
cptr != QUEUE_LIST_END;
cptr = cptr->a_next)
if (uptr == cptr) { /* found on the wall clock queue */
d_result = uptr->a_due_gtime - sim_gtime ();
if (d_result < 0.0)
d_result = 0.0;
if (d_result > (double)0x7FFFFFFE)
d_result = (double)0x7FFFFFFE;
pthread_mutex_unlock (&sim_timer_lock);
return (1000000.0 * (d_result / sim_timer_inst_per_sec ())) + 1;
}
pthread_mutex_unlock (&sim_timer_lock);
}
if (uptr->a_next) /* on the asynch event queue? */
return (1000000.0 * (uptr->a_event_time / sim_timer_inst_per_sec ())) + 1;
#endif /* defined(SIM_ASYNCH_CLOCKS) */
if (uptr->cancel == &_sim_coschedule_cancel) { /* unit on a clock's coschedule queue? */
for (tmr=0; tmr<=SIM_NTIMERS; tmr++) {
int32 accum;
accum = sim_cosched_interval[tmr]; /* ticks until the queue head fires */
for (cptr = sim_clock_cosched_queue[tmr]; cptr != QUEUE_LIST_END; cptr = cptr->next) {
if (cptr != sim_clock_cosched_queue[tmr])
accum += cptr->time; /* later entries wait additional ticks */
if (cptr == uptr) {
if (accum > 0)
--accum; /* the first tick is covered by the clock unit's own pending event */
return uptr->usecs_remaining + (1000000.0 * ((rtc_currd[tmr] * accum) + sim_activate_time (&sim_timer_units[tmr])) / sim_timer_inst_per_sec ());
}
}
}
}
for (tmr=0; tmr<SIM_NTIMERS; tmr++) /* NOTE(review): cosched scan above uses <=SIM_NTIMERS but this uses < - confirm the internal timer is intentionally excluded here */
if (sim_clock_unit[tmr] == uptr)
return (1000000.0 * sim_activate_time (&sim_timer_units[tmr])) / sim_timer_inst_per_sec ();
return -1.0; /* Not found. */
}

View file

@ -117,7 +117,7 @@ int32 sim_rtc_calb (int32 ticksper);
t_stat sim_set_timers (int32 arg, CONST char *cptr);
t_stat sim_show_timers (FILE* st, DEVICE *dptr, UNIT* uptr, int32 val, CONST char* desc);
t_stat sim_show_clock_queues (FILE *st, DEVICE *dptr, UNIT *uptr, int32 flag, CONST char *cptr);
t_bool sim_idle (uint32 tmr, t_bool sin_cyc);
t_bool sim_idle (uint32 tmr, int sin_cyc);
t_stat sim_set_throt (int32 arg, CONST char *cptr);
t_stat sim_show_throt (FILE *st, DEVICE *dnotused, UNIT *unotused, int32 flag, CONST char *cptr);
t_stat sim_set_idle (UNIT *uptr, int32 val, CONST char *cptr, void *desc);
@ -133,9 +133,9 @@ void sim_start_timer_services (void);
void sim_stop_timer_services (void);
t_stat sim_timer_change_asynch (void);
t_stat sim_timer_activate (UNIT *uptr, int32 interval);
t_stat sim_timer_activate_after (UNIT *uptr, uint32 usec_delay);
t_stat sim_timer_activate_after_d (UNIT *uptr, double usec_delay);
t_stat sim_timer_activate_after (UNIT *uptr, double usec_delay);
int32 sim_timer_activate_time (UNIT *uptr);
double sim_timer_activate_time_usecs (UNIT *uptr);
t_bool sim_timer_is_active (UNIT *uptr);
t_bool sim_timer_cancel (UNIT *uptr);
t_stat sim_register_clock_unit (UNIT *uptr);

View file

@ -3867,11 +3867,11 @@ t_stat tmxr_activate_after (UNIT *uptr, uint32 usecs_walltime)
#if defined(SIM_ASYNCH_MUX)
if ((!(uptr->dynflags & UNIT_TM_POLL)) ||
(!sim_asynch_enabled)) {
return _sim_activate_after (uptr, usecs_walltime);
return _sim_activate_after (uptr, (double)usecs_walltime);
}
return SCPE_OK;
#else
return _sim_activate_after (uptr, usecs_walltime);
return _sim_activate_after (uptr, (double)usecs_walltime);
#endif
}
@ -3880,11 +3880,11 @@ t_stat tmxr_activate_after_abs (UNIT *uptr, uint32 usecs_walltime)
#if defined(SIM_ASYNCH_MUX)
if ((!(uptr->dynflags & UNIT_TM_POLL)) ||
(!sim_asynch_enabled)) {
return _sim_activate_after_abs (uptr, usecs_walltime);
return _sim_activate_after_abs (uptr, (double)usecs_walltime);
}
return SCPE_OK;
#else
return _sim_activate_after_abs (uptr, usecs_walltime);
return _sim_activate_after_abs (uptr, (double)usecs_walltime);
#endif
}