TIMER: Add support for catchup clock ticks and clean up asynchronous clocks

Asynchronous clocks are now built for all simulators which are built with
SIM_ASYNCH_IO defined.  The default behavior has asynchronous clocks
disabled since this is still experimental, but it can be enabled with
SET TIMER ASYNC.

Catchup clock ticks are now available, but since they're experimental,
they aren't enabled by default.  Catchup ticks are only available if the
simulator's clock device calls sim_rtcn_tick_ack to acknowledge processing
of clock ticks.  The VAX simulators have been modified to leverage this.
Catchup clock ticks can be enabled with SET TIMER CATCHUP.

Additionally, an idle threshold is provided which can be used to
influence when clock calibration may be suppressed.  The default is not
to suppress calibration activities.

The various timer behaviors are visible with the SHOW TIMER command.

The state of the operating timer facilities is visible with: SHOW CLOCK

Timer events which are queued are visible with the SHOW QUEUE command.
This commit is contained in:
Mark Pizzolato 2016-11-16 23:50:53 -08:00
parent 32bf2629b1
commit 39d2944ede
19 changed files with 1049 additions and 679 deletions

View file

@ -121,6 +121,20 @@ Host platforms which have libSDL available can leverage this functionality.
Asynchronous support exists for console I/O and most multiplexer
devices. (Still experimental - not currently by default)
#### Clock/Timer Enhancements
* Asynchronous clock ticks exist to better support modern processors
that have variable clock speeds. The initial clock calibration model
presumed a constant simulated instruction execution rate.
Modern processors have variable processor speeds which breaks this
key assumption.
* Strategies to make up for missed clock ticks are now available
(independent of asynchronous tick generation). These strategies
generate catch-up clock ticks to keep the simulator passage of
time consistent with wall clock time. Simulator time while idling
or throttling is now consistent. Reasonable idling behavior is
now possible without requiring that the host system clock tick be
10ms or less.
#### Ethernet Transport Enhancements
* UDP packet transport. Direct simulator connections to HECnet can be
made without running a local packet bridge program.

View file

@ -248,6 +248,8 @@ void iccs_wr (int32 data)
{
if ((data & CSR_IE) == 0)
CLR_INT (CLK);
if (data & CSR_DONE) /* Interrupt Acked? */
sim_rtcn_tick_ack (20, TMR_CLK); /* Let timers know */
clk_csr = (clk_csr & ~CLKCSR_RW) | (data & CLKCSR_RW);
return;
}

View file

@ -226,6 +226,8 @@ void iccs_wr (int32 data)
{
if ((data & CSR_IE) == 0)
CLR_INT (CLK);
if (data & CSR_DONE) /* Interrupt Acked? */
sim_rtcn_tick_ack (20, TMR_CLK); /* Let timers know */
clk_csr = (clk_csr & ~CLKCSR_RW) | (data & CLKCSR_RW);
return;
}

View file

@ -644,6 +644,8 @@ if ((val & TMR_CSR_RUN) == 0) { /* clearing run? */
if (tmr_iccs & TMR_CSR_RUN) /* run 1 -> 0? */
tmr_icr = icr_rd (TRUE); /* update itr */
}
if (val & CSR_DONE) /* Interrupt Acked? */
sim_rtcn_tick_ack (20, TMR_CLK); /* Let timers know */
tmr_iccs = tmr_iccs & ~(val & TMR_CSR_W1C); /* W1C csr */
tmr_iccs = (tmr_iccs & ~TMR_CSR_WR) | /* new r/w */
(val & TMR_CSR_WR);
@ -859,7 +861,7 @@ int32 todr_rd (void)
TOY *toy = (TOY *)clk_unit.filebuf;
struct timespec base, now, val;
clock_gettime(CLOCK_REALTIME, &now); /* get curr time */
sim_rtcn_get_time(&now, TMR_CLK); /* get curr time */
base.tv_sec = toy->toy_gmtbase;
base.tv_nsec = toy->toy_gmtbasemsec * 1000000;
sim_timespec_diff (&val, &now, &base);
@ -874,8 +876,7 @@ struct timespec now, val, base;
/* Save the GMT time when set value was 0 to record the base for future
read operations in "battery backed-up" state */
if (-1 == clock_gettime(CLOCK_REALTIME, &now)) /* get curr time */
return; /* error? */
sim_rtcn_get_time(&now, TMR_CLK); /* get curr time */
val.tv_sec = ((uint32)data) / 100;
val.tv_nsec = (((uint32)data) % 100) * 10000000;
sim_timespec_diff (&base, &now, &val); /* base = now - data */

View file

@ -666,6 +666,8 @@ if ((val & TMR_CSR_RUN) == 0) { /* clearing run? */
if (tmr_iccs & TMR_CSR_RUN) /* run 1 -> 0? */
tmr_icr = icr_rd (TRUE); /* update itr */
}
if (val & CSR_DONE) /* Interrupt Acked? */
sim_rtcn_tick_ack (20, TMR_CLK); /* Let timers know */
tmr_iccs = tmr_iccs & ~(val & TMR_CSR_W1C); /* W1C csr */
tmr_iccs = (tmr_iccs & ~TMR_CSR_WR) | /* new r/w */
(val & TMR_CSR_WR);
@ -895,7 +897,7 @@ int32 todr_rd (void)
TOY *toy = (TOY *)clk_unit.filebuf;
struct timespec base, now, val;
clock_gettime(CLOCK_REALTIME, &now); /* get curr time */
sim_rtcn_get_time(&now, TMR_CLK); /* get curr time */
base.tv_sec = toy->toy_gmtbase;
base.tv_nsec = toy->toy_gmtbasemsec * 1000000;
sim_timespec_diff (&val, &now, &base);
@ -911,8 +913,7 @@ struct timespec now, val, base;
/* Save the GMT time when set value was 0 to record the base for future
read operations in "battery backed-up" state */
if (-1 == clock_gettime(CLOCK_REALTIME, &now)) /* get curr time */
return; /* error? */
sim_rtcn_get_time(&now, TMR_CLK); /* get curr time */
val.tv_sec = ((uint32)data) / 100;
val.tv_nsec = (((uint32)data) % 100) * 10000000;
sim_timespec_diff (&base, &now, &val); /* base = now - data */

View file

@ -623,6 +623,8 @@ if ((val & TMR_CSR_RUN) == 0) { /* clearing run? */
if (tmr_iccs & TMR_CSR_RUN) /* run 1 -> 0? */
tmr_icr = icr_rd (); /* update itr */
}
if (val & CSR_DONE) /* Interrupt Acked? */
sim_rtcn_tick_ack (20, TMR_CLK); /* Let timers know */
tmr_iccs = tmr_iccs & ~(val & TMR_CSR_W1C); /* W1C csr */
tmr_iccs = (tmr_iccs & ~TMR_CSR_WR) | /* new r/w */
(val & TMR_CSR_WR);
@ -835,7 +837,7 @@ int32 todr_rd (void)
TOY *toy = (TOY *)clk_unit.filebuf;
struct timespec base, now, val;
clock_gettime(CLOCK_REALTIME, &now); /* get curr time */
sim_rtcn_get_time(&now, TMR_CLK); /* get curr time */
base.tv_sec = toy->toy_gmtbase;
base.tv_nsec = toy->toy_gmtbasemsec * 1000000;
sim_timespec_diff (&val, &now, &base);
@ -851,8 +853,7 @@ struct timespec now, val, base;
/* Save the GMT time when set value was 0 to record the base for future
read operations in "battery backed-up" state */
if (-1 == clock_gettime(CLOCK_REALTIME, &now)) /* get curr time */
return; /* error? */
sim_rtcn_get_time(&now, TMR_CLK); /* get curr time */
val.tv_sec = ((uint32)data) / 100;
val.tv_nsec = (((uint32)data) % 100) * 10000000;
sim_timespec_diff (&base, &now, &val); /* base = now - data */

View file

@ -747,6 +747,8 @@ if ((val & TMR_CSR_RUN) == 0) { /* clearing run? */
if (tmr_iccs & TMR_CSR_RUN) /* run 1 -> 0? */
tmr_icr = icr_rd (TRUE); /* update itr */
}
if (val & CSR_DONE) /* Interrupt Acked? */
sim_rtcn_tick_ack (20, TMR_CLK); /* Let timers know */
tmr_iccs = tmr_iccs & ~(val & TMR_CSR_W1C); /* W1C csr */
tmr_iccs = (tmr_iccs & ~TMR_CSR_WR) | /* new r/w */
(val & TMR_CSR_WR);
@ -962,7 +964,7 @@ int32 todr_rd (void)
TOY *toy = (TOY *)clk_unit.filebuf;
struct timespec base, now, val;
clock_gettime(CLOCK_REALTIME, &now); /* get curr time */
sim_rtcn_get_time(&now, TMR_CLK); /* get curr time */
base.tv_sec = toy->toy_gmtbase;
base.tv_nsec = toy->toy_gmtbasemsec * 1000000;
sim_timespec_diff (&val, &now, &base);
@ -977,8 +979,7 @@ struct timespec now, val, base;
/* Save the GMT time when set value was 0 to record the base for future
read operations in "battery backed-up" state */
if (-1 == clock_gettime(CLOCK_REALTIME, &now)) /* get curr time */
return; /* error? */
sim_rtcn_get_time(&now, TMR_CLK); /* get curr time */
val.tv_sec = ((uint32)data) / 100;
val.tv_nsec = (((uint32)data) % 100) * 10000000;
sim_timespec_diff (&base, &now, &val); /* base = now - data */

View file

@ -428,12 +428,12 @@ MTAB cpu_mod[] = {
};
DEBTAB cpu_deb[] = {
{ "INTEXC", LOG_CPU_I },
{ "REI", LOG_CPU_R },
{ "CONTEXT", LOG_CPU_P },
{ "EVENT", SIM_DBG_EVENT },
{ "ACTIVATE", SIM_DBG_ACTIVATE },
{ "ASYNCH", SIM_DBG_AIO_QUEUE },
{ "INTEXC", LOG_CPU_I, "interrupt and exception activities" },
{ "REI", LOG_CPU_R, "REI activities" },
{ "CONTEXT", LOG_CPU_P, "context switching activities" },
{ "EVENT", SIM_DBG_EVENT, "event dispatch activities" },
{ "ACTIVATE", SIM_DBG_ACTIVATE, "queue insertion activities" },
{ "ASYNCH", SIM_DBG_AIO_QUEUE, "asynch queue activities" },
{ NULL, 0 }
};

View file

@ -299,6 +299,8 @@ void iccs_wr (int32 data)
{
if ((data & CSR_IE) == 0)
CLR_INT (CLK);
if (data & CSR_DONE) /* Interrupt Acked? */
sim_rtcn_tick_ack (20, TMR_CLK); /* Let timers know */
clk_csr = (clk_csr & ~CLKCSR_RW) | (data & CLKCSR_RW);
return;
}
@ -484,7 +486,7 @@ if (0 == todr_reg) { /* clock running? */
in the 32bit TODR. This is the 33bit value 0x100000000/100 to get seconds */
#define TOY_MAX_SECS (0x40000000/25)
clock_gettime(CLOCK_REALTIME, &now); /* get curr time */
sim_rtcn_get_time(&now, TMR_CLK); /* get curr time */
base.tv_sec = toy->toy_gmtbase;
base.tv_nsec = toy->toy_gmtbasemsec * 1000000;
sim_timespec_diff (&val, &now, &base);
@ -507,8 +509,7 @@ struct timespec now, val, base;
/* Save the GMT time when set value was 0 to record the base for future
read operations in "battery backed-up" state */
if (-1 == clock_gettime(CLOCK_REALTIME, &now)) /* get curr time */
return; /* error? */
sim_rtcn_get_time(&now, TMR_CLK); /* get curr time */
val.tv_sec = ((uint32)data) / 100;
val.tv_nsec = (((uint32)data) % 100) * 10000000;
sim_timespec_diff (&base, &now, &val); /* base = now - data */

View file

@ -162,11 +162,13 @@ int32 wtc_rd (int32 pa)
int32 rg = (pa >> 1) & 0xF;
int32 val = 0;
time_t curr;
struct timespec now;
static int mdays[12] = {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
struct tm *ctm = NULL;
if (rg < 10) { /* time reg? */
curr = time (NULL); /* get curr time */
sim_rtcn_get_time (&now, TMR_CLK);
curr = now.tv_sec; /* get curr time */
if (curr == (time_t) -1) /* error? */
return 0;
ctm = localtime (&curr); /* decompose */
@ -296,7 +298,8 @@ return SCPE_OK;
t_stat wtc_set (UNIT *uptr, int32 val, CONST char *cptr, void *desc)
{
if (cptr != NULL) wtc_mode = strcmp(cptr, "STD");
if (cptr != NULL)
wtc_mode = ((strcmp(cptr, "STD") != 0) ? WTC_MODE_VMS : WTC_MODE_STD);
return SCPE_OK;
}

Binary file not shown.

57
scp.c
View file

@ -328,12 +328,65 @@ pthread_cond_t sim_tmxr_poll_cond = PTHREAD_COND_INITIALIZER;
int32 sim_tmxr_poll_count;
pthread_t sim_asynch_main_threadid;
UNIT * volatile sim_asynch_queue;
UNIT * volatile sim_wallclock_queue;
UNIT * volatile sim_wallclock_entry;
t_bool sim_asynch_enabled = TRUE;
int32 sim_asynch_check;
int32 sim_asynch_latency = 4000; /* 4 usec interrupt latency */
int32 sim_asynch_inst_latency = 20; /* assume 5 mip simulator */
int sim_aio_update_queue (void)
{
int migrated = 0;
if (AIO_QUEUE_VAL != QUEUE_LIST_END) { /* List !Empty */
UNIT *q, *uptr;
int32 a_event_time;
do
q = AIO_QUEUE_VAL;
while (q != AIO_QUEUE_SET(QUEUE_LIST_END, q)); /* Grab current queue */
while (q != QUEUE_LIST_END) { /* List !Empty */
sim_debug (SIM_DBG_AIO_QUEUE, sim_dflt_dev, "Migrating Asynch event for %s after %d instructions\n", sim_uname(q), q->a_event_time);
++migrated;
uptr = q;
q = q->a_next;
uptr->a_next = NULL; /* hygiene */
if (uptr->a_activate_call != &sim_activate_notbefore) {
a_event_time = uptr->a_event_time-((sim_asynch_inst_latency+1)/2);
if (a_event_time < 0)
a_event_time = 0;
}
else
a_event_time = uptr->a_event_time;
uptr->a_activate_call (uptr, a_event_time);
if (uptr->a_check_completion) {
sim_debug (SIM_DBG_AIO_QUEUE, sim_dflt_dev, "Calling Completion Check for asynch event on %s\n", sim_uname(uptr));
uptr->a_check_completion (uptr);
}
}
}
return migrated;
}
void sim_aio_activate (ACTIVATE_API caller, UNIT *uptr, int32 event_time)
{
sim_debug (SIM_DBG_AIO_QUEUE, sim_dflt_dev, "Queueing Asynch event for %s after %d instructions\n", sim_uname(uptr), event_time);
if (uptr->a_next) {
uptr->a_activate_call = sim_activate_abs;
}
else {
UNIT *q;
uptr->a_event_time = event_time;
uptr->a_activate_call = caller;
do {
q = AIO_QUEUE_VAL;
uptr->a_next = q; /* Mark as on list */
} while (q != AIO_QUEUE_SET(uptr, q));
}
sim_asynch_check = 0; /* try to force check */
if (sim_idle_wait) {
sim_debug (TIMER_DBG_IDLE, &sim_timer_dev, "waking due to event on %s after %d instructions\n", sim_uname(uptr), event_time);
pthread_cond_signal (&sim_asynch_wake);
}
}
#else
t_bool sim_asynch_enabled = FALSE;
#endif

4
scp.h
View file

@ -281,6 +281,10 @@ extern t_addr sim_brk_match_addr;
extern BRKTYPTAB *sim_brk_type_desc; /* type descriptions */
extern FILE *stdnul;
extern t_bool sim_asynch_enabled;
#if defined(SIM_ASYNCH_IO)
int sim_aio_update_queue (void);
void sim_aio_activate (ACTIVATE_API caller, UNIT *uptr, int32 event_time);
#endif
/* VM interface */

View file

@ -541,10 +541,10 @@ struct UNIT {
void *up7; /* device specific */
void *up8; /* device specific */
void *tmxr; /* TMXR linkage */
void (*cancel)(UNIT *);
#ifdef SIM_ASYNCH_IO
void (*a_check_completion)(UNIT *);
t_bool (*a_is_active)(UNIT *);
void (*a_cancel)(UNIT *);
UNIT *a_next; /* next asynch active */
int32 a_event_time;
ACTIVATE_API a_activate_call;
@ -931,6 +931,8 @@ struct FILEREF {
#if defined (SIM_ASYNCH_IO)
#include <pthread.h>
#define SIM_ASYNCH_CLOCKS 1
extern pthread_mutex_t sim_asynch_lock;
extern pthread_cond_t sim_asynch_wake;
extern pthread_mutex_t sim_timer_lock;
@ -941,8 +943,6 @@ extern pthread_cond_t sim_tmxr_poll_cond;
extern pthread_mutex_t sim_tmxr_poll_lock;
extern pthread_t sim_asynch_main_threadid;
extern UNIT * volatile sim_asynch_queue;
extern UNIT * volatile sim_wallclock_queue;
extern UNIT * volatile sim_wallclock_entry;
extern volatile t_bool sim_idle_wait;
extern int32 sim_asynch_check;
extern int32 sim_asynch_latency;
@ -959,7 +959,7 @@ extern int32 sim_asynch_inst_latency;
#define AIO_TLS
#endif
#define AIO_QUEUE_CHECK(que, lock) \
if (1) { \
do { \
UNIT *_cptr; \
if (lock) \
pthread_mutex_lock (lock); \
@ -976,83 +976,17 @@ extern int32 sim_asynch_inst_latency;
} \
if (lock) \
pthread_mutex_unlock (lock); \
} else (void)0
} while (0)
#define AIO_MAIN_THREAD (pthread_equal ( pthread_self(), sim_asynch_main_threadid ))
#define AIO_LOCK \
pthread_mutex_lock(&sim_asynch_lock)
#define AIO_UNLOCK \
pthread_mutex_unlock(&sim_asynch_lock)
#define AIO_IS_ACTIVE(uptr) (((uptr)->a_is_active ? (uptr)->a_is_active (uptr) : FALSE) || ((uptr)->a_next))
#if !defined(SIM_ASYNCH_MUX) && !defined(SIM_ASYNCH_CLOCKS)
#if defined(SIM_ASYNCH_MUX)
#define AIO_CANCEL(uptr) \
if ((uptr)->a_cancel) \
(uptr)->a_cancel (uptr); \
else \
(void)0
#endif /* !defined(SIM_ASYNCH_MUX) && !defined(SIM_ASYNCH_CLOCKS) */
#if !defined(SIM_ASYNCH_MUX) && defined(SIM_ASYNCH_CLOCKS)
#define AIO_CANCEL(uptr) \
if ((uptr)->a_cancel) \
(uptr)->a_cancel (uptr); \
else { \
AIO_UPDATE_QUEUE; \
if ((uptr)->a_next) { \
UNIT *cptr; \
pthread_mutex_lock (&sim_timer_lock); \
if ((uptr) == sim_wallclock_queue) { \
sim_wallclock_queue = (uptr)->a_next; \
(uptr)->a_next = NULL; \
sim_debug (SIM_DBG_EVENT, sim_dflt_dev, "Canceling Timer Event for %s\n", sim_uname(uptr));\
sim_timer_event_canceled = TRUE; \
pthread_cond_signal (&sim_timer_wake); \
} \
else \
for (cptr = sim_wallclock_queue; \
(cptr != QUEUE_LIST_END); \
cptr = cptr->a_next) \
if (cptr->a_next == (uptr)) { \
cptr->a_next = (uptr)->a_next; \
(uptr)->a_next = NULL; \
sim_debug (SIM_DBG_EVENT, sim_dflt_dev, "Canceling Timer Event for %s\n", sim_uname(uptr));\
break; \
} \
if ((uptr)->a_next == NULL) \
(uptr)->a_due_time = (uptr)->a_usec_delay = 0; \
else { \
int tmr; \
for (tmr=0; tmr<SIM_NTIMERS; tmr++) { \
if ((uptr) == sim_clock_cosched_queue[tmr]) { \
sim_clock_cosched_queue[tmr] = (uptr)->a_next; \
(uptr)->a_next = NULL; \
} \
else \
for (cptr = sim_clock_cosched_queue[tmr]; \
(cptr != QUEUE_LIST_END); \
cptr = cptr->a_next) \
if (cptr->a_next == (uptr)) { \
cptr->a_next = (uptr)->a_next; \
(uptr)->a_next = NULL; \
break; \
} \
if ((uptr)->a_next == NULL) { \
sim_debug (SIM_DBG_EVENT, sim_dflt_dev, "Canceling Clock Coscheduling Event for %s\n", sim_uname(uptr));\
} \
} \
} \
while (sim_timer_event_canceled) { \
pthread_mutex_unlock (&sim_timer_lock); \
sim_debug (SIM_DBG_EVENT, sim_dflt_dev, "Waiting for Timer Event cancelation for %s\n", sim_uname(uptr));\
sim_os_ms_sleep (0); \
pthread_mutex_lock (&sim_timer_lock); \
} \
pthread_mutex_unlock (&sim_timer_lock); \
} \
}
#endif
#if defined(SIM_ASYNCH_MUX) && !defined(SIM_ASYNCH_CLOCKS)
#define AIO_CANCEL(uptr) \
if ((uptr)->a_cancel) \
(uptr)->a_cancel (uptr); \
if ((uptr)->cancel) \
(uptr)->cancel (uptr); \
else { \
if (((uptr)->dynflags & UNIT_TM_POLL) && \
!((uptr)->next) && !((uptr)->a_next)) { \
@ -1061,92 +995,19 @@ extern int32 sim_asynch_inst_latency;
(uptr)->a_poll_waiter_count = 0; \
} \
}
#endif /* defined(SIM_ASYNCH_MUX) && !defined(SIM_ASYNCH_CLOCKS) */
#if defined(SIM_ASYNCH_MUX) && defined(SIM_ASYNCH_CLOCKS)
#endif /* defined(SIM_ASYNCH_MUX) */
#if !defined(AIO_CANCEL)
#define AIO_CANCEL(uptr) \
if ((uptr)->a_cancel) \
(uptr)->a_cancel (uptr); \
else { \
AIO_UPDATE_QUEUE; \
if (((uptr)->dynflags & UNIT_TM_POLL) && \
!((uptr)->next) && !((uptr)->a_next)) { \
(uptr)->a_polling_now = FALSE; \
sim_tmxr_poll_count -= (uptr)->a_poll_waiter_count; \
(uptr)->a_poll_waiter_count = 0; \
} \
if ((uptr)->a_next) { \
UNIT *cptr; \
pthread_mutex_lock (&sim_timer_lock); \
if ((uptr) == sim_wallclock_queue) { \
sim_wallclock_queue = (uptr)->a_next; \
(uptr)->a_next = NULL; \
sim_debug (SIM_DBG_EVENT, sim_dflt_dev, "Canceling Timer Event for %s\n", sim_uname(uptr));\
sim_timer_event_canceled = TRUE; \
pthread_cond_signal (&sim_timer_wake); \
} \
else \
for (cptr = sim_wallclock_queue; \
(cptr != QUEUE_LIST_END); \
cptr = cptr->a_next) \
if (cptr->a_next == (uptr)) { \
cptr->a_next = (uptr)->a_next; \
(uptr)->a_next = NULL; \
sim_debug (SIM_DBG_EVENT, sim_dflt_dev, "Canceling Timer Event for %s\n", sim_uname(uptr));\
break; \
} \
if ((uptr)->a_next == NULL) \
(uptr)->a_due_time = (uptr)->a_usec_delay = 0; \
else { \
if ((uptr) == sim_clock_cosched_queue) { \
sim_clock_cosched_queue = (uptr)->a_next; \
(uptr)->a_next = NULL; \
} \
else \
for (cptr = sim_clock_cosched_queue; \
(cptr != QUEUE_LIST_END); \
cptr = cptr->a_next) \
if (cptr->a_next == (uptr)) { \
cptr->a_next = (uptr)->a_next; \
(uptr)->a_next = NULL; \
break; \
} \
if ((uptr)->a_next == NULL) { \
sim_debug (SIM_DBG_EVENT, sim_dflt_dev, "Canceling Clock Coscheduling Event for %s\n", sim_uname(uptr));\
} \
} \
while (sim_timer_event_canceled) { \
pthread_mutex_unlock (&sim_timer_lock); \
sim_debug (SIM_DBG_EVENT, sim_dflt_dev, "Waiting for Timer Event cancelation for %s\n", sim_uname(uptr));\
sim_os_ms_sleep (0); \
pthread_mutex_lock (&sim_timer_lock); \
} \
pthread_mutex_unlock (&sim_timer_lock); \
} \
}
#endif
if ((uptr)->cancel) \
(uptr)->cancel (uptr)
#endif /* !defined(AIO_CANCEL) */
#if defined(SIM_ASYNCH_CLOCKS)
#define AIO_RETURN_TIME(uptr) \
if (1) { \
pthread_mutex_lock (&sim_timer_lock); \
for (cptr = sim_wallclock_queue; \
cptr != QUEUE_LIST_END; \
cptr = cptr->a_next) \
if ((uptr) == cptr) { \
double inst_per_sec = sim_timer_inst_per_sec (); \
int32 result; \
\
result = (int32)(((uptr)->a_due_time - sim_timenow_double())*inst_per_sec);\
if (result < 0) \
result = 0; \
pthread_mutex_unlock (&sim_timer_lock); \
return result + 1; \
} \
pthread_mutex_unlock (&sim_timer_lock); \
if ((uptr)->a_next) /* On asynch queue? */ \
return (uptr)->a_event_time + 1; \
} \
else \
(void)0
do { \
int32 rtime = sim_timer_activate_time (uptr); \
if (rtime >= 0) \
return rtime; \
} while (0)
#else
#define AIO_RETURN_TIME(uptr) (void)0
#endif
@ -1188,31 +1049,25 @@ extern int32 sim_asynch_inst_latency;
/* which avoids the potential ABA issues. */
#define AIO_QUEUE_MODE "Lock free asynchronous event queue access"
#define AIO_INIT \
if (1) { \
do { \
int tmr; \
sim_asynch_main_threadid = pthread_self(); \
/* Empty list/list end uses the point value (void *)1. \
This allows NULL in an entry's a_next pointer to \
indicate that the entry is not currently in any list */ \
sim_asynch_queue = QUEUE_LIST_END; \
sim_wallclock_queue = QUEUE_LIST_END; \
sim_wallclock_entry = NULL; \
for (tmr=0; tmr<SIM_NTIMERS; tmr++) \
sim_clock_cosched_queue[tmr] = QUEUE_LIST_END; \
} \
else \
(void)0
} while (0)
#define AIO_CLEANUP \
if (1) { \
do { \
pthread_mutex_destroy(&sim_asynch_lock); \
pthread_cond_destroy(&sim_asynch_wake); \
pthread_mutex_destroy(&sim_timer_lock); \
pthread_cond_destroy(&sim_timer_wake); \
pthread_mutex_destroy(&sim_tmxr_poll_lock); \
pthread_cond_destroy(&sim_tmxr_poll_cond); \
} \
else \
(void)0
} while (0)
#ifdef _WIN32
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
#define InterlockedCompareExchangePointer(Destination, Exchange, Comparand) __sync_val_compare_and_swap(Destination, Comparand, Exchange)
@ -1223,98 +1078,19 @@ extern int32 sim_asynch_inst_latency;
#endif
#define AIO_QUEUE_VAL (UNIT *)(InterlockedCompareExchangePointer((void * volatile *)&sim_asynch_queue, (void *)sim_asynch_queue, NULL))
#define AIO_QUEUE_SET(val, queue) (UNIT *)(InterlockedCompareExchangePointer((void * volatile *)&sim_asynch_queue, (void *)val, queue))
#define AIO_UPDATE_QUEUE \
if (AIO_QUEUE_VAL != QUEUE_LIST_END) { /* List !Empty */ \
UNIT *q, *uptr; \
int32 a_event_time; \
do \
q = AIO_QUEUE_VAL; \
while (q != AIO_QUEUE_SET(QUEUE_LIST_END, q)); \
while (q != QUEUE_LIST_END) { /* List !Empty */ \
sim_debug (SIM_DBG_AIO_QUEUE, sim_dflt_dev, "Migrating Asynch event for %s after %d instructions\n", sim_uname(q), q->a_event_time);\
uptr = q; \
q = q->a_next; \
uptr->a_next = NULL; /* hygiene */ \
if (uptr->a_activate_call != &sim_activate_notbefore) { \
a_event_time = uptr->a_event_time-((sim_asynch_inst_latency+1)/2); \
if (a_event_time < 0) \
a_event_time = 0; \
} \
else \
a_event_time = uptr->a_event_time; \
uptr->a_activate_call (uptr, a_event_time); \
if (uptr->a_check_completion) { \
sim_debug (SIM_DBG_AIO_QUEUE, sim_dflt_dev, "Calling Completion Check for asynch event on %s\n", sim_uname(uptr));\
uptr->a_check_completion (uptr); \
} \
} \
} else (void)0
#define AIO_UPDATE_QUEUE sim_aio_update_queue ()
#define AIO_ACTIVATE(caller, uptr, event_time) \
if (!pthread_equal ( pthread_self(), sim_asynch_main_threadid )) { \
UNIT *ouptr = (uptr); \
sim_debug (SIM_DBG_AIO_QUEUE, sim_dflt_dev, "Queueing Asynch event for %s after %d instructions\n", sim_uname(ouptr), event_time);\
if (ouptr->a_next) { \
ouptr->a_activate_call = sim_activate_abs; \
} else { \
UNIT *q, *qe; \
ouptr->a_event_time = event_time; \
ouptr->a_activate_call = (ACTIVATE_API)&caller; \
ouptr->a_next = QUEUE_LIST_END; /* Mark as on list */ \
do { \
do \
q = AIO_QUEUE_VAL; \
while (q != AIO_QUEUE_SET(QUEUE_LIST_END, q));/* Grab current list */\
for (qe = ouptr; qe->a_next != QUEUE_LIST_END; qe = qe->a_next); \
qe->a_next = q; /* append current list */\
do \
q = AIO_QUEUE_VAL; \
while (q != AIO_QUEUE_SET(ouptr, q)); \
ouptr = q; \
} while (ouptr != QUEUE_LIST_END); \
} \
sim_asynch_check = 0; /* try to force check */ \
if (sim_idle_wait) { \
sim_debug (TIMER_DBG_IDLE, &sim_timer_dev, "waking due to event on %s after %d instructions\n", sim_uname(ouptr), event_time);\
pthread_cond_signal (&sim_asynch_wake); \
} \
sim_aio_activate ((ACTIVATE_API)caller, uptr, event_time); \
return SCPE_OK; \
} else (void)0
#define AIO_ACTIVATE_LIST(caller, list, event_time) \
if (list) { \
UNIT *ouptr, *q, *qe; \
sim_debug (SIM_DBG_AIO_QUEUE, sim_dflt_dev, "Queueing Asynch events for %s after %d instructions\n", sim_uname(list), event_time);\
for (qe=(list); qe->a_next != QUEUE_LIST_END;) { \
qe->a_event_time = event_time; \
qe->a_activate_call = (ACTIVATE_API)&caller; \
qe = qe->a_next; \
} \
qe->a_event_time = event_time; \
qe->a_activate_call = (ACTIVATE_API)&caller; \
ouptr = (list); \
do { \
do \
q = AIO_QUEUE_VAL; \
while (q != AIO_QUEUE_SET(QUEUE_LIST_END, q));/* Grab current list */ \
for (qe = ouptr; qe->a_next != QUEUE_LIST_END; qe = qe->a_next); \
qe->a_next = q; /* append current list */ \
do \
q = AIO_QUEUE_VAL; \
while (q != AIO_QUEUE_SET(ouptr, q)); \
ouptr = q; \
} while (ouptr != QUEUE_LIST_END); \
sim_asynch_check = 0; /* try to force check */ \
if (sim_idle_wait) { \
sim_debug (TIMER_DBG_IDLE, &sim_timer_dev, "waking due to event on %s after %d instructions\n", sim_uname(ouptr), event_time);\
pthread_cond_signal (&sim_asynch_wake); \
} \
} else (void)0
#else /* !USE_AIO_INTRINSICS */
/* This approach uses a pthread mutex to manage access to the link list */
/* head sim_asynch_queue. It will always work, but may be slower than the */
/* lock free approach when using USE_AIO_INTRINSICS */
#define AIO_QUEUE_MODE "Lock based asynchronous event queue access"
#define AIO_INIT \
if (1) { \
do { \
int tmr; \
pthread_mutexattr_t attr; \
\
@ -1327,26 +1103,20 @@ extern int32 sim_asynch_inst_latency;
This allows NULL in an entry's a_next pointer to \
indicate that the entry is not currently in any list */ \
sim_asynch_queue = QUEUE_LIST_END; \
sim_wallclock_queue = QUEUE_LIST_END; \
sim_wallclock_entry = NULL; \
for (tmr=0; tmr<SIM_NTIMERS; tmr++) \
sim_clock_cosched_queue[tmr] = QUEUE_LIST_END; \
} \
else \
(void)0
} while (0)
#define AIO_CLEANUP \
if (1) { \
do { \
pthread_mutex_destroy(&sim_asynch_lock); \
pthread_cond_destroy(&sim_asynch_wake); \
pthread_mutex_destroy(&sim_timer_lock); \
pthread_cond_destroy(&sim_timer_wake); \
pthread_mutex_destroy(&sim_tmxr_poll_lock); \
pthread_cond_destroy(&sim_tmxr_poll_cond); \
} \
else \
(void)0
} while (0)
#define AIO_UPDATE_QUEUE \
if (1) { \
do { \
UNIT *uptr; \
AIO_LOCK; \
while (sim_asynch_queue != QUEUE_LIST_END) { /* List !Empty */ \
@ -1371,7 +1141,7 @@ extern int32 sim_asynch_inst_latency;
AIO_LOCK; \
} \
AIO_UNLOCK; \
} else (void)0
} while (0)
#define AIO_ACTIVATE(caller, uptr, event_time) \
if (!pthread_equal ( pthread_self(), sim_asynch_main_threadid )) { \
sim_debug (SIM_DBG_AIO_QUEUE, sim_dflt_dev, "Queueing Asynch event for %s after %d instructions\n", sim_uname(uptr), event_time);\
@ -1392,27 +1162,6 @@ extern int32 sim_asynch_inst_latency;
sim_asynch_check = 0; \
return SCPE_OK; \
} else (void)0
#define AIO_ACTIVATE_LIST(caller, list, event_time) \
if (list) { \
UNIT *qe; \
sim_debug (SIM_DBG_AIO_QUEUE, sim_dflt_dev, "Queueing Asynch events for %s after %d instructions\n", sim_uname(list), event_time);\
for (qe=list; qe->a_next != QUEUE_LIST_END;) { \
qe->a_event_time = event_time; \
qe->a_activate_call = (ACTIVATE_API)&caller; \
qe = qe->a_next; \
} \
qe->a_event_time = event_time; \
qe->a_activate_call = (ACTIVATE_API)&caller; \
AIO_LOCK; \
qe->a_next = sim_asynch_queue; \
sim_asynch_queue = list; \
sim_asynch_check = 0; /* try to force check */ \
if (sim_idle_wait) { \
sim_debug (TIMER_DBG_IDLE, &sim_timer_dev, "waking due to event on %s after %d instructions\n", sim_uname(list), event_time);\
pthread_cond_signal (&sim_asynch_wake); \
} \
AIO_UNLOCK; \
} else (void)0
#endif /* USE_AIO_INTRINSICS */
#define AIO_VALIDATE if (!pthread_equal ( pthread_self(), sim_asynch_main_threadid )) {sim_printf("Improper thread context for operation\n"); abort();}
#define AIO_CHECK_EVENT \
@ -1421,11 +1170,11 @@ extern int32 sim_asynch_inst_latency;
sim_asynch_check = sim_asynch_inst_latency; \
} else (void)0
#define AIO_SET_INTERRUPT_LATENCY(instpersec) \
if (1) { \
do { \
sim_asynch_inst_latency = (int32)((((double)(instpersec))*sim_asynch_latency)/1000000000);\
if (sim_asynch_inst_latency == 0) \
sim_asynch_inst_latency = 1; \
} else (void)0
} while (0)
#else /* !SIM_ASYNCH_IO */
#define AIO_QUEUE_MODE "Asynchronous I/O is not available"
#define AIO_UPDATE_QUEUE
@ -1441,7 +1190,9 @@ extern int32 sim_asynch_inst_latency;
#define AIO_EVENT_BEGIN(uptr)
#define AIO_EVENT_COMPLETE(uptr, reason)
#define AIO_IS_ACTIVE(uptr) FALSE
#define AIO_CANCEL(uptr)
#define AIO_CANCEL(uptr) \
if ((uptr)->cancel) \
(uptr)->cancel (uptr)
#define AIO_SET_INTERRUPT_LATENCY(instpersec)
#define AIO_TLS
#endif /* SIM_ASYNCH_IO */

View file

@ -467,7 +467,7 @@ if (ctx->asynch_io) {
}
uptr->a_check_completion = _disk_completion_dispatch;
uptr->a_is_active = _disk_is_active;
uptr->a_cancel = _disk_cancel;
uptr->cancel = _disk_cancel;
return SCPE_OK;
#endif
}

View file

@ -388,7 +388,7 @@ if (ctx->asynch_io) {
}
uptr->a_check_completion = _tape_completion_dispatch;
uptr->a_is_active = _tape_is_active;
uptr->a_cancel = _tape_cancel;
uptr->cancel = _tape_cancel;
return SCPE_OK;
#endif
}

File diff suppressed because it is too large Load diff

View file

@ -64,7 +64,7 @@ extern "C" {
#if !defined(_TIMESPEC_DEFINED)
#define _TIMESPEC_DEFINED
struct timespec {
long tv_sec;
time_t tv_sec;
long tv_nsec;
};
#endif /* !defined(_TIMESPEC_DEFINED) */
@ -80,7 +80,6 @@ int clock_gettime(int clock_id, struct timespec *tp);
/* about instructions per second */
#define SIM_IDLE_CAL 10 /* ms to calibrate */
#define SIM_IDLE_MAX 10 /* max granularity idle */
#define SIM_IDLE_STMIN 2 /* min sec for stability */
#define SIM_IDLE_STDFLT 20 /* dft sec for stability */
#define SIM_IDLE_STMAX 600 /* max sec for stability */
@ -103,11 +102,11 @@ int clock_gettime(int clock_id, struct timespec *tp);
t_bool sim_timer_init (void);
void sim_timespec_diff (struct timespec *diff, struct timespec *min, struct timespec *sub);
#if defined(SIM_ASYNCH_CLOCKS)
double sim_timenow_double (void);
#endif
int32 sim_rtcn_init (int32 time, int32 tmr);
int32 sim_rtcn_init_unit (UNIT *uptr, int32 time, int32 tmr);
void sim_rtcn_get_time (struct timespec *now, int tmr);
t_stat sim_rtcn_tick_ack (int32 time, int32 tmr);
void sim_rtcn_init_all (void);
int32 sim_rtcn_calb (int32 ticksper, int32 tmr);
int32 sim_rtc_init (int32 time);
@ -130,13 +129,15 @@ void sim_start_timer_services (void);
void sim_stop_timer_services (void);
t_stat sim_timer_change_asynch (void);
t_stat sim_timer_activate_after (UNIT *uptr, uint32 usec_delay);
int32 sim_timer_activate_time (UNIT *uptr);
t_stat sim_register_clock_unit (UNIT *uptr);
t_stat sim_register_clock_unit_tmr (UNIT *uptr, int32 tmr);
t_stat sim_clock_coschedule (UNIT *uptr, int32 interval);
t_stat sim_clock_coschedule_abs (UNIT *uptr, int32 interval);
t_stat sim_clock_coschedule_tmr (UNIT *uptr, int32 tmr, int32 interval);
t_stat sim_clock_coschedule_tmr_abs (UNIT *uptr, int32 tmr, int32 interval);
t_stat sim_clock_coschedule_tmr (UNIT *uptr, int32 tmr, int32 ticks);
t_stat sim_clock_coschedule_tmr_abs (UNIT *uptr, int32 tmr, int32 ticks);
double sim_timer_inst_per_sec (void);
int32 sim_rtcn_tick_size (int32 tmr);
t_bool sim_timer_idle_capable (uint32 *host_ms_sleep_1, uint32 *host_tick_ms);
#define PRIORITY_BELOW_NORMAL -1
#define PRIORITY_NORMAL 0
@ -147,7 +148,7 @@ extern t_bool sim_idle_enab; /* idle enabled flag */
extern volatile t_bool sim_idle_wait; /* idle waiting flag */
extern t_bool sim_asynch_timer;
extern DEVICE sim_timer_dev;
extern UNIT * volatile sim_clock_cosched_queue[SIM_NTIMERS];
extern UNIT * volatile sim_clock_cosched_queue[SIM_NTIMERS+1];
extern const t_bool rtc_avail;
#ifdef __cplusplus

View file

@ -3903,11 +3903,12 @@ return tmxr_clock_coschedule_tmr (uptr, 0, interval);
t_stat tmxr_clock_coschedule_tmr (UNIT *uptr, int32 tmr, int32 interval)
{
TMXR *mp = (TMXR *)uptr->tmxr;
int32 ticks = (interval + (sim_rtcn_tick_size (tmr)/2))/sim_rtcn_tick_size (tmr);/* Convert to ticks */
#if defined(SIM_ASYNCH_MUX)
if ((!(uptr->dynflags & UNIT_TM_POLL)) ||
(!sim_asynch_enabled)) {
return sim_clock_coschedule (uptr, tmr, interval);
return sim_clock_coschedule (uptr, tmr, ticks);
}
return SCPE_OK;
#else
@ -3937,7 +3938,7 @@ if (mp) {
}
}
sim_debug (TIMER_DBG_MUX, &sim_timer_dev, "scheduling %s after interval %d instructions\n", sim_uname (uptr), interval);
return sim_clock_coschedule_tmr (uptr, tmr, interval);
return sim_clock_coschedule_tmr (uptr, tmr, ticks);
#endif
}