Initial merge of Asynchronous Multiplexer and Asynchronous Clock support

Mark Pizzolato 2013-01-17 11:34:51 -08:00
parent a774f05633
commit 4f19d08869
7 changed files with 1449 additions and 31 deletions

scp.c

@@ -213,6 +213,8 @@
/* Macros and data structures */
#define NOT_MUX_USING_CODE /* sim_tmxr library provider or agnostic */
#include "sim_defs.h"
#include "sim_rev.h"
#include "sim_disk.h"
@@ -311,8 +313,17 @@
#if defined (SIM_ASYNCH_IO)
pthread_mutex_t sim_asynch_lock = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t sim_asynch_wake = PTHREAD_COND_INITIALIZER;
pthread_cond_t sim_idle_wake = PTHREAD_COND_INITIALIZER;
pthread_mutex_t sim_timer_lock = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t sim_timer_wake = PTHREAD_COND_INITIALIZER;
pthread_mutex_t sim_tmxr_poll_lock = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t sim_tmxr_poll_cond = PTHREAD_COND_INITIALIZER;
int32 sim_tmxr_poll_count;
pthread_t sim_asynch_main_threadid;
struct sim_unit *sim_asynch_queue = NULL;
UNIT * volatile sim_asynch_queue;
UNIT * volatile sim_wallclock_queue;
UNIT * volatile sim_wallclock_entry;
UNIT * volatile sim_clock_cosched_queue;
t_bool sim_asynch_enabled = TRUE;
int32 sim_asynch_check;
int32 sim_asynch_latency = 4000; /* 4 usec interrupt latency */
@@ -722,6 +733,7 @@ static CTAB cmd_table[] = {
"sh{ow} ethernet show ethernet devices\n"
"sh{ow} serial show serial devices\n"
"sh{ow} multiplexer show open multiplexer devices\n"
"sh{ow} clocks show calibrated timers\n"
"sh{ow} on show on condition actions\n" },
{ "DO", &do_cmd, 1,
"do {-V} {-O} {-E} {-Q} <file> {arg,arg...}\b"
@@ -1948,6 +1960,7 @@ if (cptr && (*cptr != 0)) /* now eol? */
if (flag == sim_asynch_enabled) /* already set correctly? */
return SCPE_OK;
sim_asynch_enabled = flag;
tmxr_change_async ();
if (1) {
uint32 i, j;
DEVICE *dptr;
@@ -2315,6 +2328,7 @@ static SHTAB show_glob_tab[] = {
{ "SERIAL", &sim_show_serial, 0 },
{ "MULTIPLEXER", &tmxr_show_open_devices, 0 },
{ "MUX", &tmxr_show_open_devices, 0 },
{ "CLOCKS", &sim_show_timers, 0 },
{ "ON", &show_on, 0 },
{ NULL, NULL, 0 }
};
@@ -2597,8 +2611,41 @@ else {
}
fprintf (st, "asynch latency: %d nanoseconds\n", sim_asynch_latency);
fprintf (st, "asynch instruction latency: %d instructions\n", sim_asynch_inst_latency);
#if defined (SIM_ASYNCH_CLOCKS)
pthread_mutex_lock (&sim_timer_lock);
if (sim_wallclock_queue == QUEUE_LIST_END)
fprintf (st, "%s wall clock event queue empty, time = %.0f\n",
sim_name, sim_time);
else {
fprintf (st, "%s wall clock event queue status, time = %.0f\n",
sim_name, sim_time);
for (uptr = sim_wallclock_queue; uptr != QUEUE_LIST_END; uptr = uptr->next) {
if ((dptr = find_dev_from_unit (uptr)) != NULL) {
fprintf (st, " %s", sim_dname (dptr));
if (dptr->numunits > 1)
fprintf (st, " unit %d", (int32) (uptr - dptr->units));
}
else fprintf (st, " Unknown");
fprintf (st, " after %d usec\n", uptr->a_usec_delay);
}
}
if (sim_clock_cosched_queue != QUEUE_LIST_END) {
fprintf (st, "%s clock co-schedule event queue status, time = %.0f\n",
sim_name, sim_time);
for (uptr = sim_clock_cosched_queue; uptr != QUEUE_LIST_END; uptr = uptr->next) {
if ((dptr = find_dev_from_unit (uptr)) != NULL) {
fprintf (st, " %s", sim_dname (dptr));
if (dptr->numunits > 1)
fprintf (st, " unit %d", (int32) (uptr - dptr->units));
}
else fprintf (st, " Unknown");
fprintf (st, "\n");
}
}
pthread_mutex_unlock (&sim_timer_lock);
#endif /* SIM_ASYNCH_CLOCKS */
pthread_mutex_unlock (&sim_asynch_lock);
#endif
#endif /* SIM_ASYNCH_IO */
return SCPE_OK;
}
@@ -3874,23 +3921,32 @@ for (i = 1; (dptr = sim_devices[i]) != NULL; i++) { /* reposition all */
}
}
stop_cpu = 0;
sim_is_running = 1; /* flag running */
if (sim_ttrun () != SCPE_OK) { /* set console mode */
sim_is_running = 0; /* flag idle */
sim_ttcmd ();
return SCPE_TTYERR;
}
if ((r = sim_check_console (30)) != SCPE_OK) { /* check console, error? */
sim_is_running = 0; /* flag idle */
sim_ttcmd ();
return r;
}
if (signal (SIGINT, int_handler) == SIG_ERR) { /* set WRU */
sim_is_running = 0; /* flag idle */
sim_ttcmd ();
return SCPE_SIGERR;
}
#ifdef SIGHUP
if (signal (SIGHUP, int_handler) == SIG_ERR) { /* set WRU */
sim_is_running = 0; /* flag idle */
sim_ttcmd ();
return SCPE_SIGERR;
}
#endif
if (signal (SIGTERM, int_handler) == SIG_ERR) { /* set WRU */
sim_is_running = 0; /* flag idle */
sim_ttcmd ();
return SCPE_SIGERR;
}
if (sim_step) /* set step timer */
@@ -3899,12 +3955,13 @@ fflush(stdout); /* flush stdout */
if (sim_log) /* flush log if enabled */
fflush (sim_log);
sim_throt_sched (); /* set throttle */
sim_is_running = 1; /* flag running */
sim_brk_clract (); /* defang actions */
sim_rtcn_init_all (); /* re-init clocks */
sim_start_timer_services (); /* enable wall clock timing */
r = sim_instr();
sim_is_running = 0; /* flag idle */
sim_stop_timer_services (); /* disable wall clock timing */
sim_ttcmd (); /* restore console */
signal (SIGINT, SIG_DFL); /* cancel WRU */
#ifdef SIGHUP
@@ -4995,10 +5052,10 @@ t_stat sim_register_internal_device (DEVICE *dptr)
{
uint32 i;
for (i = 0; (sim_devices[i] != NULL); ++i)
for (i = 0; (sim_devices[i] != NULL); i++)
if (sim_devices[i] == dptr)
return SCPE_OK;
for (i = 0; i < sim_internal_device_count; ++i)
for (i = 0; i < sim_internal_device_count; i++)
if (sim_internal_devices[i] == dptr)
return SCPE_OK;
++sim_internal_device_count;
@@ -5029,7 +5086,7 @@ for (i = 0; (dptr = sim_devices[i]) != NULL; i++) {
return dptr;
}
}
for (i = 0; i<sim_internal_device_count; ++i) {
for (i = 0; i<sim_internal_device_count; i++) {
dptr = sim_internal_devices[i];
for (j = 0; j < dptr->numunits; j++) {
if (uptr == (dptr->units + j))
@@ -5465,6 +5522,9 @@ return SCPE_OK;
/* Event queue package
sim_activate add entry to event queue
sim_activate_abs add entry to event queue even if event already scheduled
sim_activate_notbefore add entry to event queue even if event already scheduled
but not before the specified time
sim_activate_after add entry to event queue after a specified amount of wall time
sim_cancel remove entry from event queue
sim_process_event process entries on event queue
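For orientation, an illustrative sketch (not part of this commit) of how a device typically drives this package; dz_unit, dz_svc, dz_reset and POLL_WAIT are made-up names:
/* Illustrative only -- unit, routine and delay names are hypothetical */
#include "sim_defs.h"
#define POLL_WAIT 1000 /* instructions between polls */
t_stat dz_svc (UNIT *uptr);
UNIT dz_unit = { UDATA (&dz_svc, UNIT_IDLE, 0) };
t_stat dz_svc (UNIT *uptr)
{
/* ... poll the simulated device here ... */
sim_activate (uptr, POLL_WAIT); /* reschedule this unit */
return SCPE_OK;
}
t_stat dz_reset (DEVICE *dptr)
{
sim_cancel (&dz_unit); /* dequeue any pending event */
sim_activate_after (&dz_unit, 10000); /* first poll in 10 msec of wall time */
return SCPE_OK;
}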
@@ -5518,10 +5578,12 @@ do {
sim_interval = sim_clock_queue->time;
else sim_interval = noqueue_time = NOQUEUE_WAIT;
sim_debug (SIM_DBG_EVENT, sim_dflt_dev, "Processing Event for %s\n", sim_uname (uptr));
AIO_EVENT_BEGIN(uptr);
if (uptr->action != NULL)
reason = uptr->action (uptr);
else
reason = SCPE_OK;
AIO_EVENT_COMPLETE(uptr, reason);
} while ((reason == SCPE_OK) &&
(sim_interval <= 0) &&
(sim_clock_queue != QUEUE_LIST_END));
@@ -5747,6 +5809,7 @@ UNIT *cptr;
int32 accum = 0;
AIO_VALIDATE;
AIO_RETURN_TIME(uptr);
for (cptr = sim_clock_queue; cptr != QUEUE_LIST_END; cptr = cptr->next) {
if (cptr == sim_clock_queue) {
if (sim_interval > 0)

sim_console.c

@@ -936,6 +936,92 @@ return c;
}
#if defined(SIM_ASYNCH_IO) && defined(SIM_ASYNCH_MUX)
extern pthread_mutex_t sim_tmxr_poll_lock;
extern pthread_cond_t sim_tmxr_poll_cond;
extern int32 sim_tmxr_poll_count;
extern t_bool sim_tmxr_poll_running;
extern int32 sim_is_running;
pthread_t sim_console_poll_thread; /* Keyboard Polling Thread Id */
t_bool sim_console_poll_running = FALSE;
pthread_cond_t sim_console_startup_cond;
static void *
_console_poll(void *arg)
{
int sched_policy;
struct sched_param sched_priority;
int poll_timeout_count = 0;
int wait_count = 0;
DEVICE *d;
/* Boost Priority for this I/O thread vs the CPU instruction execution
thread which, in general, won't be readily yielding the processor when
this thread needs to run */
pthread_getschedparam (pthread_self(), &sched_policy, &sched_priority);
++sched_priority.sched_priority;
pthread_setschedparam (pthread_self(), sched_policy, &sched_priority);
sim_debug (DBG_ASY, &sim_con_telnet, "_console_poll() - starting\n");
pthread_mutex_lock (&sim_tmxr_poll_lock);
pthread_cond_signal (&sim_console_startup_cond); /* Signal we're ready to go */
while (sim_asynch_enabled) {
if (!sim_is_running) {
if (wait_count) {
sim_debug (DBG_ASY, d, "_console_poll() - Removing interest in %s. Other interest: %d\n", d->name, sim_con_ldsc.uptr->a_poll_waiter_count);
--sim_con_ldsc.uptr->a_poll_waiter_count;
--sim_tmxr_poll_count;
}
break;
}
/* If we started something, let it finish before polling again */
if (wait_count) {
sim_debug (DBG_ASY, &sim_con_telnet, "_console_poll() - waiting for %d units\n", wait_count);
pthread_cond_wait (&sim_tmxr_poll_cond, &sim_tmxr_poll_lock);
sim_debug (DBG_ASY, &sim_con_telnet, "_console_poll() - continuing with after wait\n");
}
pthread_mutex_unlock (&sim_tmxr_poll_lock);
wait_count = 0;
if (sim_os_poll_kbd_ready (1000)) {
sim_debug (DBG_ASY, &sim_con_telnet, "_console_poll() - Keyboard Data available\n");
pthread_mutex_lock (&sim_tmxr_poll_lock);
++wait_count;
if (!sim_con_ldsc.uptr->a_polling_now) {
sim_con_ldsc.uptr->a_polling_now = TRUE;
sim_con_ldsc.uptr->a_poll_waiter_count = 1;
d = find_dev_from_unit(sim_con_ldsc.uptr);
sim_debug (DBG_ASY, &sim_con_telnet, "_console_poll() - Activating %s\n", d->name);
pthread_mutex_unlock (&sim_tmxr_poll_lock);
_sim_activate (sim_con_ldsc.uptr, 0);
pthread_mutex_lock (&sim_tmxr_poll_lock);
}
else {
d = find_dev_from_unit(sim_con_ldsc.uptr);
sim_debug (DBG_ASY, &sim_con_telnet, "_console_poll() - Already Activated %s %d times\n", d->name, sim_con_ldsc.uptr->a_poll_waiter_count);
++sim_con_ldsc.uptr->a_poll_waiter_count;
}
}
else
pthread_mutex_lock (&sim_tmxr_poll_lock);
sim_tmxr_poll_count += wait_count;
}
pthread_mutex_unlock (&sim_tmxr_poll_lock);
sim_debug (DBG_ASY, &sim_con_telnet, "_console_poll() - exiting\n");
return NULL;
}
#endif /* defined(SIM_ASYNCH_IO) && defined(SIM_ASYNCH_MUX) */
t_stat sim_ttinit (void)
{
sim_register_internal_device (&sim_con_telnet);
@@ -945,8 +1031,18 @@ return sim_os_ttinit ();
t_stat sim_ttrun (void)
{
if (!sim_con_tmxr.ldsc->uptr) /* If simulator didn't declare its input polling unit */
if (!sim_con_tmxr.ldsc->uptr) { /* If simulator didn't declare its input polling unit */
sim_con_unit.flags &= ~UNIT_TM_POLL; /* we can't poll asynchronously */
sim_con_unit.flags |= TMUF_NOASYNCH; /* disable asynchronous behavior */
}
else {
#if defined(SIM_ASYNCH_IO) && defined(SIM_ASYNCH_MUX)
if (sim_asynch_enabled) {
sim_con_tmxr.ldsc->uptr->flags |= UNIT_TM_POLL;/* flag console input device as a polling unit */
sim_con_unit.flags |= UNIT_TM_POLL; /* flag as polling unit */
}
#endif
}
#if defined(SIM_ASYNCH_IO) && defined(SIM_ASYNCH_MUX)
pthread_mutex_lock (&sim_tmxr_poll_lock);
if (sim_asynch_enabled) {

sim_defs.h

@@ -392,6 +392,16 @@ struct sim_unit {
struct sim_unit *a_next; /* next asynch active */
int32 a_event_time;
t_stat (*a_activate_call)(struct sim_unit *, int32);
/* Asynchronous Polling control */
/* These fields should only be referenced when holding the sim_tmxr_poll_lock */
t_bool a_polling_now; /* polling active flag */
int32 a_poll_waiter_count; /* count of polling threads */
/* waiting for this unit */
/* Asynchronous Timer control */
double a_due_time; /* due time for timer event */
double a_skew; /* accumulated skew being corrected */
double a_last_fired_time; /* time last event fired */
int32 a_usec_delay; /* time delay for timer event */
#endif
};
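The locking rule called out above is the heart of the new polling fields; a minimal sketch of the intended access pattern (illustrative, not code from this commit):
/* Illustrative only: a_polling_now and a_poll_waiter_count may only be
   touched while sim_tmxr_poll_lock is held */
#include <pthread.h>
extern pthread_mutex_t sim_tmxr_poll_lock;
static void note_poll_waiter (UNIT *uptr)
{
pthread_mutex_lock (&sim_tmxr_poll_lock);
if (!uptr->a_polling_now) { /* no poll outstanding? */
    uptr->a_polling_now = TRUE;
    uptr->a_poll_waiter_count = 1;
    }
else
    ++uptr->a_poll_waiter_count; /* join the poll already in flight */
pthread_mutex_unlock (&sim_tmxr_poll_lock);
}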
@@ -633,8 +643,18 @@ typedef struct sim_bitfield BITFIELD;
extern pthread_mutex_t sim_asynch_lock;
extern pthread_cond_t sim_asynch_wake;
extern pthread_cond_t sim_idle_wake;
extern pthread_mutex_t sim_timer_lock;
extern pthread_cond_t sim_timer_wake;
extern t_bool sim_timer_event_canceled;
extern int32 sim_tmxr_poll_count;
extern pthread_cond_t sim_tmxr_poll_cond;
extern pthread_mutex_t sim_tmxr_poll_lock;
extern pthread_t sim_asynch_main_threadid;
extern struct sim_unit *sim_asynch_queue;
extern UNIT * volatile sim_asynch_queue;
extern UNIT * volatile sim_wallclock_queue;
extern UNIT * volatile sim_wallclock_entry;
extern UNIT * volatile sim_clock_cosched_queue;
extern volatile t_bool sim_idle_wait;
extern t_bool sim_asynch_enabled;
extern int32 sim_asynch_check;
@@ -652,12 +672,129 @@ extern int32 sim_asynch_inst_latency;
#define AIO_TLS
#endif
#define AIO_IS_ACTIVE(uptr) (((uptr)->a_is_active ? (uptr)->a_is_active (uptr) : FALSE) || ((uptr)->a_next))
#define AIO_CANCEL(uptr) if ((uptr)->a_cancel) (uptr)->a_cancel (uptr); else (void)0
#define AIO_LOCK \
pthread_mutex_lock(&sim_asynch_lock)
#define AIO_UNLOCK \
pthread_mutex_unlock(&sim_asynch_lock)
#define AIO_IS_ACTIVE(uptr) (((uptr)->a_is_active ? (uptr)->a_is_active (uptr) : FALSE) || ((uptr)->a_next))
#if !defined(SIM_ASYNCH_MUX) && !defined(SIM_ASYNCH_CLOCKS)
#define AIO_CANCEL(uptr) \
if ((uptr)->a_cancel) \
(uptr)->a_cancel (uptr); \
else \
(void)0
#endif /* !defined(SIM_ASYNCH_MUX) && !defined(SIM_ASYNCH_CLOCKS) */
#if defined(SIM_ASYNCH_MUX) && !defined(SIM_ASYNCH_CLOCKS)
#define AIO_CANCEL(uptr) \
if ((uptr)->a_cancel) \
(uptr)->a_cancel (uptr); \
else { \
if (((uptr)->flags & UNIT_TM_POLL) && \
!((uptr)->next) && !((uptr)->a_next)) { \
(uptr)->a_polling_now = FALSE; \
sim_tmxr_poll_count -= (uptr)->a_poll_waiter_count; \
(uptr)->a_poll_waiter_count = 0; \
} \
}
#endif /* defined(SIM_ASYNCH_MUX) && !defined(SIM_ASYNCH_CLOCKS) */
#if defined(SIM_ASYNCH_MUX) && defined(SIM_ASYNCH_CLOCKS)
#define AIO_CANCEL(uptr) \
if ((uptr)->a_cancel) \
(uptr)->a_cancel (uptr); \
else { \
if (((uptr)->flags & UNIT_TM_POLL) && \
!((uptr)->next) && !((uptr)->a_next)) { \
(uptr)->a_polling_now = FALSE; \
sim_tmxr_poll_count -= (uptr)->a_poll_waiter_count; \
(uptr)->a_poll_waiter_count = 0; \
} \
if (AIO_IS_ACTIVE (uptr)) { \
UNIT *cptr, *nptr; \
AIO_UPDATE_QUEUE; \
pthread_mutex_lock (&sim_timer_lock); \
nptr = QUEUE_LIST_END; \
if ((uptr) == sim_wallclock_queue) { \
sim_wallclock_queue = (uptr)->next; \
(uptr)->next = NULL; \
} \
else \
for (cptr = sim_wallclock_queue; \
(cptr != QUEUE_LIST_END); \
cptr = cptr->next) \
if (cptr->next == (uptr)) { \
cptr->next = (uptr)->next; \
nptr = cptr; \
(uptr)->next = NULL; \
break; \
} \
if (nptr == QUEUE_LIST_END) { \
sim_timer_event_canceled = TRUE; \
pthread_cond_signal (&sim_timer_wake); \
} \
if ((uptr)->next == NULL) \
(uptr)->a_due_time = (uptr)->a_usec_delay = 0; \
else { \
nptr = QUEUE_LIST_END; \
if ((uptr) == sim_clock_cosched_queue) { \
sim_clock_cosched_queue = (uptr)->next; \
(uptr)->next = NULL; \
} \
else \
for (cptr = sim_clock_cosched_queue; \
(cptr != QUEUE_LIST_END); \
cptr = cptr->next) \
if (cptr->next == (uptr)) { \
cptr->next = (uptr)->next; \
nptr = cptr; \
(uptr)->next = NULL; \
break; \
} \
} \
pthread_mutex_unlock (&sim_timer_lock); \
} \
}
#define AIO_RETURN_TIME(uptr) \
if (1) { \
pthread_mutex_lock (&sim_timer_lock); \
for (cptr = sim_wallclock_queue; \
cptr != QUEUE_LIST_END; \
cptr = cptr->next) \
if ((uptr) == cptr) { \
double inst_per_sec = sim_timer_inst_per_sec (); \
int32 result; \
\
result = (int32)(((uptr)->a_due_time - sim_timenow_double())*inst_per_sec);\
if (result < 0) \
result = 0; \
pthread_mutex_unlock (&sim_timer_lock); \
return result + 1; \
} \
pthread_mutex_unlock (&sim_timer_lock); \
if ((uptr)->a_next) /* On asynch queue? */ \
return (uptr)->a_event_time + 1; \
} \
else \
(void)0
#else
#define AIO_RETURN_TIME(uptr) (void)0
#endif
#define AIO_EVENT_BEGIN(uptr) \
do { \
int __was_poll = uptr->flags & UNIT_TM_POLL
#define AIO_EVENT_COMPLETE(uptr, reason) \
if (__was_poll) { \
pthread_mutex_lock (&sim_tmxr_poll_lock); \
uptr->a_polling_now = FALSE; \
if (uptr->a_poll_waiter_count) { \
sim_tmxr_poll_count -= uptr->a_poll_waiter_count; \
uptr->a_poll_waiter_count = 0; \
if (0 == sim_tmxr_poll_count) \
pthread_cond_broadcast (&sim_tmxr_poll_cond); \
} \
pthread_mutex_unlock (&sim_tmxr_poll_lock); \
} \
AIO_UPDATE_QUEUE; \
} while (0)
#if defined(__DECC_VER)
#include <builtins>
@@ -674,6 +811,10 @@ extern int32 sim_asynch_inst_latency;
#undef USE_AIO_INTRINSICS
#endif
#ifdef USE_AIO_INTRINSICS
/* This approach uses intrinsics to manage access to the link list head */
/* sim_asynch_queue. This implementation is a completely lock free design */
/* which avoids the potential ABA issues. */
#define AIO_QUEUE_MODE "Lock free asynchronous event queue access"
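As a reader's aid, a sketch of the push half of such a lock free LIFO, written with C11 atomics rather than the platform intrinsics this header selects (names and types here are made up; the commit's own queue macros follow below):
/* Illustrative only: the compare-and-swap retry loop behind a lock free push */
#include <stdatomic.h>
typedef struct node { struct node *next; } node_t;
static _Atomic(node_t *) list_head;
static void lockfree_push (node_t *entry)
{
node_t *old_head = atomic_load (&list_head);
do {
    entry->next = old_head; /* link ahead of the current head */
    } while (!atomic_compare_exchange_weak (&list_head, &old_head, entry));
} /* on failure old_head holds the new head and we retry */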
#define AIO_INIT \
if (1) { \
sim_asynch_main_threadid = pthread_self(); \
@@ -681,19 +822,23 @@ extern int32 sim_asynch_inst_latency;
This allows NULL in an entry's a_next pointer to \
indicate that the entry is not currently in any list */ \
sim_asynch_queue = QUEUE_LIST_END; \
sim_wallclock_queue = QUEUE_LIST_END; \
sim_wallclock_entry = NULL; \
sim_clock_cosched_queue = QUEUE_LIST_END; \
} \
else \
(void)0
#define AIO_CLEANUP \
if (1) { \
pthread_mutex_destroy(&sim_asynch_lock); \
pthread_cond_destroy(&sim_asynch_wake); \
pthread_cond_destroy(&sim_idle_wake); \
pthread_mutex_destroy(&sim_timer_lock); \
pthread_cond_destroy(&sim_timer_wake); \
pthread_mutex_destroy(&sim_tmxr_poll_lock); \
pthread_cond_destroy(&sim_tmxr_poll_cond); \
} \
else \
(void)0
/* This approach uses intrinsics to manage access to the link list head */
/* sim_asynch_queue. This implementation is a completely lock free design */
/* which avoids the potential ABA issues. */
#ifdef _WIN32
#include <winsock2.h>
#ifdef PACKED
@@ -709,7 +854,6 @@ extern int32 sim_asynch_inst_latency;
#else
#error "Implementation of function InterlockedCompareExchangePointer() is needed to build with USE_AIO_INTRINSICS"
#endif
#define AIO_QUEUE_MODE "Lock free asynchronous event queue access"
#define AIO_QUEUE_VAL InterlockedCompareExchangePointer(&sim_asynch_queue, sim_asynch_queue, NULL)
#define AIO_QUEUE_SET(val, queue) InterlockedCompareExchangePointer(&sim_asynch_queue, val, queue)
#define AIO_UPDATE_QUEUE \
@@ -764,6 +908,10 @@ extern int32 sim_asynch_inst_latency;
return SCPE_OK; \
} else (void)0
#else /* !USE_AIO_INTRINSICS */
/* This approach uses a pthread mutex to manage access to the link list */
/* head sim_asynch_queue. It will always work, but may be slower than the */
/* lock free approach when using USE_AIO_INTRINSICS */
#define AIO_QUEUE_MODE "Lock based asynchronous event queue access"
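For contrast, a sketch of the lock based equivalent of the same push (illustrative, not the macro text below):
/* Illustrative only: mutex-guarded push onto sim_asynch_queue */
static void locked_push (UNIT *uptr)
{
pthread_mutex_lock (&sim_asynch_lock);
uptr->a_next = sim_asynch_queue; /* link at the head */
sim_asynch_queue = uptr;
pthread_mutex_unlock (&sim_asynch_lock);
pthread_cond_signal (&sim_asynch_wake); /* wake an idle-waiting simulator */
}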
#define AIO_INIT \
if (1) { \
pthread_mutexattr_t attr; \
@@ -777,20 +925,23 @@ extern int32 sim_asynch_inst_latency;
This allows NULL in an entry's a_next pointer to \
indicate that the entry is not currently in any list */ \
sim_asynch_queue = QUEUE_LIST_END; \
sim_wallclock_queue = QUEUE_LIST_END; \
sim_wallclock_entry = NULL; \
sim_clock_cosched_queue = QUEUE_LIST_END; \
} \
else \
(void)0
#define AIO_CLEANUP \
if (1) { \
pthread_mutex_destroy(&sim_asynch_lock); \
pthread_cond_destroy(&sim_asynch_wake); \
pthread_cond_destroy(&sim_idle_wake); \
pthread_mutex_destroy(&sim_timer_lock); \
pthread_cond_destroy(&sim_timer_wake); \
pthread_mutex_destroy(&sim_tmxr_poll_lock); \
pthread_cond_destroy(&sim_tmxr_poll_cond); \
} \
else \
(void)0
#define AIO_QUEUE_MODE "Lock based asynchronous event queue access"
/* This approach uses a pthread mutex to manage access to the link list */
/* head sim_asynch_queue. It will always work, but may be slower than the */
/* lock free approach when using USE_AIO_INTRINSICS */
#define AIO_UPDATE_QUEUE \
if (1) { \
UNIT *uptr; \
@@ -859,6 +1010,8 @@ extern int32 sim_asynch_inst_latency;
#define AIO_UNLOCK
#define AIO_CLEANUP
#define AIO_RETURN_TIME(uptr)
#define AIO_EVENT_BEGIN(uptr)
#define AIO_EVENT_COMPLETE(uptr, reason)
#define AIO_IS_ACTIVE(uptr) FALSE
#define AIO_CANCEL(uptr)
#define AIO_SET_INTERRUPT_LATENCY(instpersec)

sim_timer.c

@@ -76,8 +76,11 @@
routines are not.
*/
#define NOT_MUX_USING_CODE /* sim_tmxr library provider or agnostic */
#include "sim_defs.h"
#include <ctype.h>
#include <math.h>
t_bool sim_idle_enab = FALSE; /* global flag */
volatile t_bool sim_idle_wait = FALSE; /* global flag */
@@ -87,6 +90,7 @@ static int32 sim_calb_tmr = -1; /* the system calibrated tim
static uint32 sim_idle_rate_ms = 0;
static uint32 sim_os_sleep_min_ms = 0;
static uint32 sim_idle_stable = SIM_IDLE_STDFLT;
static t_bool sim_idle_idled = FALSE;
static uint32 sim_throt_ms_start = 0;
static uint32 sim_throt_ms_stop = 0;
static uint32 sim_throt_type = 0;
@@ -94,11 +98,32 @@ static uint32 sim_throt_val = 0;
static uint32 sim_throt_state = 0;
static uint32 sim_throt_sleep_time = 0;
static int32 sim_throt_wait = 0;
static UNIT *sim_clock_unit = NULL;
static t_bool sim_asynch_timer =
#if defined (SIM_ASYNCH_CLOCKS)
TRUE;
#else
FALSE;
#endif
t_stat sim_throt_svc (UNIT *uptr);
UNIT sim_throt_unit = { UDATA (&sim_throt_svc, 0, 0) };
#define DBG_IDL TIMER_DBG_IDLE /* idling */
#define DBG_QUE TIMER_DBG_QUEUE /* queue activities */
#define DBG_TRC 0x004 /* tracing */
#define DBG_CAL 0x008 /* calibration activities */
#define DBG_TIM 0x010 /* timer thread activities */
DEBTAB sim_timer_debug[] = {
{"TRACE", DBG_TRC},
{"IDLE", DBG_IDL},
{"QUEUE", DBG_QUE},
{"CALIB", DBG_CAL},
{"TIME", DBG_TIM},
{0}
};
/* OS-dependent timer and clock routines */
/* VMS */
@@ -443,6 +468,33 @@ if (sub->tv_nsec > min->tv_nsec) {
}
diff->tv_nsec -= sub->tv_nsec;
diff->tv_sec -= sub->tv_sec;
/* Normalize the result */
while (diff->tv_nsec >= 1000000000) {
++diff->tv_sec;
diff->tv_nsec -= 1000000000;
}
}
static int sim_timespec_compare (struct timespec *a, struct timespec *b)
{
while (a->tv_nsec >= 1000000000) {
a->tv_nsec -= 1000000000;
++a->tv_sec;
}
while (b->tv_nsec >= 1000000000) {
b->tv_nsec -= 1000000000;
++b->tv_sec;
}
if (a->tv_sec < b->tv_sec)
return -1;
if (a->tv_sec > b->tv_sec)
return 1;
if (a->tv_nsec < b->tv_nsec)
return -1;
if (a->tv_nsec > b->tv_nsec)
return 1;
else
return 0;
}
#if defined(SIM_ASYNCH_IO)
@@ -450,6 +502,7 @@ uint32 sim_idle_ms_sleep (unsigned int msec)
{
uint32 start_time = sim_os_msec();
struct timespec done_time;
t_bool timedout = FALSE;
clock_gettime(CLOCK_REALTIME, &done_time);
done_time.tv_sec += (msec/1000);
@@ -462,8 +515,12 @@ pthread_mutex_lock (&sim_asynch_lock);
sim_idle_wait = TRUE;
if (!pthread_cond_timedwait (&sim_asynch_wake, &sim_asynch_lock, &done_time))
sim_asynch_check = 0; /* force check of asynch queue now */
else
timedout = TRUE;
sim_idle_wait = FALSE;
pthread_mutex_unlock (&sim_asynch_lock);
if (!timedout)
AIO_UPDATE_QUEUE;
return sim_os_msec() - start_time;
}
#define SIM_IDLE_MS_SLEEP sim_idle_ms_sleep
@@ -477,11 +534,14 @@ static int32 rtc_ticks[SIM_NTIMERS] = { 0 }; /* ticks */
static int32 rtc_hz[SIM_NTIMERS] = { 0 }; /* tick rate */
static uint32 rtc_rtime[SIM_NTIMERS] = { 0 }; /* real time */
static uint32 rtc_vtime[SIM_NTIMERS] = { 0 }; /* virtual time */
static double rtc_gtime[SIM_NTIMERS] = { 0 }; /* instruction time */
static uint32 rtc_nxintv[SIM_NTIMERS] = { 0 }; /* next interval */
static int32 rtc_based[SIM_NTIMERS] = { 0 }; /* base delay */
static int32 rtc_currd[SIM_NTIMERS] = { 0 }; /* current delay */
static int32 rtc_initd[SIM_NTIMERS] = { 0 }; /* initial delay */
static uint32 rtc_elapsed[SIM_NTIMERS] = { 0 }; /* sec since init */
static uint32 rtc_calibrations[SIM_NTIMERS] = { 0 }; /* calibration count */
static double rtc_clock_skew_max[SIM_NTIMERS] = { 0 }; /* asynchronous max skew */
void sim_rtcn_init_all (void)
{
@@ -495,6 +555,7 @@ return;
int32 sim_rtcn_init (int32 time, int32 tmr)
{
sim_debug (DBG_CAL, &sim_timer_dev, "sim_rtcn_init(time=%d, tmr=%d)\n", time, tmr);
if (time == 0)
time = 1;
if ((tmr < 0) || (tmr >= SIM_NTIMERS))
@@ -508,6 +569,9 @@ rtc_based[tmr] = time;
rtc_currd[tmr] = time;
rtc_initd[tmr] = time;
rtc_elapsed[tmr] = 0;
rtc_calibrations[tmr] = 0;
if (sim_calb_tmr == -1) /* save first initialized clock as the system timer */
sim_calb_tmr = tmr;
return time;
}
@@ -572,11 +636,107 @@ return sim_rtcn_calb (ticksper, 0);
t_bool sim_timer_init (void)
{
sim_debug (DBG_TRC, &sim_timer_dev, "sim_timer_init()\n");
sim_register_internal_device (&sim_timer_dev);
sim_idle_enab = FALSE; /* init idle off */
sim_idle_rate_ms = sim_os_ms_sleep_init (); /* get OS timer rate */
return (sim_idle_rate_ms != 0);
}
/* sim_show_timers - show running timer information */
t_stat sim_show_timers (FILE* st, DEVICE *dptr, UNIT* uptr, int32 val, char* desc)
{
int tmr;
for (tmr=0; tmr<SIM_NTIMERS; ++tmr) {
if (0 == rtc_initd[tmr])
continue;
fprintf (st, "%s%sTimer %d:\n", (sim_asynch_enabled && sim_asynch_timer) ? "Asynchronous " : "", rtc_hz[tmr] ? "Calibrated " : "Uncalibrated ", tmr);
if (rtc_hz[tmr]) {
fprintf (st, " Running at: %dhz\n", rtc_hz[tmr]);
fprintf (st, " Ticks in current second: %d\n", rtc_ticks[tmr]);
}
fprintf (st, " Seconds Running: %u\n", rtc_elapsed[tmr]);
fprintf (st, " Calibrations: %u\n", rtc_calibrations[tmr]);
fprintf (st, " Instruction Time: %.0f\n", rtc_gtime[tmr]);
if (!(sim_asynch_enabled && sim_asynch_timer)) {
fprintf (st, " Real Time: %u\n", rtc_rtime[tmr]);
fprintf (st, " Virtual Time: %u\n", rtc_vtime[tmr]);
fprintf (st, " Next Interval: %u\n", rtc_nxintv[tmr]);
fprintf (st, " Base Tick Delay: %d\n", rtc_based[tmr]);
fprintf (st, " Initial Insts Per Tick: %d\n", rtc_initd[tmr]);
}
fprintf (st, " Current Insts Per Tick: %d\n", rtc_currd[tmr]);
if (rtc_clock_skew_max[tmr] != 0.0)
fprintf (st, " Peak Clock Skew: %.0fms\n", rtc_clock_skew_max[tmr]);
}
return SCPE_OK;
}
REG sim_timer_reg[] = {
{ DRDATAD (TICKS_PER_SEC, rtc_hz[0], 32, "Ticks Per Second"), PV_RSPC|REG_RO},
{ DRDATAD (INSTS_PER_TICK, rtc_currd[0], 32, "Instructions Per Tick"), PV_RSPC|REG_RO},
{ FLDATAD (IDLE_ENAB, sim_idle_enab, 0, "Idle Enabled"), REG_RO},
{ DRDATAD (IDLE_RATE_MS, sim_idle_rate_ms, 32, "Idle Rate Milliseconds"), PV_RSPC|REG_RO},
{ DRDATAD (OS_SLEEP_MIN_MS, sim_os_sleep_min_ms, 32, "Minimum Sleep Resolution"), PV_RSPC|REG_RO},
{ DRDATAD (IDLE_STABLE, sim_idle_stable, 32, "Idle Stable"), PV_RSPC},
{ FLDATAD (IDLE_IDLED, sim_idle_idled, 0, ""), REG_RO},
{ DRDATAD (TMR, sim_calb_tmr, 32, ""), PV_RSPC|REG_RO},
{ DRDATAD (THROT_MS_START, sim_throt_ms_start, 32, ""), PV_RSPC|REG_RO},
{ DRDATAD (THROT_MS_STOP, sim_throt_ms_stop, 32, ""), PV_RSPC|REG_RO},
{ DRDATAD (THROT_TYPE, sim_throt_type, 32, ""), PV_RSPC|REG_RO},
{ DRDATAD (THROT_VAL, sim_throt_val, 32, ""), PV_RSPC|REG_RO},
{ DRDATAD (THROT_STATE, sim_throt_state, 32, ""), PV_RSPC|REG_RO},
{ DRDATAD (THROT_SLEEP_TIME, sim_throt_sleep_time, 32, ""), PV_RSPC|REG_RO},
{ DRDATAD (THROT_WAIT, sim_throt_wait, 32, ""), PV_RSPC|REG_RO},
{ NULL }
};
/* Clear, Set and show asynch */
/* Clear asynch */
t_stat sim_timer_clr_async (UNIT *uptr, int32 val, char *cptr, void *desc)
{
if (sim_asynch_timer) {
sim_asynch_timer = FALSE;
sim_timer_change_asynch ();
}
return SCPE_OK;
}
t_stat sim_timer_set_async (UNIT *uptr, int32 val, char *cptr, void *desc)
{
if (!sim_asynch_timer) {
sim_asynch_timer = TRUE;
sim_timer_change_asynch ();
}
return SCPE_OK;
}
t_stat sim_timer_show_async (FILE *st, UNIT *uptr, int32 val, void *desc)
{
fprintf (st, "%s", (sim_asynch_enabled && sim_asynch_timer) ? "Asynchronous" : "Synchronous");
return SCPE_OK;
}
MTAB sim_timer_mod[] = {
#if defined (SIM_ASYNCH_IO)
{ MTAB_XTD|MTAB_VDV, 0, "ASYNC", "ASYNC", &sim_timer_set_async, &sim_timer_show_async },
{ MTAB_XTD|MTAB_VDV, 0, NULL, "NOASYNC", &sim_timer_clr_async, NULL },
#endif
{ 0 },
};
DEVICE sim_timer_dev = {
"TIMER", &sim_throt_unit, sim_timer_reg, sim_timer_mod,
1, 0, 0, 0, 0, 0,
NULL, NULL, NULL, NULL, NULL, NULL,
NULL, DEV_DEBUG, 0, sim_timer_debug};
/* sim_idle - idle simulator until next event or for specified interval
Inputs:
@@ -596,18 +756,28 @@ static uint32 cyc_ms = 0;
uint32 w_ms, w_idle, act_ms;
int32 act_cyc;
if ((sim_clock_queue == QUEUE_LIST_END) || /* clock queue empty? */
((sim_clock_queue->flags & UNIT_IDLE) == 0) || /* event not idle-able? */
(rtc_elapsed[tmr] < sim_idle_stable)) { /* timer not stable? */
//sim_idle_idled = TRUE; /* record idle attempt */
if ((!sim_idle_enab) || /* idling disabled */
((sim_clock_queue == QUEUE_LIST_END) && /* or clock queue empty? */
#if defined(SIM_ASYNCH_IO) && defined(SIM_ASYNCH_CLOCKS)
(!(sim_asynch_enabled && sim_asynch_timer)))|| /* and not asynch? */
#else
(TRUE)) ||
#endif
((sim_clock_queue != QUEUE_LIST_END) &&
((sim_clock_queue->flags & UNIT_IDLE) == 0))|| /* or event not idle-able? */
(rtc_elapsed[tmr] < sim_idle_stable)) { /* or timer not stable? */
if (sin_cyc)
sim_interval = sim_interval - 1;
return FALSE;
}
sim_debug (DBG_TRC, &sim_timer_dev, "sim_idle(tmr=%d, sin_cyc=%d)\n", tmr, sin_cyc);
if (cyc_ms == 0) /* not computed yet? */
cyc_ms = (rtc_currd[tmr] * rtc_hz[tmr]) / 1000; /* cycles per msec */
if ((sim_idle_rate_ms == 0) || (cyc_ms == 0)) { /* not possible? */
if (sin_cyc)
sim_interval = sim_interval - 1;
sim_debug (DBG_IDL, &sim_timer_dev, "not possible %d - %d\n", sim_idle_rate_ms, cyc_ms);
return FALSE;
}
w_ms = (uint32) sim_interval / cyc_ms; /* ms to wait */
@@ -615,8 +785,13 @@ w_idle = w_ms / sim_idle_rate_ms; /* intervals to wait */
if (w_idle == 0) { /* none? */
if (sin_cyc)
sim_interval = sim_interval - 1;
sim_debug (DBG_IDL, &sim_timer_dev, "no wait\n");
return FALSE;
}
if (sim_clock_queue == QUEUE_LIST_END)
sim_debug (DBG_IDL, &sim_timer_dev, "sleeping for %d ms - pending event in %d instructions\n", w_ms, sim_interval);
else
sim_debug (DBG_IDL, &sim_timer_dev, "sleeping for %d ms - pending event on %s in %d instructions\n", w_ms, sim_uname(sim_clock_queue), sim_interval);
act_ms = SIM_IDLE_MS_SLEEP (w_ms); /* wait */
act_cyc = act_ms * cyc_ms;
if (act_ms < w_ms) /* awakened early? */
@@ -624,6 +799,10 @@ if (act_ms < w_ms) /* awakened early? */
if (sim_interval > act_cyc)
sim_interval = sim_interval - act_cyc; /* count down sim_interval */
else sim_interval = 0; /* or fire immediately */
if (sim_clock_queue == QUEUE_LIST_END)
sim_debug (DBG_IDL, &sim_timer_dev, "slept for %d ms - pending event in %d instructions\n", act_ms, sim_interval);
else
sim_debug (DBG_IDL, &sim_timer_dev, "slept for %d ms - pending event on %s in %d instructions\n", act_ms, sim_uname(sim_clock_queue), sim_interval);
return TRUE;
}
@@ -866,6 +1045,228 @@ sim_activate (uptr, sim_throt_wait); /* reschedule */
return SCPE_OK;
}
#if defined(SIM_ASYNCH_IO) && defined(SIM_ASYNCH_CLOCKS)
static double _timespec_to_double (struct timespec *time)
{
return ((double)time->tv_sec)+(double)(time->tv_nsec)/1000000000.0;
}
static void _double_to_timespec (struct timespec *time, double dtime)
{
time->tv_sec = (time_t)floor(dtime);
time->tv_nsec = (long)((dtime-floor(dtime))*1000000000.0);
}
double sim_timenow_double (void)
{
struct timespec now;
clock_gettime(CLOCK_REALTIME, &now);
return _timespec_to_double (&now);
}
extern int32 sim_is_running;
extern UNIT * volatile sim_wallclock_queue;
extern UNIT * volatile sim_wallclock_entry;
pthread_t sim_timer_thread; /* Wall Clock Timing Thread Id */
pthread_cond_t sim_timer_startup_cond;
t_bool sim_timer_thread_running = FALSE;
t_bool sim_timer_event_canceled = FALSE;
static void *
_timer_thread(void *arg)
{
int sched_policy;
struct sched_param sched_priority;
/* Boost Priority for this I/O thread vs the CPU instruction execution
thread which, in general, won't be readily yielding the processor when
this thread needs to run */
pthread_getschedparam (pthread_self(), &sched_policy, &sched_priority);
++sched_priority.sched_priority;
pthread_setschedparam (pthread_self(), sched_policy, &sched_priority);
sim_debug (DBG_TIM, &sim_timer_dev, "_timer_thread() - starting\n");
pthread_mutex_lock (&sim_timer_lock);
pthread_cond_signal (&sim_timer_startup_cond); /* Signal we're ready to go */
while (sim_asynch_enabled && sim_asynch_timer && sim_is_running) {
struct timespec start_time, stop_time;
struct timespec due_time;
double wait_usec;
int32 inst_delay;
double inst_per_sec;
UNIT *uptr;
if (sim_wallclock_entry) { /* something to insert in queue? */
UNIT *cptr, *prvptr;
sim_debug (DBG_TIM, &sim_timer_dev, "_timer_thread() - timing %s for %d usec\n",
sim_uname(sim_wallclock_entry), sim_wallclock_entry->time);
uptr = sim_wallclock_entry;
sim_wallclock_entry = NULL;
prvptr = NULL;
for (cptr = sim_wallclock_queue; cptr != QUEUE_LIST_END; cptr = cptr->next) {
if (uptr->a_due_time < cptr->a_due_time)
break;
prvptr = cptr;
}
if (prvptr == NULL) { /* insert at head */
cptr = uptr->next = sim_wallclock_queue;
sim_wallclock_queue = uptr;
}
else {
cptr = uptr->next = prvptr->next; /* insert at prvptr */
prvptr->next = uptr;
}
}
/* determine wait time */
if (sim_wallclock_queue != QUEUE_LIST_END) {
/* due time adjusted by 1/2 a minimal sleep interval */
/* the goal being to let the last fractional part of the due time */
/* be done by counting instructions */
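/* Worked example (added for clarity, not in the commit): with a minimal */
/* sleep resolution of sim_idle_rate_ms = 10, the adjustment below is */
/* 10 * 0.0005 = 0.005 sec, so the timed wait wakes about 5 msec early */
/* and that remaining fraction is delivered as a counted instruction delay */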
_double_to_timespec (&due_time, sim_wallclock_queue->a_due_time-(((double)sim_idle_rate_ms)*0.0005));
}
else {
due_time.tv_sec = 0x7FFFFFFF; /* Sometime when 32 bit time_t wraps */
due_time.tv_nsec = 0;
}
clock_gettime(CLOCK_REALTIME, &start_time);
wait_usec = floor(1000000.0*(_timespec_to_double (&due_time) - _timespec_to_double (&start_time)));
if (sim_wallclock_queue == QUEUE_LIST_END)
sim_debug (DBG_TIM, &sim_timer_dev, "_timer_thread() - waiting forever\n");
else
sim_debug (DBG_TIM, &sim_timer_dev, "_timer_thread() - waiting for %.0f usecs until %.6f\n", wait_usec, sim_wallclock_queue->a_due_time);
if ((wait_usec <= 0.0) ||
(0 != pthread_cond_timedwait (&sim_timer_wake, &sim_timer_lock, &due_time))) {
if (sim_wallclock_queue == QUEUE_LIST_END) /* queue empty? */
continue; /* wait again */
inst_per_sec = sim_timer_inst_per_sec ();
uptr = sim_wallclock_queue;
sim_wallclock_queue = uptr->next;
uptr->next = NULL; /* hygiene */
clock_gettime(CLOCK_REALTIME, &stop_time);
if (1 != sim_timespec_compare (&due_time, &stop_time)) {
inst_delay = 0;
uptr->a_last_fired_time = _timespec_to_double(&stop_time);
}
else {
inst_delay = (int32)(inst_per_sec*(_timespec_to_double(&due_time)-_timespec_to_double(&stop_time)));
uptr->a_last_fired_time = uptr->a_due_time;
}
sim_debug (DBG_TIM, &sim_timer_dev, "_timer_thread() - slept %.0fms - activating(%s,%d)\n",
1000.0*(_timespec_to_double (&stop_time)-_timespec_to_double (&start_time)), sim_uname(uptr), inst_delay);
sim_activate (uptr, inst_delay);
if (sim_clock_unit == uptr)
while (sim_clock_cosched_queue != QUEUE_LIST_END) {
uptr = sim_clock_cosched_queue;
sim_clock_cosched_queue = uptr->next;
uptr->next = NULL;
sim_activate (uptr, inst_delay);
}
}
else /* Something wants to adjust the queue */
if (sim_timer_event_canceled)
sim_timer_event_canceled = FALSE; /* reset flag and continue */
else
if (sim_wallclock_entry == NULL) /* nothing to insert? */
break; /* stop processing entries */
}
pthread_mutex_unlock (&sim_timer_lock);
sim_debug (DBG_TIM, &sim_timer_dev, "_timer_thread() - exiting\n");
return NULL;
}
#endif /* defined(SIM_ASYNCH_IO) && defined(SIM_ASYNCH_CLOCKS) */
void sim_start_timer_services (void)
{
#if defined(SIM_ASYNCH_IO) && defined(SIM_ASYNCH_CLOCKS)
pthread_mutex_lock (&sim_timer_lock);
if (sim_asynch_enabled && sim_asynch_timer) {
pthread_attr_t attr;
UNIT *cptr;
double delta_due_time;
/* when restarting after being manually stopped the due times for all */
timer events need to slide so they fire in the future. (clock ticks
/* don't accumulate when the simulator is stopped) */
for (cptr = sim_wallclock_queue; cptr != QUEUE_LIST_END; cptr = cptr->next) {
if (cptr == sim_wallclock_queue) { /* Handle first entry */
struct timespec now;
double due_time;
clock_gettime(CLOCK_REALTIME, &now);
due_time = _timespec_to_double(&now) + ((double)(cptr->a_usec_delay)/1000000.0);
delta_due_time = due_time - cptr->a_due_time;
}
cptr->a_due_time += delta_due_time;
}
sim_debug (DBG_TRC, &sim_timer_dev, "sim_start_timer_services() - starting\n");
pthread_cond_init (&sim_timer_startup_cond, NULL);
pthread_attr_init (&attr);
pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
pthread_create (&sim_timer_thread, &attr, _timer_thread, NULL);
pthread_attr_destroy( &attr);
pthread_cond_wait (&sim_timer_startup_cond, &sim_timer_lock); /* Wait for thread to stabilize */
pthread_cond_destroy (&sim_timer_startup_cond);
sim_timer_thread_running = TRUE;
}
pthread_mutex_unlock (&sim_timer_lock);
#endif
}
void sim_stop_timer_services (void)
{
#if defined(SIM_ASYNCH_IO) && defined(SIM_ASYNCH_CLOCKS)
pthread_mutex_lock (&sim_timer_lock);
if (sim_timer_thread_running) {
sim_debug (DBG_TRC, &sim_timer_dev, "sim_stop_timer_services() - stopping\n");
pthread_cond_signal (&sim_timer_wake);
pthread_mutex_unlock (&sim_timer_lock);
pthread_join (sim_timer_thread, NULL);
sim_timer_thread_running = FALSE;
}
else
pthread_mutex_unlock (&sim_timer_lock);
#endif
}
t_stat sim_timer_change_asynch (void)
{
#if defined(SIM_ASYNCH_IO) && defined(SIM_ASYNCH_CLOCKS)
if (sim_asynch_enabled && sim_asynch_timer)
sim_start_timer_services ();
else {
UNIT *uptr;
int32 accum = 0;
sim_stop_timer_services ();
while (1) {
uptr = sim_wallclock_queue;
if (uptr == QUEUE_LIST_END)
break;
sim_wallclock_queue = uptr->next;
accum += uptr->time;
uptr->next = NULL;
uptr->a_due_time = 0;
uptr->a_usec_delay = 0;
sim_activate_after (uptr, accum);
}
}
#endif
return SCPE_OK;
}
/* Instruction Execution rate. */
/* returns a double since it is mostly used in double expressions and
to avoid overflow if/when strange timing delays might produce unexpected results */
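A hedged sketch of what such a rate function can reduce to, given the calibrated timer arrays above (the committed body lies outside this hunk, and the fallback value here is hypothetical):
/* Illustrative sketch only -- not the committed body of sim_timer_inst_per_sec */
double sim_timer_inst_per_sec_sketch (void)
{
if (sim_calb_tmr == -1) /* no calibrated timer yet? */
    return 1000000.0; /* hypothetical fallback rate */
return ((double)rtc_currd[sim_calb_tmr]) * rtc_hz[sim_calb_tmr];
}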
@@ -959,3 +1360,36 @@ return _sim_activate (uptr, inst_delay); /* queue it now */
#endif
}
/* Clock coscheduling routines */
t_stat sim_register_clock_unit (UNIT *uptr)
{
sim_clock_unit = uptr;
return SCPE_OK;
}
t_stat sim_clock_coschedule (UNIT *uptr, int32 interval)
{
if (NULL == sim_clock_unit)
return sim_activate (uptr, interval);
else
if (sim_asynch_enabled && sim_asynch_timer) {
if (!sim_is_active (uptr)) { /* already active? */
#if defined(SIM_ASYNCH_IO) && defined(SIM_ASYNCH_CLOCKS)
sim_debug (DBG_TIM, &sim_timer_dev, "sim_clock_coschedule() - queueing %s for clock co-schedule\n", sim_uname (uptr));
pthread_mutex_lock (&sim_timer_lock);
uptr->next = sim_clock_cosched_queue;
sim_clock_cosched_queue = uptr;
pthread_mutex_unlock (&sim_timer_lock);
#endif
}
return SCPE_OK;
}
else {
int32 t;
t = sim_activate_time (sim_clock_unit);
return sim_activate (uptr, t? t - 1: interval);
}
}
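A usage sketch (hypothetical unit names and delays, not from this commit): the clock device registers its tick unit once, after which other devices ride the tick instead of drifting on their own schedules:
/* Illustrative only -- clk_unit and dz_unit are made-up */
t_stat clk_reset (DEVICE *dptr)
{
sim_register_clock_unit (&clk_unit); /* make this the coscheduling clock */
sim_activate (&clk_unit, 10000); /* schedule the first tick */
return SCPE_OK;
}
t_stat dz_reset (DEVICE *dptr)
{
sim_clock_coschedule (&dz_unit, 1000); /* poll with (or just before) each tick */
return SCPE_OK;
}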

sim_timer.h

@@ -81,13 +81,20 @@ int clock_gettime(int clock_id, struct timespec *tp);
#define SIM_THROT_PCT 3 /* Max Percent of host CPU */
#define SIM_THROT_SPC 4 /* Specific periodic Delay */
#define TIMER_DBG_IDLE 1 /* Debug Flag for Idle Debugging */
#define TIMER_DBG_QUEUE 2 /* Debug Flag for Asynch Queue Debugging */
t_bool sim_timer_init (void);
void sim_timespec_diff (struct timespec *diff, struct timespec *min, struct timespec *sub);
#if defined(SIM_ASYNCH_CLOCKS)
double sim_timenow_double (void);
#endif
int32 sim_rtcn_init (int32 time, int32 tmr);
void sim_rtcn_init_all (void);
int32 sim_rtcn_calb (int32 ticksper, int32 tmr);
int32 sim_rtc_init (int32 time);
int32 sim_rtc_calb (int32 ticksper);
t_stat sim_show_timers (FILE* st, DEVICE *dptr, UNIT* uptr, int32 val, char* desc);
t_bool sim_idle (uint32 tmr, t_bool sin_cyc);
t_stat sim_set_throt (int32 arg, char *cptr);
t_stat sim_show_throt (FILE *st, DEVICE *dnotused, UNIT *unotused, int32 flag, char *cptr);
@@ -100,10 +107,15 @@ uint32 sim_os_msec (void);
void sim_os_sleep (unsigned int sec);
uint32 sim_os_ms_sleep (unsigned int msec);
uint32 sim_os_ms_sleep_init (void);
void sim_start_timer_services (void);
void sim_stop_timer_services (void);
t_stat sim_timer_change_asynch (void);
t_stat sim_timer_activate_after (UNIT *uptr, int32 usec_delay);
t_stat sim_clock_coschedule (UNIT *uptr, int32 interval);
double sim_timer_inst_per_sec (void);
extern t_bool sim_idle_enab; /* global flag */
extern volatile t_bool sim_idle_wait; /* global flag */
extern DEVICE sim_timer_dev;
#endif

sim_tmxr.c

@@ -318,8 +318,7 @@
*/
#include <ctype.h>
#define NOT_MUX_USING_CODE /* sim_tmxr library define */
#include "sim_defs.h"
#include "sim_serial.h"
@@ -328,6 +327,8 @@
#include "sim_tmxr.h"
#include "scp.h"
#include <ctype.h>
/* Telnet protocol constants - negatives are for init'ing signed char data */
/* Commands */
@@ -737,8 +738,28 @@ static char mantra[] = {
TN_IAC, TN_DO, TN_BIN
};
if (mp->last_poll_time == 0) { /* first poll initializations */
UNIT *uptr = mp->uptr;
if (!uptr) /* Attached ? */
return -1; /* No connections are possible! */
if (!(uptr->flags & TMUF_NOASYNCH)) { /* if asynch not disabled */
uptr->flags |= UNIT_TM_POLL; /* tag as polling unit */
sim_cancel (uptr);
}
for (i=0; i < mp->lines; i++) {
uptr = mp->ldsc[i].uptr ? mp->ldsc[i].uptr : mp->uptr;
if (!(mp->uptr->flags & TMUF_NOASYNCH)) { /* if asynch not disabled */
uptr->flags |= UNIT_TM_POLL; /* tag as polling unit */
sim_cancel (uptr);
}
}
}
if ((poll_time - mp->last_poll_time) < TMXR_CONNECT_POLL_INTERVAL)
return -1; /* */
return -1; /* too soon to try */
tmxr_debug_trace (mp, "tmxr_poll_conn()");
@@ -1336,8 +1357,16 @@ for (i = 0; i < mp->lines; i++) { /* loop thru lines */
if (!lp->conn) /* skip if !conn */
continue;
nbytes = tmxr_send_buffered_data (lp); /* buffered bytes */
if (nbytes == 0) /* buf empty? enab line */
lp->xmte = 1;
if (nbytes == 0) { /* buf empty? enab line */
#if defined(SIM_ASYNCH_IO) && defined(SIM_ASYNCH_MUX)
UNIT *ruptr = lp->uptr ? lp->uptr : lp->mp->uptr;
if ((ruptr->flags & UNIT_TM_POLL) &&
sim_asynch_enabled &&
tmxr_rqln (lp))
_sim_activate (ruptr, 0);
#endif
lp->xmte = 1; /* enable line transmit */
}
} /* end for */
return;
}
@@ -1864,10 +1893,554 @@ mp->ldsc[line].o_uptr = uptr_poll;
return SCPE_OK;
}
/* Declare which units are the console input and output devices
Inputs:
*rxuptr = the console input unit
*txuptr = the console output unit
Outputs:
none
Implementation note:
This routine is exported by the tmxr library so that it is
visible to code which uses it by including sim_tmxr.h. Including
sim_tmxr.h is necessary so that the caller's sim_activate calls
are redirected to actually invoke tmxr_activate.
*/
t_stat tmxr_set_console_units (UNIT *rxuptr, UNIT *txuptr)
{
extern TMXR sim_con_tmxr;
tmxr_set_line_unit (&sim_con_tmxr, 0, rxuptr);
tmxr_set_line_output_unit (&sim_con_tmxr, 0, txuptr);
return SCPE_OK;
}
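A usage sketch (tti_unit and tto_unit are assumed console unit names, not declared in this commit): the console device's reset path hands its units to the mux layer once:
/* Illustrative only: wiring a simulator's console units into the mux layer */
t_stat tti_reset (DEVICE *dptr)
{
tmxr_set_console_units (&tti_unit, &tto_unit); /* assumed console in/out units */
sim_activate (&tti_unit, tti_unit.wait); /* start console input polling */
return SCPE_OK;
}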
static TMXR **tmxr_open_devices = NULL;
static int tmxr_open_device_count = 0;
#if defined(SIM_ASYNCH_IO) && defined(SIM_ASYNCH_MUX)
pthread_t sim_tmxr_poll_thread; /* Polling Thread Id */
#if defined(_WIN32) || defined(VMS)
pthread_t sim_tmxr_serial_poll_thread; /* Serial Polling Thread Id */
pthread_cond_t sim_tmxr_serial_startup_cond;
#endif
pthread_mutex_t sim_tmxr_poll_lock;
pthread_cond_t sim_tmxr_poll_cond;
pthread_cond_t sim_tmxr_startup_cond;
int32 sim_tmxr_poll_count = 0;
t_bool sim_tmxr_poll_running = FALSE;
static void *
_tmxr_poll(void *arg)
{
int sched_policy;
struct sched_param sched_priority;
struct timeval timeout;
int timeout_usec;
DEVICE *dptr = tmxr_open_devices[0]->dptr;
UNIT **units = NULL;
UNIT **activated = NULL;
SOCKET *sockets = NULL;
int wait_count = 0;
/* Boost Priority for this I/O thread vs the CPU instruction execution
thread which, in general, won't be readily yielding the processor when
this thread needs to run */
pthread_getschedparam (pthread_self(), &sched_policy, &sched_priority);
++sched_priority.sched_priority;
pthread_setschedparam (pthread_self(), sched_policy, &sched_priority);
sim_debug (TMXR_DBG_ASY, dptr, "_tmxr_poll() - starting\n");
units = calloc(FD_SETSIZE, sizeof(*units));
activated = calloc(FD_SETSIZE, sizeof(*activated));
sockets = calloc(FD_SETSIZE, sizeof(*sockets));
timeout_usec = 1000000;
pthread_mutex_lock (&sim_tmxr_poll_lock);
pthread_cond_signal (&sim_tmxr_startup_cond); /* Signal we're ready to go */
while (sim_asynch_enabled) {
int i, j, status, select_errno;
fd_set readfds, errorfds;
int socket_count;
SOCKET max_socket_fd;
TMXR *mp;
DEVICE *d;
if ((tmxr_open_device_count == 0) || (!sim_is_running)) {
for (j=0; j<wait_count; ++j) {
d = find_dev_from_unit(activated[j]);
sim_debug (TMXR_DBG_ASY, d, "_tmxr_poll() - Removing interest in %s. Other interest: %d\n", sim_uname(activated[j]), activated[j]->a_poll_waiter_count);
--activated[j]->a_poll_waiter_count;
--sim_tmxr_poll_count;
}
break;
}
/* If we started something we should wait for, let it finish before polling again */
if (wait_count) {
sim_debug (TMXR_DBG_ASY, dptr, "_tmxr_poll() - waiting for %d units\n", wait_count);
pthread_cond_wait (&sim_tmxr_poll_cond, &sim_tmxr_poll_lock);
sim_debug (TMXR_DBG_ASY, dptr, "_tmxr_poll() - continuing with timeout of %dms\n", timeout_usec/1000);
}
FD_ZERO (&readfds);
FD_ZERO (&errorfds);
for (i=max_socket_fd=socket_count=0; i<tmxr_open_device_count; ++i) {
mp = tmxr_open_devices[i];
if ((mp->master) && (mp->uptr->flags&UNIT_TM_POLL)) {
units[socket_count] = mp->uptr;
sockets[socket_count] = mp->master;
FD_SET (mp->master, &readfds);
FD_SET (mp->master, &errorfds);
if (mp->master > max_socket_fd)
max_socket_fd = mp->master;
++socket_count;
}
for (j=0; j<mp->lines; ++j) {
if (mp->ldsc[j].sock) {
units[socket_count] = mp->ldsc[j].uptr;
if (units[socket_count] == NULL)
units[socket_count] = mp->uptr;
sockets[socket_count] = mp->ldsc[j].sock;
FD_SET (mp->ldsc[j].sock, &readfds);
FD_SET (mp->ldsc[j].sock, &errorfds);
if (mp->ldsc[j].sock > max_socket_fd)
max_socket_fd = mp->ldsc[j].sock;
++socket_count;
}
#if !defined(_WIN32) && !defined(VMS)
if (mp->ldsc[j].serport) {
units[socket_count] = mp->ldsc[j].uptr;
if (units[socket_count] == NULL)
units[socket_count] = mp->uptr;
sockets[socket_count] = mp->ldsc[j].serport;
FD_SET (mp->ldsc[j].serport, &readfds);
FD_SET (mp->ldsc[j].serport, &errorfds);
if (mp->ldsc[j].serport > max_socket_fd)
max_socket_fd = mp->ldsc[j].serport;
++socket_count;
}
#endif
if (mp->ldsc[j].connecting) {
units[socket_count] = mp->uptr;
sockets[socket_count] = mp->ldsc[j].connecting;
FD_SET (mp->ldsc[j].connecting, &readfds);
FD_SET (mp->ldsc[j].connecting, &errorfds);
if (mp->ldsc[j].connecting > max_socket_fd)
max_socket_fd = mp->ldsc[j].connecting;
++socket_count;
}
if (mp->ldsc[j].master) {
units[socket_count] = mp->uptr;
sockets[socket_count] = mp->ldsc[j].master;
FD_SET (mp->ldsc[j].master, &readfds);
FD_SET (mp->ldsc[j].master, &errorfds);
if (mp->ldsc[j].master > max_socket_fd)
max_socket_fd = mp->ldsc[j].master;
++socket_count;
}
}
}
pthread_mutex_unlock (&sim_tmxr_poll_lock);
if (timeout_usec > 1000000)
timeout_usec = 1000000;
timeout.tv_sec = timeout_usec/1000000;
timeout.tv_usec = timeout_usec%1000000;
select_errno = 0;
if (socket_count == 0) {
sim_os_ms_sleep (timeout_usec/1000);
status = 0;
}
else
status = select (1+(int)max_socket_fd, &readfds, NULL, &errorfds, &timeout);
select_errno = errno;
wait_count=0;
pthread_mutex_lock (&sim_tmxr_poll_lock);
switch (status) {
case 0: /* timeout */
for (i=max_socket_fd=socket_count=0; i<tmxr_open_device_count; ++i) {
mp = tmxr_open_devices[i];
if (mp->master) {
if (!mp->uptr->a_polling_now) {
mp->uptr->a_polling_now = TRUE;
mp->uptr->a_poll_waiter_count = 0;
d = find_dev_from_unit(mp->uptr);
sim_debug (TMXR_DBG_ASY, d, "_tmxr_poll() - Activating %s to poll connect\n", sim_uname(mp->uptr));
pthread_mutex_unlock (&sim_tmxr_poll_lock);
_sim_activate (mp->uptr, 0);
pthread_mutex_lock (&sim_tmxr_poll_lock);
}
if (mp->txcount) {
timeout_usec = 10000; /* Wait 10ms next time (this gets doubled below) */
mp->txcount = 0;
}
}
for (j=0; j<mp->lines; ++j) {
if ((mp->ldsc[j].conn) && (mp->ldsc[j].uptr)) {
if (tmxr_tqln(&mp->ldsc[j]) || tmxr_rqln (&mp->ldsc[j])) {
timeout_usec = 10000; /* Wait 10ms next time (this gets doubled below) */
/* More than one socket can be associated with the
same unit. Make sure to only activate it one time */
if (!mp->ldsc[j].uptr->a_polling_now) {
mp->ldsc[j].uptr->a_polling_now = TRUE;
mp->ldsc[j].uptr->a_poll_waiter_count = 0;
d = find_dev_from_unit(mp->ldsc[j].uptr);
sim_debug (TMXR_DBG_ASY, d, "_tmxr_poll() - Line %d Activating %s to poll data: %d/%d\n",
j, sim_uname(mp->ldsc[j].uptr), tmxr_tqln(&mp->ldsc[j]), tmxr_rqln (&mp->ldsc[j]));
pthread_mutex_unlock (&sim_tmxr_poll_lock);
_sim_activate (mp->ldsc[j].uptr, 0);
pthread_mutex_lock (&sim_tmxr_poll_lock);
}
}
}
}
}
sim_debug (TMXR_DBG_ASY, dptr, "_tmxr_poll() - Poll Timeout - %dms\n", timeout_usec/1000);
timeout_usec *= 2; /* Double timeout time */
break;
case SOCKET_ERROR:
wait_count = 0;
if (select_errno == EINTR)
break;
fprintf (stderr, "select() returned -1, errno=%d - %s\r\n", select_errno, strerror(select_errno));
abort();
break;
default:
wait_count = 0;
for (i=0; i<socket_count; ++i) {
if (FD_ISSET(sockets[i], &readfds) ||
FD_ISSET(sockets[i], &errorfds)) {
/* More than one socket can be associated with the
same unit. Only activate one time */
for (j=0; j<wait_count; ++j)
if (activated[j] == units[i])
break;
if (j == wait_count) {
activated[j] = units[i];
++wait_count;
if (!activated[j]->a_polling_now) {
activated[j]->a_polling_now = TRUE;
activated[j]->a_poll_waiter_count = 1;
d = find_dev_from_unit(activated[j]);
sim_debug (TMXR_DBG_ASY, d, "_tmxr_poll() - Activating for data %s\n", sim_uname(activated[j]));
pthread_mutex_unlock (&sim_tmxr_poll_lock);
_sim_activate (activated[j], 0);
pthread_mutex_lock (&sim_tmxr_poll_lock);
}
else {
d = find_dev_from_unit(activated[j]);
sim_debug (TMXR_DBG_ASY, d, "_tmxr_poll() - Already Activated %s%d %d times\n", sim_uname(activated[j]), activated[j]->a_poll_waiter_count);
++activated[j]->a_poll_waiter_count;
}
}
}
}
if (wait_count)
timeout_usec = 10000; /* Wait 10ms next time */
break;
}
sim_tmxr_poll_count += wait_count;
}
pthread_mutex_unlock (&sim_tmxr_poll_lock);
free(units);
free(activated);
free(sockets);
sim_debug (TMXR_DBG_ASY, dptr, "_tmxr_poll() - exiting\n");
return NULL;
}
#if defined(_WIN32)
static void *
_tmxr_serial_poll(void *arg)
{
int sched_policy;
struct sched_param sched_priority;
int timeout_usec;
DEVICE *dptr = tmxr_open_devices[0]->dptr;
UNIT **units = NULL;
UNIT **activated = NULL;
SERHANDLE *serports = NULL;
int wait_count = 0;
/* Boost Priority for this I/O thread vs the CPU instruction execution
thread which, in general, won't be readily yielding the processor when
this thread needs to run */
pthread_getschedparam (pthread_self(), &sched_policy, &sched_priority);
++sched_priority.sched_priority;
pthread_setschedparam (pthread_self(), sched_policy, &sched_priority);
sim_debug (TMXR_DBG_ASY, dptr, "_tmxr_serial_poll() - starting\n");
units = calloc(MAXIMUM_WAIT_OBJECTS, sizeof(*units));
activated = calloc(MAXIMUM_WAIT_OBJECTS, sizeof(*activated));
serports = calloc(MAXIMUM_WAIT_OBJECTS, sizeof(*serports));
timeout_usec = 1000000;
pthread_mutex_lock (&sim_tmxr_poll_lock);
pthread_cond_signal (&sim_tmxr_serial_startup_cond); /* Signal we're ready to go */
while (sim_asynch_enabled) {
int i, j;
DWORD status;
int serport_count;
TMXR *mp;
DEVICE *d;
if ((tmxr_open_device_count == 0) || (!sim_is_running)) {
for (j=0; j<wait_count; ++j) {
d = find_dev_from_unit(activated[j]);
sim_debug (TMXR_DBG_ASY, d, "_tmxr_serial_poll() - Removing interest in %s. Other interest: %d\n", sim_uname(activated[j]), activated[j]->a_poll_waiter_count);
--activated[j]->a_poll_waiter_count;
--sim_tmxr_poll_count;
}
break;
}
/* If we started something we should wait for, let it finish before polling again */
if (wait_count) {
sim_debug (TMXR_DBG_ASY, dptr, "_tmxr_serial_poll() - waiting for %d units\n", wait_count);
pthread_cond_wait (&sim_tmxr_poll_cond, &sim_tmxr_poll_lock);
sim_debug (TMXR_DBG_ASY, dptr, "_tmxr_serial_poll() - continuing with timeout of %dms\n", timeout_usec/1000);
}
for (i=serport_count=0; i<tmxr_open_device_count; ++i) {
mp = tmxr_open_devices[i];
for (j=0; j<mp->lines; ++j) {
if (mp->ldsc[j].serport) {
units[serport_count] = mp->ldsc[j].uptr;
if (units[serport_count] == NULL)
units[serport_count] = mp->uptr;
serports[serport_count] = mp->ldsc[j].serport;
++serport_count;
}
}
}
if (serport_count == 0) /* No open serial ports? */
break; /* We're done */
pthread_mutex_unlock (&sim_tmxr_poll_lock);
if (timeout_usec > 1000000)
timeout_usec = 1000000;
status = WaitForMultipleObjects (serport_count, serports, FALSE, timeout_usec/1000);
wait_count=0;
pthread_mutex_lock (&sim_tmxr_poll_lock);
switch (status) {
case WAIT_FAILED:
fprintf (stderr, "WaitForMultipleObjects() Failed, LastError=%d\r\n", GetLastError());
abort();
break;
case WAIT_TIMEOUT:
sim_debug (TMXR_DBG_ASY, dptr, "_tmxr_serial_poll() - Poll Timeout - %dms\n", timeout_usec/1000);
timeout_usec *= 2; /* Double timeout time */
break;
default:
i = status - WAIT_OBJECT_0;
wait_count = 0;
j = wait_count;
activated[j] = units[i];
++wait_count;
if (!activated[j]->a_polling_now) {
activated[j]->a_polling_now = TRUE;
activated[j]->a_poll_waiter_count = 1;
d = find_dev_from_unit(activated[j]);
sim_debug (TMXR_DBG_ASY, d, "_tmxr_serial_poll() - Activating for data %s\n", sim_uname(activated[j]));
pthread_mutex_unlock (&sim_tmxr_poll_lock);
_sim_activate (activated[j], 0);
pthread_mutex_lock (&sim_tmxr_poll_lock);
}
else {
d = find_dev_from_unit(activated[j]);
sim_debug (TMXR_DBG_ASY, d, "_tmxr_serial_poll() - Already Activated %s%d %d times\n", sim_uname(activated[j]), activated[j]->a_poll_waiter_count);
++activated[j]->a_poll_waiter_count;
}
if (wait_count)
timeout_usec = 10000; /* Wait 10ms next time */
break;
}
sim_tmxr_poll_count += wait_count;
}
pthread_mutex_unlock (&sim_tmxr_poll_lock);
free(units);
free(activated);
free(serports);
sim_debug (TMXR_DBG_ASY, dptr, "_tmxr_serial_poll() - exiting\n");
return NULL;
}
#endif /* _WIN32 */
#if defined(VMS)
#include <descrip.h>
#include <ttdef.h>
#include <tt2def.h>
#include <iodef.h>
#include <ssdef.h>
#include <starlet.h>
#include <unistd.h>
typedef struct {
unsigned short status;
unsigned short count;
unsigned int dev_status; } IOSB;
#define MAXIMUM_WAIT_OBJECTS 64 /* Number of possible concurrently opened serial ports */
pthread_cond_t sim_serial_line_startup_cond;
static void *
_tmxr_serial_line_poll(void *arg)
{
TMLN *lp = (TMLN *)arg;
int sched_policy;
struct sched_param sched_priority;
DEVICE *dptr = tmxr_open_devices[0]->dptr;
UNIT *uptr = (lp->uptr ? lp->uptr : lp->mp->uptr);
DEVICE *d = find_dev_from_unit(uptr);
int wait_count = 0;
/* Boost Priority for this I/O thread vs the CPU instruction execution
thread which, in general, won't be readily yielding the processor when
this thread needs to run */
pthread_getschedparam (pthread_self(), &sched_policy, &sched_priority);
++sched_priority.sched_priority;
pthread_setschedparam (pthread_self(), sched_policy, &sched_priority);
sim_debug (TMXR_DBG_ASY, dptr, "_tmxr_serial_line_poll() - starting\n");
pthread_mutex_lock (&sim_tmxr_poll_lock);
pthread_cond_signal (&sim_serial_line_startup_cond); /* Signal we're ready to go */
while (sim_asynch_enabled) {
int i, j;
int serport_count;
TMXR *mp = lp->mp;
unsigned int status, term[2];
unsigned char buf[4];
IOSB iosb;
if ((tmxr_open_device_count == 0) || (!sim_is_running)) {
if (wait_count) {
sim_debug (TMXR_DBG_ASY, d, "_tmxr_serial_line_poll() - Removing interest in %s. Other interest: %d\n", sim_uname(uptr), uptr->a_poll_waiter_count);
--uptr->a_poll_waiter_count;
--sim_tmxr_poll_count;
}
break;
}
/* If we started something we should wait for, let it finish before polling again */
if (wait_count) {
sim_debug (TMXR_DBG_ASY, dptr, "_tmxr_serial_line_poll() - waiting for %d units\n", wait_count);
pthread_cond_wait (&sim_tmxr_poll_cond, &sim_tmxr_poll_lock);
sim_debug (TMXR_DBG_ASY, dptr, "_tmxr_serial_line_poll() - continuing with timeout of 1 sec\n");
}
lp->a_active = TRUE;
pthread_mutex_unlock (&sim_tmxr_poll_lock);
term[0] = term[1] = 0;
status = sys$qio (0, lp->serport,
IO$_READLBLK | IO$M_NOECHO | IO$M_NOFILTR | IO$M_TIMED | IO$M_TRMNOECHO,
&iosb, 0, 0, buf, 1, 1, term, 0, 0);
if (status != SS$_NORMAL) {
fprintf (stderr, "_tmxr_serial_line_poll() - QIO Failed, Status=%d\r\n", status);
abort();
}
wait_count = 0;
sys$synch (0, &iosb);
pthread_mutex_lock (&sim_tmxr_poll_lock);
lp->a_active = FALSE;
if (iosb.count == 1) {
lp->a_buffered_character = buf[0] | SCPE_KFLAG;
wait_count = 1;
if (!uptr->a_polling_now) {
uptr->a_polling_now = TRUE;
uptr->a_poll_waiter_count = 1;
sim_debug (TMXR_DBG_ASY, d, "_tmxr_serial_line_poll() - Activating for data %s\n", sim_uname(uptr));
pthread_mutex_unlock (&sim_tmxr_poll_lock);
_sim_activate (uptr, 0);
pthread_mutex_lock (&sim_tmxr_poll_lock);
}
else {
sim_debug (TMXR_DBG_ASY, d, "_tmxr_serial_line_poll() - Already Activated %s%d %d times\n", sim_uname(uptr), uptr->a_poll_waiter_count);
++uptr->a_poll_waiter_count;
}
}
sim_tmxr_poll_count += wait_count;
}
pthread_mutex_unlock (&sim_tmxr_poll_lock);
sim_debug (TMXR_DBG_ASY, dptr, "_tmxr_serial_line_poll() - exiting\n");
return NULL;
}
static void *
_tmxr_serial_poll(void *arg)
{
int sched_policy;
struct sched_param sched_priority;
int timeout_usec;
DEVICE *dptr = tmxr_open_devices[0]->dptr;
TMLN **lines = NULL;
pthread_t *threads = NULL;
/* Boost Priority for this I/O thread vs the CPU instruction execution
thread which, in general, won't be readily yielding the processor when
this thread needs to run */
pthread_getschedparam (pthread_self(), &sched_policy, &sched_priority);
++sched_priority.sched_priority;
pthread_setschedparam (pthread_self(), sched_policy, &sched_priority);
sim_debug (TMXR_DBG_ASY, dptr, "_tmxr_serial_poll() - starting\n");
lines = calloc(MAXIMUM_WAIT_OBJECTS, sizeof(*lines));
threads = calloc(MAXIMUM_WAIT_OBJECTS, sizeof(*threads));
pthread_mutex_lock (&sim_tmxr_poll_lock);
pthread_cond_signal (&sim_tmxr_serial_startup_cond); /* Signal we're ready to go */
pthread_cond_init (&sim_serial_line_startup_cond, NULL);
while (sim_asynch_enabled) {
pthread_attr_t attr;
int i, j;
int serport_count;
TMXR *mp;
DEVICE *d;
if ((tmxr_open_device_count == 0) || (!sim_is_running))
break;
pthread_attr_init (&attr);
pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
for (i=serport_count=0; i<tmxr_open_device_count; ++i) {
mp = tmxr_open_devices[i];
for (j=0; j<mp->lines; ++j) {
if (mp->ldsc[j].serport) {
lines[serport_count] = &mp->ldsc[j];
pthread_create (&threads[serport_count], &attr, _tmxr_serial_line_poll, (void *)&mp->ldsc[j]);
pthread_cond_wait (&sim_serial_line_startup_cond, &sim_tmxr_poll_lock); /* Wait for thread to stabilize */
++serport_count;
}
}
}
pthread_attr_destroy( &attr);
if (serport_count == 0) /* No open serial ports? */
break; /* We're done */
pthread_mutex_unlock (&sim_tmxr_poll_lock);
for (i=0; i<serport_count; i++)
pthread_join (threads[i], NULL);
pthread_mutex_lock (&sim_tmxr_poll_lock);
}
pthread_mutex_unlock (&sim_tmxr_poll_lock);
pthread_cond_destroy (&sim_serial_line_startup_cond);
free(lines);
free(threads);
sim_debug (TMXR_DBG_ASY, dptr, "_tmxr_serial_poll() - exiting\n");
return NULL;
}
#endif /* VMS */
#endif /* defined(SIM_ASYNCH_IO) && defined(SIM_ASYNCH_MUX) */
t_stat tmxr_start_poll (void)
{
#if defined(SIM_ASYNCH_IO) && defined(SIM_ASYNCH_MUX)
@@ -1966,9 +2539,21 @@ pthread_mutex_unlock (&sim_tmxr_poll_lock);
#endif
}
t_stat tmxr_change_async (void)
{
#if defined(SIM_ASYNCH_IO)
if (sim_asynch_enabled)
tmxr_start_poll ();
else
tmxr_stop_poll ();
#endif
return SCPE_OK;
}
/* Attach unit to master socket */
t_stat tmxr_attach (TMXR *mp, UNIT *uptr, char *cptr)
t_stat tmxr_attach_ex (TMXR *mp, UNIT *uptr, char *cptr, t_bool async)
{
char* tptr = NULL;
t_stat r;
@@ -1991,6 +2576,13 @@ if ((mp->lines > 1) ||
(mp->ldsc[0].serport == 0)))
uptr->flags = uptr->flags | UNIT_ATTMULT; /* allow multiple attach commands */
#if defined(SIM_ASYNCH_IO) && defined(SIM_ASYNCH_MUX)
if (!async) /* if asynch disabled */
uptr->flags |= TMUF_NOASYNCH; /* tag as no asynch */
#else
uptr->flags |= TMUF_NOASYNCH; /* tag as no asynch */
#endif
if (mp->dptr == NULL) /* has device been set? */
mp->dptr = find_dev_from_unit (uptr); /* no, so set device now */
@@ -2114,15 +2706,67 @@ return SCPE_OK;
t_stat tmxr_detach (TMXR *mp, UNIT *uptr)
{
int32 i;
if (!(uptr->flags & UNIT_ATT)) /* attached? */
return SCPE_OK;
tmxr_close_master (mp); /* close master socket */
free (uptr->filename); /* free setup string */
uptr->filename = NULL;
uptr->flags = uptr->flags & ~UNIT_ATT; /* not attached */
mp->last_poll_time = 0;
for (i=0; i < mp->lines; i++) {
UNIT *uptr = mp->ldsc[i].uptr ? mp->ldsc[i].uptr : mp->uptr;
UNIT *o_uptr = mp->ldsc[i].o_uptr ? mp->ldsc[i].o_uptr : mp->uptr;
uptr->flags &= ~UNIT_TM_POLL; /* no polling */
o_uptr->flags &= ~UNIT_TM_POLL; /* no polling */
}
uptr->flags &= ~(UNIT_ATT|UNIT_TM_POLL|TMUF_NOASYNCH); /* not attached, no polling, not asynch disabled */
return SCPE_OK;
}
t_stat tmxr_activate (UNIT *uptr, int32 interval)
{
#if defined(SIM_ASYNCH_IO) && defined(SIM_ASYNCH_MUX)
if ((!(uptr->flags & UNIT_TM_POLL)) ||
(!sim_asynch_enabled)) {
return _sim_activate (uptr, interval);
}
return SCPE_OK;
#else
return _sim_activate (uptr, interval);
#endif
}
t_stat tmxr_activate_after (UNIT *uptr, int32 usecs_walltime)
{
#if defined(SIM_ASYNCH_IO) && defined(SIM_ASYNCH_MUX)
if ((!(uptr->flags & UNIT_TM_POLL)) ||
(!sim_asynch_enabled)) {
return _sim_activate_after (uptr, usecs_walltime);
}
return SCPE_OK;
#else
return _sim_activate_after (uptr, usecs_walltime);
#endif
}
t_stat tmxr_clock_coschedule (UNIT *uptr, int32 interval)
{
#if defined(SIM_ASYNCH_IO) && defined(SIM_ASYNCH_MUX)
if ((!(uptr->flags & UNIT_TM_POLL)) ||
(!sim_asynch_enabled)) {
return sim_clock_coschedule (uptr, interval);
}
return SCPE_OK;
#else
return sim_clock_coschedule (uptr, interval);
#endif
}
/* Generic Multiplexer attach help */
t_stat tmxr_attach_help(FILE *st, DEVICE *dptr, UNIT *uptr, int32 flag, char *cptr)
{
TMXR *mux = (TMXR *)dptr->help_ctx;

sim_tmxr.h

@@ -133,6 +133,7 @@ struct tmxr {
DEVICE *dptr; /* multiplexer device */
UNIT *uptr; /* polling unit (connection) */
char logfiletmpl[FILENAME_MAX]; /* template logfile name */
int32 txcount; /* count of transmit bytes */
int32 buffered; /* Buffered Line Behavior and Buffer Size Flag */
int32 sessions; /* count of tcp connections received */
uint32 last_poll_time; /* time of last connection poll */
@@ -149,7 +150,7 @@ void tmxr_poll_tx (TMXR *mp);
int32 tmxr_send_buffered_data (TMLN *lp);
t_stat tmxr_open_master (TMXR *mp, char *cptr);
t_stat tmxr_close_master (TMXR *mp);
t_stat tmxr_attach (TMXR *mp, UNIT *uptr, char *cptr);
t_stat tmxr_attach_ex (TMXR *mp, UNIT *uptr, char *cptr, t_bool async);
t_stat tmxr_detach (TMXR *mp, UNIT *uptr);
t_stat tmxr_attach_help(FILE *st, DEVICE *dptr, UNIT *uptr, int32 flag, char *cptr);
t_stat tmxr_set_modem_control_passthru (TMXR *mp);
@@ -177,6 +178,10 @@ t_stat tmxr_show_summ (FILE *st, UNIT *uptr, int32 val, void *desc);
t_stat tmxr_show_cstat (FILE *st, UNIT *uptr, int32 val, void *desc);
t_stat tmxr_show_lines (FILE *st, UNIT *uptr, int32 val, void *desc);
t_stat tmxr_show_open_devices (FILE* st, DEVICE *dptr, UNIT* uptr, int32 val, char* desc);
t_stat tmxr_activate (UNIT *uptr, int32 interval);
t_stat tmxr_activate_after (UNIT *uptr, int32 usecs_walltime);
t_stat tmxr_clock_coschedule (UNIT *uptr, int32 interval);
t_stat tmxr_change_async (void);
t_stat tmxr_startup (void);
t_stat tmxr_shutdown (void);
t_stat tmxr_start_poll (void);
@@ -187,5 +192,16 @@ extern FILE *sim_deb; /* debug file */
#define tmxr_debug_trace(mp, msg) if (sim_deb && (mp)->dptr && (TMXR_DBG_TRC & (mp)->dptr->dctrl)) sim_debug (TMXR_DBG_TRC, mp->dptr, "%s\n", (msg)); else (void)0
#define tmxr_debug_trace_line(lp, msg) if (sim_deb && (lp)->mp && (lp)->mp->dptr && (TMXR_DBG_TRC & (lp)->mp->dptr->dctrl)) sim_debug (TMXR_DBG_TRC, (lp)->mp->dptr, "%s\n", (msg)); else (void)0
#if defined(SIM_ASYNCH_IO) && defined(SIM_ASYNCH_MUX)
#define tmxr_attach(mp, uptr, cptr) tmxr_attach_ex(mp, uptr, cptr, TRUE)
#if (!defined(NOT_MUX_USING_CODE))
#define sim_activate tmxr_activate
#define sim_activate_after tmxr_activate_after
#define sim_clock_coschedule tmxr_clock_coschedule
#endif
#else
#define tmxr_attach(mp, uptr, cptr) tmxr_attach_ex(mp, uptr, cptr, FALSE)
#endif
#endif
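Seen from a consuming device source, the block above means the rerouting is automatic; a hypothetical example (not part of the commit):
/* Illustrative only: sim_activate below compiles as tmxr_activate because
   sim_tmxr.h is included and NOT_MUX_USING_CODE is not defined */
#include "sim_defs.h"
#include "sim_tmxr.h"
t_stat dz_svc (UNIT *uptr)
{
/* ... move characters between the mux lines and the simulated device ... */
sim_activate (uptr, 1000); /* really tmxr_activate (uptr, 1000) */
return SCPE_OK;
}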