diff --git a/0readmeAsynchIO.txt b/0readmeAsynchIO.txt index 425e3262..95d2a82f 100644 --- a/0readmeAsynchIO.txt +++ b/0readmeAsynchIO.txt @@ -35,7 +35,6 @@ Benefits. - Allows simulator clock ticks to track wall clock was precisely as possible under varying I/O load and activities. -Asynch I/O is provided through a callback model. SimH Libraries which provide Asynch I/O support: sim_disk sim_tape @@ -85,6 +84,9 @@ to enable viewing and setting of these variables via scp: Programming Disk and Tape devices to leverage Asynch I/O +Asynch disk and tape I/O is provided through a callback model. The callback +is invoked when the desired I/O operation has completed. + Naming conventions: All of the routines implemented in sim_disk and sim_tape have been kept in place. All routines which perform I/O have a variant routine available @@ -174,7 +176,7 @@ and 2) to have polling actually happen as soon as data may be available. In most cases no effort is required to add Asynch I/O support to a multiplexer device emulation. If a device emulation takes the normal model of polling for arriving data on every simulated clock tick, then if -Asynch I/O is enabled, then device will operate asynchronously and behave +Asynch I/O is enabled, the device will operate asynchronously and behave well. There is one restriction in this model. Specifically, the device emulation logic can't expect that there will be a particular number (clock tick rate maybe) of invocations of a unit service routine to perform polls @@ -207,9 +209,9 @@ Some devices will need a small amount of extra coding to leverage the Multiplexer Asynch I/O capabilties. Devices which require extra coding have one or more of the following characteristics: - they poll for input data on a different unit (or units) than the unit - which was provided when tmxr_attach was called with. + which was provided when tmxr_attach was called. - they poll for connections on a different unit than the unit which was - provided when tmxr_attach was called with. + provided when tmxr_attach was called. The extra coding required for proper operation is to call tmxr_set_line_unit() to associate the appropriate input polling unit to @@ -243,26 +245,33 @@ recalibrated and used throughout a simulator to schedule device time related delays as needed. Historically, this was fine until modern processors started having dynamically variable processor clock rates. On such host systems, the simulator's concept of time passing can vary -drastically, which may cause dramatic drifting of the simulated operating -system's concept of time. Once all devices are disconnected from the -calibrated clock's instruction count, the only concern for time in the -simulated system is that it's clock tick be as accurate as possible. -This has worked well in the past, however each simulator was burdened -with providing code which facilitated managing the concept of the -relationship between the number of instructions executed and the passage -of wall clock time. To accomodate the needs of activities or events which -should be measured against wall clock time (vs specific number of -instructions executed), the simulator framework has been extended to -specifically provide event scheduling based on elapsed wall time. A new -API can be used by devices to schedule unit event delivery after the -passage of a specific amount of wall clock time. The -api sim_activate_after() provides this capability. This capability is -not limited to being available ONLY when compiling with SIM_SYNCH_IO -defined. 
When SIM_ASYNCH_IO is defined, this facility is implemented by -a thread which drives the delivery of these events from the host system's -clock ticks (interpolated as needed to accomodate hosts with relatively -large clock ticks). When SIM_ASYNCH_IO is not defined, this facility is -implemented using the traditional simh calibrated clock approach. +drastically. This dynamic adjustment of the host system's execution rate +may cause dramatic drifting of the simulated operating system's concept +of time. Once all devices are disconnected from the calibrated clock's +instruction count, the only concern for time in the simulated system is +that it's clock tick be as accurate as possible. This has worked well +in the past, however each simulator was burdened with providing code +which facilitated managing the concept of the relationship between the +number of instructions executed and the passage of wall clock time. +To accomodate the needs of activities or events which should be measured +against wall clock time (vs specific number of instructions executed), +the simulator framework has been extended to specifically provide event +scheduling based on elapsed wall time. A new API can be used by devices +to schedule unit event delivery after the passage of a specific amount +of wall clock time. The api sim_activate_after() provides this +capability. This capability is not limited to being available ONLY when +compiling with SIM_SYNCH_IO defined. When SIM_ASYNCH_IO is defined, this +facility is implemented by a thread which drives the delivery of these +events from the host system's clock ticks (interpolated as needed to +accomodate hosts with relatively large clock ticks). When SIM_ASYNCH_IO +is not defined, this facility is implemented using the traditional simh +calibrated clock approach. This new approach has been measured to provide +clocks which drift far less than the drift realized in prior simh versions. +Using the released simh v3.9-0 vax simulator with idling enabled, the clock +drifted some 4 minutes in 35 minutes time (approximately 10%). The same OS +disk also running with idling enabled booted for 4 hours had less that 5 +seconds of clock drift (approximately 0.03%). + Run time requirements to use SIM_ASYNCH_IO. The Posix threads API (pthreads) is required for asynchronous execution. diff --git a/Interdata/id16_cpu.c b/Interdata/id16_cpu.c index 2a5605c2..9add9292 100644 --- a/Interdata/id16_cpu.c +++ b/Interdata/id16_cpu.c @@ -650,8 +650,7 @@ while (reason == 0) { /* loop until halted */ } if (PSW & PSW_WAIT) { /* wait state? */ - if (sim_idle_enab) /* idling enabled? */ - sim_idle (TMR_LFC, TRUE); + sim_idle (TMR_LFC, TRUE); /* idling */ else sim_interval = sim_interval - 1; /* no, count cycle */ continue; } diff --git a/Interdata/id32_cpu.c b/Interdata/id32_cpu.c index 6b874df3..386e1630 100644 --- a/Interdata/id32_cpu.c +++ b/Interdata/id32_cpu.c @@ -714,8 +714,7 @@ while (reason == 0) { /* loop until halted */ } if (PSW & PSW_WAIT) { /* wait state? */ - if (sim_idle_enab) /* idling enabled? 
*/ - sim_idle (TMR_LFC, TRUE); + sim_idle (TMR_LFC, TRUE); /* idling */ else sim_interval = sim_interval - 1; /* no, count cycle */ continue; } diff --git a/VAX/vax_cpu.c b/VAX/vax_cpu.c index b751e352..613d32bc 100644 --- a/VAX/vax_cpu.c +++ b/VAX/vax_cpu.c @@ -3129,8 +3129,7 @@ return; t_stat cpu_idle_svc (UNIT *uptr) { -if (sim_idle_enab) - sim_idle (TMR_CLK, FALSE); +sim_idle (TMR_CLK, FALSE); return SCPE_OK; } diff --git a/VAX/vax_stddev.c b/VAX/vax_stddev.c index ece69417..a7c118fd 100644 --- a/VAX/vax_stddev.c +++ b/VAX/vax_stddev.c @@ -397,7 +397,7 @@ int32 t; if (clk_csr & CSR_IE) SET_INT (CLK); t = sim_rtcn_calb (clk_tps, TMR_CLK); /* calibrate clock */ -sim_activate (&clk_unit, t); /* reactivate unit */ +sim_activate_after (&clk_unit, 1000000/clk_tps); /* reactivate unit */ tmr_poll = t; /* set tmr poll */ tmxr_poll = t * TMXR_MULT; /* set mux poll */ if (!todr_blow && todr_reg) /* if running? */ @@ -502,7 +502,7 @@ clk_csr = 0; CLR_INT (CLK); if (!sim_is_running) { /* RESET (not IORESET)? */ t = sim_rtcn_init (clk_unit.wait, TMR_CLK); /* init timer */ - sim_activate (&clk_unit, t); /* activate unit */ + sim_activate_after (&clk_unit, 1000000/clk_tps); /* activate unit */ tmr_poll = t; /* set tmr poll */ tmxr_poll = t * TMXR_MULT; /* set mux poll */ } diff --git a/scp.c b/scp.c index 1df06219..c41afe43 100644 --- a/scp.c +++ b/scp.c @@ -217,7 +217,7 @@ #include "sim_defs.h" #include "sim_rev.h" -#include "sim_ether.h" +#include "sim_tmxr.h" #include #include #include @@ -249,9 +249,24 @@ #define SRBSIZ 1024 /* save/restore buffer */ #define SIM_BRK_INILNT 4096 /* bpt tbl length */ #define SIM_BRK_ALLTYP 0xFFFFFFFF -#define UPDATE_SIM_TIME(x) sim_time = sim_time + (x - sim_interval); \ - sim_rtime = sim_rtime + ((uint32) (x - sim_interval)); \ - x = sim_interval +#define UPDATE_SIM_TIME \ + if (1) { \ + int32 _x; \ + AIO_LOCK; \ + if (sim_clock_queue == QUEUE_LIST_END) \ + _x = noqueue_time; \ + else \ + _x = sim_clock_queue->time; \ + sim_time = sim_time + (_x - sim_interval); \ + sim_rtime = sim_rtime + ((uint32) (_x - sim_interval)); \ + if (sim_clock_queue == QUEUE_LIST_END) \ + noqueue_time = sim_interval; \ + else \ + sim_clock_queue->time = sim_interval; \ + AIO_UNLOCK; \ + } \ + else \ + (void)0 \ #define SZ_D(dp) (size_map[((dp)->dwidth + CHAR_BIT - 1) / CHAR_BIT]) #define SZ_R(rp) \ @@ -287,19 +302,22 @@ /* Asynch I/O support */ #if defined (SIM_ASYNCH_IO) -pthread_mutex_t sim_asynch_lock = PTHREAD_MUTEX_INITIALIZER; -pthread_cond_t sim_asynch_wake = PTHREAD_COND_INITIALIZER; +pthread_mutex_t sim_asynch_lock = PTHREAD_MUTEX_INITIALIZER; +pthread_cond_t sim_idle_wake = PTHREAD_COND_INITIALIZER; +pthread_mutex_t sim_timer_lock = PTHREAD_MUTEX_INITIALIZER; +pthread_cond_t sim_timer_wake = PTHREAD_COND_INITIALIZER; pthread_mutex_t sim_tmxr_poll_lock = PTHREAD_MUTEX_INITIALIZER; -pthread_cond_t sim_tmxr_poll_cond = PTHREAD_COND_INITIALIZER; +pthread_cond_t sim_tmxr_poll_cond = PTHREAD_COND_INITIALIZER; int32 sim_tmxr_poll_count; pthread_t sim_asynch_main_threadid; -UNIT *sim_asynch_queue = NULL; -UNIT *sim_wallclock_queue = NULL; -t_bool sim_asynch_enabled = TRUE; +UNIT * volatile sim_asynch_queue = QUEUE_LIST_END; +UNIT * volatile sim_wallclock_queue = QUEUE_LIST_END; +UNIT * volatile sim_wallclock_entry = NULL; int32 sim_asynch_check; int32 sim_asynch_latency = 4000; /* 4 usec interrupt latency */ int32 sim_asynch_inst_latency = 20; /* assume 5 mip simulator */ #endif +t_bool sim_asynch_enabled = FALSE; /* VM interface */ @@ -424,13 +442,15 @@ void run_cmd_message (const 
char *unechod_cmdline, t_stat r); /* Global data */ DEVICE *sim_dflt_dev = NULL; -UNIT *sim_clock_queue = NULL; +UNIT *sim_clock_queue = QUEUE_LIST_END; int32 sim_interval = 0; int32 sim_switches = 0; FILE *sim_ofile = NULL; SCHTAB *sim_schptr = FALSE; DEVICE *sim_dfdev = NULL; UNIT *sim_dfunit = NULL; +DEVICE **sim_internal_devices = NULL; +uint32 sim_internal_device_count = 0; int32 sim_opt_out = 0; int32 sim_is_running = 0; uint32 sim_brk_summ = 0; @@ -640,11 +660,12 @@ static CTAB cmd_table[] = { "set console LOG=log_file enable console logging to the\n" " specified destination {STDOUT,DEBUG or filename)\n" "set console NOLOG disable console logging\n" - "set console DEBUG{=TRC;XMT;RCV}\n" - " enable console debugging of the\n" - " specified debug bit flags\n" - "set console NODEBUG{=TRC;XMT;RCV}\n" - " disable console debugging bits indicated\n" + "set console DEBUG=dbg_file\n" + " enable console debugging to the\n" + " specified destination {LOG, STDOUT or filename)\n" + "set console NODEBUG disable console debugging\n" + "set log log_file specify the log destination\n" + " (STDOUT,DEBUG or filename)\n" "set nolog disables any currently active logging\n" "set debug debug_file specify the debug destination\n" " (STDOUT,LOG or filename)\n" @@ -700,6 +721,8 @@ static CTAB cmd_table[] = { "sh{ow} {arg,...} show device parameters\n" "sh{ow} {arg,...} show unit parameters\n" "sh{ow} ethernet show ethernet devices\n" + "sh{ow} multiplexer show multiplexer devices\n" + "sh{ow} clocks show calibrated timers\n" "sh{ow} on show on condition actions\n" }, { "DO", &do_cmd, 1, "do {-V} {-O} {-E} {-Q} {arg,arg...}\b" @@ -800,7 +823,7 @@ stop_cpu = 0; sim_interval = 0; sim_time = sim_rtime = 0; noqueue_time = 0; -sim_clock_queue = NULL; +sim_clock_queue = QUEUE_LIST_END; sim_is_running = 0; sim_log = NULL; if (sim_emax <= 0) @@ -1687,6 +1710,7 @@ if (flag == sim_asynch_enabled) /* already set correctly return SCPE_OK; sim_asynch_enabled = flag; tmxr_change_async (); +sim_timer_change_asynch (); if (1) { uint32 i, j; DEVICE *dptr; @@ -1937,7 +1961,7 @@ else { return SCPE_OK; for (i = 0; i < dptr->numunits; i++) { /* check units */ up = (dptr->units) + i; /* att or active? */ - if ((up->flags & UNIT_ATT) || sim_is_active (up)) + if ((up->flags & UNIT_ATT) || sim_is_active_bool (up)) return SCPE_NOFNC; /* can't do it */ } dptr->flags = dptr->flags | DEV_DIS; /* disable */ @@ -1959,7 +1983,7 @@ if (flag) /* enb? 
enable */ uptr->flags = uptr->flags & ~UNIT_DIS; else { if ((uptr->flags & UNIT_ATT) || /* dsb */ - sim_is_active (uptr)) /* more tests */ + sim_is_active_bool (uptr)) /* more tests */ return SCPE_NOFNC; uptr->flags = uptr->flags | UNIT_DIS; /* disable */ } @@ -2050,7 +2074,7 @@ static SHTAB show_glob_tab[] = { { "ETHERNET", ð_show_devices, 0 }, { "MULTIPLEXER", &tmxr_show_open_devices, 0 }, { "MUX", &tmxr_show_open_devices, 0 }, - { "TIMERS", &sim_show_timers, 0 }, + { "CLOCKS", &sim_show_timers, 0 }, { "ON", &show_on, 0 }, { NULL, NULL, 0 } }; @@ -2286,14 +2310,14 @@ int32 accum; if (cptr && (*cptr != 0)) return SCPE_2MARG; -if (sim_clock_queue == NULL) +if (sim_clock_queue == QUEUE_LIST_END) fprintf (st, "%s event queue empty, time = %.0f\n", sim_name, sim_time); else { fprintf (st, "%s event queue status, time = %.0f\n", sim_name, sim_time); accum = 0; - for (uptr = sim_clock_queue; uptr != NULL; uptr = uptr->next) { + for (uptr = sim_clock_queue; uptr != QUEUE_LIST_END; uptr = uptr->next) { if (uptr == &sim_step_unit) fprintf (st, " Step timer"); else if ((dptr = find_dev_from_unit (uptr)) != NULL) { @@ -2307,30 +2331,30 @@ else { } } #if defined (SIM_ASYNCH_IO) -pthread_mutex_lock (&sim_asynch_lock); -if (sim_wallclock_queue == NULL) +pthread_mutex_lock (&sim_timer_lock); +if (sim_wallclock_queue == QUEUE_LIST_END) fprintf (st, "%s wall clock event queue empty, time = %.0f\n", sim_name, sim_time); else { fprintf (st, "%s wall clock event queue status, time = %.0f\n", sim_name, sim_time); - accum = 0; - for (uptr = sim_wallclock_queue; uptr != NULL; uptr = uptr->next) { + for (uptr = sim_wallclock_queue; uptr != QUEUE_LIST_END; uptr = uptr->next) { if ((dptr = find_dev_from_unit (uptr)) != NULL) { fprintf (st, " %s", sim_dname (dptr)); if (dptr->numunits > 1) fprintf (st, " unit %d", (int32) (uptr - dptr->units)); } else fprintf (st, " Unknown"); - fprintf (st, " after %d usec\n", accum + uptr->time); - accum = accum + uptr->time; + fprintf (st, " after %d usec\n", uptr->a_usec_delay); } } +pthread_mutex_unlock (&sim_timer_lock); +pthread_mutex_lock (&sim_asynch_lock); fprintf (st, "asynchronous pending event queue\n"); -if (sim_asynch_queue == AIO_LIST_END) +if (sim_asynch_queue == QUEUE_LIST_END) fprintf (st, "Empty\n"); else { - for (uptr = sim_asynch_queue; uptr != AIO_LIST_END; uptr = uptr->a_next) { + for (uptr = sim_asynch_queue; uptr != QUEUE_LIST_END; uptr = uptr->a_next) { if ((dptr = find_dev_from_unit (uptr)) != NULL) { fprintf (st, " %s", sim_dname (dptr)); if (dptr->numunits > 1) fprintf (st, " unit %d", @@ -2436,6 +2460,8 @@ if (cptr && (*cptr != 0)) /* now eol? */ return SCPE_2MARG; for (i = 0; (dptr = sim_devices[i]) != NULL; i++) show_dev_modifiers (st, dptr, NULL, flag, cptr); +for (i = 0; sim_internal_device_count && (dptr = sim_internal_devices[i]); ++i) + show_dev_modifiers (st, dptr, NULL, flag, cptr); return SCPE_OK; } @@ -2524,7 +2550,9 @@ DEVICE *dptr; if (cptr && (*cptr != 0)) /* now eol? 
*/ return SCPE_2MARG; -for (i = 0; (dptr = sim_devices[i]) != NULL; i++) +for (i = 0; (dptr = sim_devices[i]) != NULL; ++i) + show_dev_show_commands (st, dptr, NULL, flag, cptr); +for (i = 0; sim_internal_device_count && (dptr = sim_internal_devices[i]); ++i) show_dev_show_commands (st, dptr, NULL, flag, cptr); return SCPE_OK; } @@ -2547,6 +2575,11 @@ if (dptr->modifiers) { fprintf (st, ", %s", mptr->pstring); else fprintf (st, "sh{ow} %s\t%s", sim_dname (dptr), mptr->pstring); } + if (dptr->flags & DEV_DEBUG) { + if (any++) + fprintf (st, ", DEBUG, NODEBUG"); + else fprintf (st, "sh{ow} %s\tDEBUG, NODEBUG", sim_dname (dptr)); + } if (any) fprintf (st, "\n"); any = 0; @@ -3571,30 +3604,30 @@ for (i = 1; (dptr = sim_devices[i]) != NULL; i++) { /* reposition all */ stop_cpu = 0; sim_is_running = 1; /* flag running */ if (sim_ttrun () != SCPE_OK) { /* set console mode */ - sim_ttcmd (); sim_is_running = 0; /* flag idle */ + sim_ttcmd (); return SCPE_TTYERR; } if ((r = sim_check_console (30)) != SCPE_OK) { /* check console, error? */ - sim_ttcmd (); sim_is_running = 0; /* flag idle */ + sim_ttcmd (); return r; } if (signal (SIGINT, int_handler) == SIG_ERR) { /* set WRU */ - sim_ttcmd (); sim_is_running = 0; /* flag idle */ + sim_ttcmd (); return SCPE_SIGERR; } #ifdef SIGHUP if (signal (SIGHUP, int_handler) == SIG_ERR) { /* set WRU */ - sim_ttcmd (); sim_is_running = 0; /* flag idle */ + sim_ttcmd (); return SCPE_SIGERR; } #endif if (signal (SIGTERM, int_handler) == SIG_ERR) { /* set WRU */ - sim_ttcmd (); sim_is_running = 0; /* flag idle */ + sim_ttcmd (); return SCPE_SIGERR; } if (sim_step) /* set step timer */ @@ -3605,9 +3638,11 @@ if (sim_log) /* flush log if enabled sim_throt_sched (); /* set throttle */ sim_brk_clract (); /* defang actions */ sim_rtcn_init_all (); /* re-init clocks */ +sim_start_timer_services (); /* enable wall clock timing */ r = sim_instr(); sim_is_running = 0; /* flag idle */ +sim_stop_timer_services (); /* disable wall clock timing */ sim_ttcmd (); /* restore console */ signal (SIGINT, SIG_DFL); /* cancel WRU */ #ifdef SIGHUP @@ -3634,13 +3669,8 @@ for (i = 1; (dptr = sim_devices[i]) != NULL; i++) { /* flush attached files } sim_cancel (&sim_step_unit); /* cancel step timer */ sim_throt_cancel (); /* cancel throttle */ +UPDATE_SIM_TIME; /* update sim time */ AIO_UPDATE_QUEUE; -if (sim_clock_queue != NULL) { /* update sim time */ - UPDATE_SIM_TIME (sim_clock_queue->time); - } -else { - UPDATE_SIM_TIME (noqueue_time); - } return r; } @@ -3669,7 +3699,11 @@ t_stat run_boot_prep (void) sim_interval = 0; /* reset queue */ sim_time = sim_rtime = 0; noqueue_time = 0; -sim_clock_queue = NULL; +while (sim_clock_queue != QUEUE_LIST_END) { + UNIT *cptr = sim_clock_queue; + sim_clock_queue = cptr->next; + cptr->next = NULL; /* hygiene */ + } return reset_all (0); } @@ -4685,7 +4719,13 @@ DEVICE *find_dev (char *cptr) int32 i; DEVICE *dptr; -for (i = 0; (dptr = sim_devices[i]) != NULL; i++) { +for (i = 0; (dptr = sim_devices[i]) != NULL; ++i) { + if ((strcmp (cptr, dptr->name) == 0) || + (dptr->lname && + (strcmp (cptr, dptr->lname) == 0))) + return dptr; + } +for (i = 0; sim_internal_device_count && (dptr = sim_internal_devices[i]); ++i) { if ((strcmp (cptr, dptr->name) == 0) || (dptr->lname && (strcmp (cptr, dptr->lname) == 0))) @@ -4741,9 +4781,6 @@ for (i = 0; (dptr = sim_devices[i]) != NULL; i++) { /* base + unit#? 
*/ return NULL; } -DEVICE **sim_internal_devices = NULL; -uint32 sim_internal_device_count = 0; - /* sim_register_internal_device Add device to internal device list Inputs: @@ -4761,7 +4798,7 @@ for (i = 0; i < sim_internal_device_count; ++i) if (sim_internal_devices[i] == dptr) return SCPE_OK; ++sim_internal_device_count; -sim_internal_devices = realloc(sim_internal_devices, sim_internal_device_count*sizeof(*sim_internal_devices)); +sim_internal_devices = realloc(sim_internal_devices, (sim_internal_device_count+1)*sizeof(*sim_internal_devices)); sim_internal_devices[sim_internal_device_count-1] = dptr; sim_internal_devices[sim_internal_device_count] = NULL; return SCPE_OK; @@ -5230,7 +5267,8 @@ return SCPE_OK; sim_activate_after add entry to event queue after a specified amount of wall time sim_cancel remove entry from event queue sim_process_event process entries on event queue - sim_is_active see if entry is on event queue + sim_is_active see if entry is on event queue return time + 1 + sim_is_active_bool see if entry is on event queue sim_atime return absolute time for an entry sim_gtime return global time sim_qcount return event queue entry count @@ -5262,29 +5300,32 @@ t_stat reason; if (stop_cpu) /* stop CPU? */ return SCPE_STOP; -AIO_UPDATE_QUEUE; -if (sim_clock_queue == NULL) { /* queue empty? */ - UPDATE_SIM_TIME (noqueue_time); /* update sim time */ +UPDATE_SIM_TIME; /* update sim time */ +if (sim_clock_queue == QUEUE_LIST_END) { /* queue empty? */ sim_interval = noqueue_time = NOQUEUE_WAIT; /* flag queue empty */ return SCPE_OK; } -UPDATE_SIM_TIME (sim_clock_queue->time); /* update sim time */ do { uptr = sim_clock_queue; /* get first */ sim_clock_queue = uptr->next; /* remove first */ uptr->next = NULL; /* hygiene */ uptr->time = 0; - if (sim_clock_queue != NULL) + if (sim_clock_queue != QUEUE_LIST_END) sim_interval = sim_clock_queue->time; - else sim_interval = noqueue_time = NOQUEUE_WAIT; - AIO_POLL_BEGIN(uptr); + else + sim_interval = noqueue_time = NOQUEUE_WAIT; + AIO_EVENT_BEGIN(uptr); if (uptr->action != NULL) reason = uptr->action (uptr); - else reason = SCPE_OK; - AIO_POLL_COMPLETE(uptr, reason); - } while ((reason == SCPE_OK) && (sim_interval == 0)); + else + reason = SCPE_OK; + AIO_EVENT_COMPLETE(uptr, reason); + } while ((reason == SCPE_OK) && + (sim_interval <= 0) && + (sim_clock_queue != QUEUE_LIST_END)); -/* Empty queue forces sim_interval != 0 */ +if (sim_clock_queue == QUEUE_LIST_END) /* queue empty? */ + sim_interval = noqueue_time = NOQUEUE_WAIT; /* flag queue empty */ return reason; } @@ -5298,29 +5339,24 @@ return reason; reason = result (SCPE_OK if ok) */ -t_stat _sim_activate (UNIT *uptr, int32 event_time) +t_stat sim_activate (UNIT *uptr, int32 event_time) { -return sim_activate (uptr, event_time); +return _sim_activate (uptr, event_time); } -t_stat sim_activate (UNIT *uptr, int32 event_time) +t_stat _sim_activate (UNIT *uptr, int32 event_time) { UNIT *cptr, *prvptr; int32 accum; -AIO_ACTIVATE (sim_activate, uptr, event_time); -if (sim_is_active (uptr)) /* already active? */ +AIO_ACTIVATE (_sim_activate, uptr, event_time); +if (sim_is_active_bool (uptr)) /* already active? 
*/ return SCPE_OK; -if (sim_clock_queue == NULL) { - UPDATE_SIM_TIME (noqueue_time); - } -else { /* update sim time */ - UPDATE_SIM_TIME (sim_clock_queue->time); - } +UPDATE_SIM_TIME; /* update sim time */ prvptr = NULL; accum = 0; -for (cptr = sim_clock_queue; cptr != NULL; cptr = cptr->next) { +for (cptr = sim_clock_queue; cptr != QUEUE_LIST_END; cptr = cptr->next) { if (event_time < (accum + cptr->time)) break; accum = accum + cptr->time; @@ -5335,7 +5371,7 @@ else { prvptr->next = uptr; } uptr->time = event_time - accum; -if (cptr != NULL) +if (cptr != QUEUE_LIST_END) cptr->time = cptr->time - uptr->time; sim_interval = sim_clock_queue->time; return SCPE_OK; @@ -5354,7 +5390,7 @@ t_stat sim_activate_abs (UNIT *uptr, int32 event_time) { AIO_ACTIVATE (sim_activate_abs, uptr, event_time); sim_cancel (uptr); -return sim_activate (uptr, event_time); +return _sim_activate (uptr, event_time); } /* sim_activate_notbefore - activate (queue) event even if event already scheduled @@ -5376,9 +5412,9 @@ sim_cancel (uptr); rtimenow = sim_grtime(); sim_cancel (uptr); if (0x80000000 <= urtime-rtimenow) - return sim_activate (uptr, 0); + return _sim_activate (uptr, 0); else - return sim_activate (uptr, urtime-rtimenow); + return _sim_activate (uptr, urtime-rtimenow); } @@ -5391,62 +5427,17 @@ else reason = result (SCPE_OK if ok) */ -t_stat sim_activate_after (UNIT *uptr, int32 usec_delay) +t_stat sim_activate_after (UNIT *uptr, int32 event_time) { -#if defined(SIM_ASYNCH_IO) -int32 inst_delay; -int32 inst_per_sec = (*sim_tmr_poll)*(*sim_clk_tps); +return _sim_activate_after (uptr, event_time); +} -if (0 == inst_per_sec) - inst_per_sec = 1000; -/* compute instruction count avoiding overflow */ -if ((0 == (usec_delay%1000000)) || /* whole seconds? */ - (usec_delay > 100000000)) /* more than 100 seconds */ - inst_delay = inst_per_sec*(usec_delay/1000000); -else - if ((0 == (usec_delay%1000)) || /* whole milliseconds seconds? */ - (usec_delay > 1000000)) /* more than a second */ - inst_delay = (inst_per_sec*(usec_delay/1000))/1000; - else /* microseconds */ - inst_delay = (inst_per_sec*usec_delay)/1000000; -return sim_activate (uptr, inst_delay); -#else -UNIT *cptr, *prvptr; -int32 accum; - -if (sim_is_active (uptr)) /* already active? */ +t_stat _sim_activate_after (UNIT *uptr, int32 usec_delay) +{ +if (sim_is_active_bool (uptr)) /* already active? 
*/ return SCPE_OK; -pthread_mutex_lock (&sim_asynch_lock); -if (sim_wallclock_queue == NULL) { - UPDATE_SIM_TIME (noqueue_time); - } -else { /* update sim time */ - UPDATE_SIM_TIME (sim_clock_queue->time); - } - -prvptr = NULL; -accum = 0; -for (cptr = sim_wallclock_queue; cptr != NULL; cptr = cptr->next) { - if (usec_delay < (accum + cptr->time)) - break; - accum = accum + cptr->time; - prvptr = cptr; - } -if (prvptr == NULL) { /* insert at head */ - cptr = uptr->next = sim_wallclock_queue; - sim_wallclock_queue = uptr; - } -else { - cptr = uptr->next = prvptr->next; /* insert at prvptr */ - prvptr->next = uptr; - } -uptr->time = usec_delay - accum; -if (cptr != NULL) - cptr->time = cptr->time - uptr->time; -if (prvptr == NULL) /* Need to wake timer thread to time first element on list */ -pthread_mutex_unlock (&sim_asynch_lock); -#endif -return SCPE_OK; +AIO_ACTIVATE (_sim_activate_after, uptr, usec_delay); +return sim_timer_activate_after (uptr, usec_delay); } /* sim_cancel - cancel (dequeue) event @@ -5463,30 +5454,46 @@ t_stat sim_cancel (UNIT *uptr) UNIT *cptr, *nptr; AIO_VALIDATE; -if (sim_clock_queue == NULL) +AIO_CANCEL(uptr); +if (!sim_is_active_bool (uptr)) /* not active? */ + return SCPE_OK; /* nothing to cancel */ +if (sim_clock_queue == QUEUE_LIST_END) return SCPE_OK; -UPDATE_SIM_TIME (sim_clock_queue->time); /* update sim time */ -nptr = NULL; +UPDATE_SIM_TIME; /* update sim time */ +nptr = QUEUE_LIST_END; if (sim_clock_queue == uptr) nptr = sim_clock_queue = uptr->next; else { - for (cptr = sim_clock_queue; cptr != NULL; cptr = cptr->next) { + for (cptr = sim_clock_queue; cptr != QUEUE_LIST_END; cptr = cptr->next) { if (cptr->next == uptr) { nptr = cptr->next = uptr->next; break; /* end queue scan */ } } } -if (nptr != NULL) +if (nptr != QUEUE_LIST_END) nptr->time = nptr->time + uptr->time; uptr->next = NULL; /* hygiene */ uptr->time = 0; -if (sim_clock_queue != NULL) +if (sim_clock_queue != QUEUE_LIST_END) sim_interval = sim_clock_queue->time; else sim_interval = noqueue_time = NOQUEUE_WAIT; return SCPE_OK; } +/* sim_is_active_bool - test for entry in queue, return activation time + + Inputs: + uptr = pointer to unit + Outputs: + result = TRUE if active FALSE if inactive +*/ + +t_bool sim_is_active_bool (UNIT *uptr) +{ +return (uptr->next != NULL); +} + /* sim_is_active - test for entry in queue, return activation time Inputs: @@ -5498,11 +5505,38 @@ return SCPE_OK; int32 sim_is_active (UNIT *uptr) { UNIT *cptr; +int32 accum = 0; + +AIO_VALIDATE; +AIO_RETURN_TIME(uptr); +for (cptr = sim_clock_queue; cptr != QUEUE_LIST_END; cptr = cptr->next) { + if (cptr == sim_clock_queue) { + if (sim_interval > 0) + accum = accum + sim_interval; + } + else accum = accum + cptr->time; + if (cptr == uptr) + return accum + 1; + } +return 0; +} + +/* sim_activation_time - test for entry in queue, return activation time + + Inputs: + uptr = pointer to unit + Outputs: + result = absolute activation time + 1, 0 if inactive +*/ + +int32 sim_activation_time (UNIT *uptr) +{ +UNIT *cptr; int32 accum; AIO_VALIDATE; accum = 0; -for (cptr = sim_clock_queue; cptr != NULL; cptr = cptr->next) { +for (cptr = sim_clock_queue; cptr != QUEUE_LIST_END; cptr = cptr->next) { if (cptr == sim_clock_queue) { if (sim_interval > 0) accum = accum + sim_interval; @@ -5524,23 +5558,13 @@ return 0; double sim_gtime (void) { -if (sim_clock_queue == NULL) { - UPDATE_SIM_TIME (noqueue_time); - } -else { - UPDATE_SIM_TIME (sim_clock_queue->time); - } +UPDATE_SIM_TIME; return sim_time; } uint32 sim_grtime (void) { -if 
(sim_clock_queue == NULL) { - UPDATE_SIM_TIME (noqueue_time); - } -else { - UPDATE_SIM_TIME (sim_clock_queue->time); - } +UPDATE_SIM_TIME; return sim_rtime; } @@ -5557,7 +5581,7 @@ int32 cnt; UNIT *uptr; cnt = 0; -for (uptr = sim_clock_queue; uptr != NULL; uptr = uptr->next) +for (uptr = sim_clock_queue; uptr != QUEUE_LIST_END; uptr = uptr->next) cnt++; return cnt; } diff --git a/scp.h b/scp.h index 5308a34a..6810c6a0 100644 --- a/scp.h +++ b/scp.h @@ -89,7 +89,9 @@ t_stat _sim_activate (UNIT *uptr, int32 interval); t_stat sim_activate_abs (UNIT *uptr, int32 interval); t_stat sim_activate_notbefore (UNIT *uptr, int32 rtime); t_stat sim_activate_after (UNIT *uptr, int32 usecs_walltime); +t_stat _sim_activate_after (UNIT *uptr, int32 usecs_walltime); t_stat sim_cancel (UNIT *uptr); +t_bool sim_is_active_bool (UNIT *uptr); int32 sim_is_active (UNIT *uptr); double sim_gtime (void); uint32 sim_grtime (void); diff --git a/sim_console.c b/sim_console.c index e57b5d45..4694a65b 100644 --- a/sim_console.c +++ b/sim_console.c @@ -154,15 +154,25 @@ UNIT sim_con_unit = { UDATA (&sim_con_poll_svc, 0, 0) }; /* console c #define DBG_TRC TMXR_DBG_TRC /* trace routine calls */ #define DBG_XMT TMXR_DBG_XMT /* display Transmitted Data */ #define DBG_RCV TMXR_DBG_RCV /* display Received Data */ +#define DBG_ASY TMXR_DBG_ASY /* asynchronous thread activity */ DEBTAB sim_con_debug[] = { {"TRC", DBG_TRC}, {"XMT", DBG_XMT}, {"RCV", DBG_RCV}, + {"ASY", DBG_ASY}, {0} }; -DEVICE sim_con_telnet = {"Console Telnet", &sim_con_unit, NULL, NULL, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, DEV_DEBUG, 0, sim_con_debug}; +MTAB sim_con_mod[] = { + { 0 }, +}; + +DEVICE sim_con_telnet = { + "CON-TEL", &sim_con_unit, NULL, sim_con_mod, + 1, 0, 0, 0, 0, 0, + NULL, NULL, NULL, NULL, NULL, NULL, + NULL, DEV_DEBUG, 0, sim_con_debug}; TMLN sim_con_ldsc = { 0 }; /* console line descr */ TMXR sim_con_tmxr = { 1, 0, 0, &sim_con_ldsc, NULL, &sim_con_telnet };/* console line mux */ @@ -184,8 +194,6 @@ extern int32 sim_quiet; extern FILE *sim_log, *sim_deb; extern FILEREF *sim_log_ref, *sim_deb_ref; extern DEVICE *sim_devices[]; -extern t_stat show_dev_debug (FILE *st, DEVICE *dptr, UNIT *uptr, int32 flag, char *cptr); -extern t_stat set_dev_debug (DEVICE *dptr, UNIT *uptr, int32 flag, char *cptr); /* Set/show data structures */ @@ -199,8 +207,8 @@ static CTAB set_con_tab[] = { { "NOTELNET", &sim_set_notelnet, 0 }, { "LOG", &sim_set_logon, 0 }, { "NOLOG", &sim_set_logoff, 0 }, - { "DEBUG", &sim_set_cons_debug, 1 }, - { "NODEBUG", &sim_set_cons_debug, 0 }, + { "DEBUG", &sim_set_debon, 0 }, + { "NODEBUG", &sim_set_deboff, 0 }, { NULL, NULL, 0 } }; @@ -211,7 +219,7 @@ static SHTAB show_con_tab[] = { { "PCHAR", &sim_show_pchar, 0 }, { "LOG", &sim_show_cons_log, 0 }, { "TELNET", &sim_show_telnet, 0 }, - { "DEBUG", &sim_show_cons_debug, 0 }, + { "DEBUG", &sim_show_debug, 0 }, { "BUFFERED", &sim_show_cons_buff, 0 }, { NULL, NULL, 0 } }; @@ -521,29 +529,6 @@ else { return SCPE_OK; } -/* Show console Debug status */ - -t_stat sim_show_cons_debug (FILE *st, DEVICE *dunused, UNIT *uunused, int32 flag, char *cptr) -{ -return show_dev_debug (st, &sim_con_telnet, sim_con_telnet.units, flag, cptr); -} - -/* Set console to Debug */ - -t_stat sim_set_cons_debug (int32 flg, char *cptr) -{ -t_stat r = set_dev_debug (&sim_con_telnet, sim_con_telnet.units, flg, cptr); - -if ((r == SCPE_OK) && (sim_con_ldsc.uptr != &sim_con_unit)) { - DEVICE *dptr = find_dev_from_unit(sim_con_ldsc.uptr); - - dptr->dctrl = sim_con_telnet.dctrl; - if 
(dptr->debflags == NULL) - dptr->debflags = sim_con_telnet.debflags; - } -return r; -} - /* Set console to Buffering */ t_stat sim_set_cons_buff (int32 flg, char *cptr) @@ -713,6 +698,14 @@ if (sim_con_ldsc.conn || sim_con_ldsc.txbfd) { /* connected or buffered fflush (sim_log); } } + else { + printf ("Running\r\n"); /* print transition */ + fflush (stdout); + if (sim_log) { /* log file? */ + fprintf (sim_log, "Running\n"); + fflush (sim_log); + } + } return SCPE_OK; } } @@ -870,11 +863,12 @@ extern pthread_mutex_t sim_tmxr_poll_lock; extern pthread_cond_t sim_tmxr_poll_cond; extern int32 sim_tmxr_poll_count; extern t_bool sim_tmxr_poll_running; -extern pthread_t sim_console_poll_thread; /* Keyboard Polling Thread Id */ -extern pthread_cond_t sim_console_startup_cond; -extern t_bool sim_console_poll_running; extern int32 sim_is_running; +pthread_t sim_console_poll_thread; /* Keyboard Polling Thread Id */ +t_bool sim_console_poll_running = FALSE; +pthread_cond_t sim_console_startup_cond; + static void * _console_poll(void *arg) { @@ -882,6 +876,7 @@ int sched_policy; struct sched_param sched_priority; int poll_timeout_count = 0; int wait_count = 0; +DEVICE *d; /* Boost Priority for this I/O thread vs the CPU instruction execution thread which, in general, won't be readily yielding the processor when @@ -890,37 +885,47 @@ pthread_getschedparam (pthread_self(), &sched_policy, &sched_priority); ++sched_priority.sched_priority; pthread_setschedparam (pthread_self(), sched_policy, &sched_priority); -sim_debug (DBG_TRC, &sim_con_telnet, "_console_poll() - starting\n"); +sim_debug (DBG_ASY, &sim_con_telnet, "_console_poll() - starting\n"); pthread_mutex_lock (&sim_tmxr_poll_lock); pthread_cond_signal (&sim_console_startup_cond); /* Signal we're ready to go */ while (sim_asynch_enabled) { - DEVICE *d; + + if (!sim_is_running) { + if (wait_count) { + sim_debug (DBG_ASY, d, "_console_poll() - Removing interest in %s. 
Other interest: %d\n", d->name, sim_con_ldsc.uptr->a_poll_waiter_count); + --sim_con_ldsc.uptr->a_poll_waiter_count; + --sim_tmxr_poll_count; + } + break; + } /* If we started something, let it finish before polling again */ if (wait_count) { pthread_cond_wait (&sim_tmxr_poll_cond, &sim_tmxr_poll_lock); - sim_debug (DBG_TRC, &sim_con_telnet, "_console_poll() - continuing with ti\n"); + sim_debug (DBG_ASY, &sim_con_telnet, "_console_poll() - continuing with after wait\n"); } - if (!sim_is_running) - break; - pthread_mutex_unlock (&sim_tmxr_poll_lock); wait_count = 0; if (sim_os_poll_kbd_ready (1000)) { - sim_debug (DBG_TRC, &sim_con_telnet, "_console_poll() - Keyboard Data available\n"); + sim_debug (DBG_ASY, &sim_con_telnet, "_console_poll() - Keyboard Data available\n"); pthread_mutex_lock (&sim_tmxr_poll_lock); ++wait_count; if (!sim_con_ldsc.uptr->a_polling_now) { sim_con_ldsc.uptr->a_polling_now = TRUE; sim_con_ldsc.uptr->a_poll_waiter_count = 1; d = find_dev_from_unit(sim_con_ldsc.uptr); - sim_debug (DBG_TRC, &sim_con_telnet, "_console_poll() - Activating %s\n", d->name); + sim_debug (DBG_ASY, &sim_con_telnet, "_console_poll() - Activating %s\n", d->name); + pthread_mutex_unlock (&sim_tmxr_poll_lock); _sim_activate (sim_con_ldsc.uptr, 0); + pthread_mutex_lock (&sim_tmxr_poll_lock); } - else + else { + d = find_dev_from_unit(sim_con_ldsc.uptr); + sim_debug (DBG_ASY, &sim_con_telnet, "_console_poll() - Already Activated %s %d times\n", d->name, sim_con_ldsc.uptr->a_poll_waiter_count); ++sim_con_ldsc.uptr->a_poll_waiter_count; + } } else pthread_mutex_lock (&sim_tmxr_poll_lock); @@ -929,7 +934,7 @@ while (sim_asynch_enabled) { } pthread_mutex_unlock (&sim_tmxr_poll_lock); -sim_debug (DBG_TRC, &sim_con_telnet, "_console_poll() - exiting\n"); +sim_debug (DBG_ASY, &sim_con_telnet, "_console_poll() - exiting\n"); return NULL; } @@ -1687,8 +1692,8 @@ if (!isatty (0)) { /* skip if !tty */ } FD_ZERO (&readfds); FD_SET (0, &readfds); -timeout.tv_sec = 0; -timeout.tv_usec = ms_timeout*1000; +timeout.tv_sec = (ms_timeout*1000)/1000000; +timeout.tv_usec = (ms_timeout*1000)%1000000; return (1 == select (1, &readfds, NULL, NULL, &timeout)); } @@ -1815,8 +1820,8 @@ if (!sim_os_ttisatty()) { /* skip if !tty */ } FD_ZERO (&readfds); FD_SET (0, &readfds); -timeout.tv_sec = 0; -timeout.tv_usec = ms_timeout*1000; +timeout.tv_sec = (ms_timeout*1000)/1000000; +timeout.tv_usec = (ms_timeout*1000)%1000000; return (1 == select (1, &readfds, NULL, NULL, &timeout)); } diff --git a/sim_console.h b/sim_console.h index 4d255800..f2fcf086 100644 --- a/sim_console.h +++ b/sim_console.h @@ -88,6 +88,7 @@ t_stat sim_ttrun (void); t_stat sim_ttcmd (void); t_stat sim_ttclose (void); t_bool sim_ttisatty (void); +t_stat sim_os_poll_kbd (void); int32 sim_tt_inpcvt (int32 c, uint32 mode); int32 sim_tt_outcvt (int32 c, uint32 mode); diff --git a/sim_defs.h b/sim_defs.h index e8ed9bb1..1d9f028b 100644 --- a/sim_defs.h +++ b/sim_defs.h @@ -267,7 +267,7 @@ typedef uint32 t_addr; #define KBD_MAX_WAIT 500000 #define SERIAL_IN_WAIT 100 /* serial in time */ #define SERIAL_OUT_WAIT 100 /* serial output */ -#define NOQUEUE_WAIT 10000 /* min check time */ +#define NOQUEUE_WAIT 1000000 /* min check time */ #define KBD_LIM_WAIT(x) (((x) > KBD_MAX_WAIT)? KBD_MAX_WAIT: (x)) #define KBD_WAIT(w,s) ((w)? 
w: KBD_LIM_WAIT (s)) @@ -279,6 +279,14 @@ typedef uint32 t_addr; #define MATCH_CMD(ptr,cmd) strncmp ((ptr), (cmd), strlen (ptr)) +/* End of Linked List/Queue value */ +/* Chosen for 2 reasons: */ +/* 1 - to not be NULL, this allowing the NULL value to */ +/* indicate inclusion on a list */ +/* and */ +/* 2 - to not be a valid/possible pointer (alignment) */ +#define QUEUE_LIST_END ((void *)1) + /* Device data structure */ struct sim_device { @@ -376,33 +384,38 @@ struct sim_unit { t_bool a_polling_now; /* polling active flag */ int32 a_poll_waiter_count; /* count of polling threads */ /* waiting for this unit */ + /* Asynchronous Timer control */ + double a_due_time; /* due time for timer event */ + double a_skew; /* accumulated skew being corrected */ + double a_last_fired_time; /* time last event fired */ + int32 a_usec_delay; /* time delay for timer event */ #endif }; /* Unit flags */ -#define UNIT_V_UF_31 12 /* dev spec, V3.1 */ -#define UNIT_V_UF 16 /* device specific */ -#define UNIT_V_RSV 31 /* reserved!! */ +#define UNIT_V_UF_31 12 /* dev spec, V3.1 */ +#define UNIT_V_UF 16 /* device specific */ +#define UNIT_V_RSV 31 /* reserved!! */ -#define UNIT_ATTABLE 0000001 /* attachable */ -#define UNIT_RO 0000002 /* read only */ -#define UNIT_FIX 0000004 /* fixed capacity */ -#define UNIT_SEQ 0000010 /* sequential */ -#define UNIT_ATT 0000020 /* attached */ -#define UNIT_BINK 0000040 /* K = power of 2 */ -#define UNIT_BUFABLE 0000100 /* bufferable */ -#define UNIT_MUSTBUF 0000200 /* must buffer */ -#define UNIT_BUF 0000400 /* buffered */ -#define UNIT_ROABLE 0001000 /* read only ok */ -#define UNIT_DISABLE 0002000 /* disable-able */ -#define UNIT_DIS 0004000 /* disabled */ -#define UNIT_RAW 0010000 /* raw mode */ -#define UNIT_TEXT 0020000 /* text mode */ -#define UNIT_IDLE 0040000 /* idle eligible */ -#define UNIT_TM_POLL 0100000 /* TMXR Polling unit */ - /* This flag is ONLY set dynamically */ - /* it should NOT be set via initialization */ +#define UNIT_ATTABLE 0000001 /* attachable */ +#define UNIT_RO 0000002 /* read only */ +#define UNIT_FIX 0000004 /* fixed capacity */ +#define UNIT_SEQ 0000010 /* sequential */ +#define UNIT_ATT 0000020 /* attached */ +#define UNIT_BINK 0000040 /* K = power of 2 */ +#define UNIT_BUFABLE 0000100 /* bufferable */ +#define UNIT_MUSTBUF 0000200 /* must buffer */ +#define UNIT_BUF 0000400 /* buffered */ +#define UNIT_ROABLE 0001000 /* read only ok */ +#define UNIT_DISABLE 0002000 /* disable-able */ +#define UNIT_DIS 0004000 /* disabled */ +#define UNIT_RAW 0010000 /* raw mode */ +#define UNIT_TEXT 0020000 /* text mode */ +#define UNIT_IDLE 0040000 /* idle eligible */ +#define UNIT_TM_POLL 0100000 /* TMXR Polling unit */ + /* This flag is ONLY set dynamically */ + /* it should NOT be set via initialization */ #define UNIT_UFMASK_31 (((1u << UNIT_V_RSV) - 1) & ~((1u << UNIT_V_UF_31) - 1)) #define UNIT_UFMASK (((1u << UNIT_V_RSV) - 1) & ~((1u << UNIT_V_UF) - 1)) @@ -523,7 +536,7 @@ struct sim_fileref { /* The following macros define structure contents */ -#define UDATA(act,fl,cap) NULL,act,NULL,NULL,NULL,0,0,(fl),(cap),0,0 +#define UDATA(act,fl,cap) NULL,act,NULL,NULL,NULL,0,0,(fl),(cap),0,NULL,0,0 #if defined (__STDC__) || defined (_WIN32) #define ORDATA(nm,loc,wd) #nm, &(loc), 8, (wd), 0, 1 @@ -564,50 +577,119 @@ typedef struct sim_fileref FILEREF; #include "scp.h" #include "sim_console.h" #include "sim_timer.h" +#include "sim_ether.h" #include "sim_fio.h" /* Asynch/Threaded I/O support */ +extern t_bool sim_asynch_enabled; + #if defined (SIM_ASYNCH_IO) 
#include #include "sim_tmxr.h" extern pthread_mutex_t sim_asynch_lock; -extern pthread_cond_t sim_asynch_wake; +extern pthread_cond_t sim_idle_wake; +extern pthread_mutex_t sim_timer_lock; +extern pthread_cond_t sim_timer_wake; +extern t_bool sim_timer_event_canceled; extern int32 sim_tmxr_poll_count; extern pthread_cond_t sim_tmxr_poll_cond; extern pthread_mutex_t sim_tmxr_poll_lock; extern pthread_t sim_asynch_main_threadid; -extern struct sim_unit *sim_asynch_queue; +extern UNIT * volatile sim_asynch_queue; extern volatile t_bool sim_idle_wait; -extern t_bool sim_asynch_enabled; extern int32 sim_asynch_check; extern int32 sim_asynch_latency; extern int32 sim_asynch_inst_latency; -#define AIO_LIST_END ((void *)1) /* Chosen to deliberately not be a valid pointer (alignment) */ #define AIO_INIT \ if (1) { \ sim_asynch_main_threadid = pthread_self(); \ + sim_asynch_enabled = TRUE; \ /* Empty list/list end uses the point value (void *)1. \ This allows NULL in an entry's a_next pointer to \ indicate that the entry is not currently in any list */ \ - sim_asynch_queue = AIO_LIST_END; \ + sim_asynch_queue = QUEUE_LIST_END; \ } \ else \ (void)0 #define AIO_CLEANUP \ if (1) { \ pthread_mutex_destroy(&sim_asynch_lock); \ - pthread_cond_destroy(&sim_asynch_wake); \ + pthread_cond_destroy(&sim_idle_wake); \ + pthread_mutex_destroy(&sim_timer_lock); \ + pthread_cond_destroy(&sim_timer_wake); \ pthread_mutex_destroy(&sim_tmxr_poll_lock); \ pthread_cond_destroy(&sim_tmxr_poll_cond); \ } \ else \ (void)0 -#define AIO_POLL_BEGIN(uptr) \ - do { -#define AIO_POLL_COMPLETE(uptr, reason) \ +#define AIO_LOCK \ + pthread_mutex_lock(&sim_asynch_lock) +#define AIO_UNLOCK \ + pthread_mutex_unlock(&sim_asynch_lock) +#define AIO_CANCEL(uptr) \ + if (1) { \ + if (((uptr)->flags & UNIT_TM_POLL) && \ + !((uptr)->next) && !((uptr)->a_next)) { \ + (uptr)->a_polling_now = FALSE; \ + sim_tmxr_poll_count -= (uptr)->a_poll_waiter_count; \ + (uptr)->a_poll_waiter_count = 0; \ + } \ + if (sim_is_active_bool (uptr)) { \ + UNIT *cptr, *nptr; \ + AIO_UPDATE_QUEUE; \ + pthread_mutex_lock (&sim_timer_lock); \ + nptr = QUEUE_LIST_END; \ + if ((uptr) == sim_wallclock_queue) { \ + sim_wallclock_queue = (uptr)->next; \ + (uptr)->next = NULL; \ + } \ + else \ + for (cptr = sim_wallclock_queue; \ + (cptr != QUEUE_LIST_END); \ + cptr = cptr->next) \ + if (cptr->next == (uptr)) { \ + cptr->next = (uptr)->next; \ + nptr = cptr; \ + (uptr)->next = NULL; \ + break; \ + } \ + if (nptr == QUEUE_LIST_END) { \ + sim_timer_event_canceled = TRUE; \ + pthread_cond_signal (&sim_timer_wake); \ + } \ + if ((uptr)->next == NULL) \ + (uptr)->a_due_time = (uptr)->a_usec_delay = 0; \ + pthread_mutex_unlock (&sim_timer_lock); \ + } \ + } \ + else \ + (void)0 +#define AIO_RETURN_TIME(uptr) \ + if (1) { \ + pthread_mutex_lock (&sim_timer_lock); \ + for (cptr = sim_wallclock_queue; \ + cptr != QUEUE_LIST_END; \ + cptr = cptr->next) \ + if ((uptr) == cptr) { \ + int32 inst_per_sec = sim_timer_inst_per_sec (); \ + int32 result; \ + \ + result = (int32)(((uptr)->a_due_time - sim_timenow_double())*inst_per_sec);\ + pthread_mutex_unlock (&sim_timer_lock); \ + return result + 1; \ + } \ + pthread_mutex_unlock (&sim_timer_lock); \ + if ((uptr)->a_next) /* On asynch queue? 
*/ \ + return (uptr)->a_event_time + 1; \ + } \ + else \ + (void)0 +#define AIO_EVENT_BEGIN(uptr) \ + do { +#define AIO_EVENT_COMPLETE(uptr, reason) \ if (uptr->flags & UNIT_TM_POLL) { \ pthread_mutex_lock (&sim_tmxr_poll_lock); \ uptr->a_polling_now = FALSE; \ @@ -619,6 +701,7 @@ extern int32 sim_asynch_inst_latency; } \ pthread_mutex_unlock (&sim_tmxr_poll_lock); \ } \ + AIO_UPDATE_QUEUE; \ } while (0) #if defined(__DECC_VER) @@ -630,7 +713,7 @@ extern int32 sim_asynch_inst_latency; #if defined(_WIN32) || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) #define USE_AIO_INTRINSICS 1 #endif -#ifdef USE_AIO_INTRINSICS +#if defined(USE_AIO_INTRINSICS) && !defined(DONT_USE_AIO_INTRINSICS) /* This approach uses intrinsics to manage access to the link list head */ /* sim_asynch_queue. This implementation is a completely lock free design */ /* which avoids the potential ABA issues. */ @@ -649,13 +732,13 @@ extern int32 sim_asynch_inst_latency; #define AIO_QUEUE_VAL InterlockedCompareExchangePointer(&sim_asynch_queue, sim_asynch_queue, NULL) #define AIO_QUEUE_SET(val, queue) InterlockedCompareExchangePointer(&sim_asynch_queue, val, queue) #define AIO_UPDATE_QUEUE \ - if (AIO_QUEUE_VAL != AIO_LIST_END) { /* List !Empty */ \ + if (AIO_QUEUE_VAL != QUEUE_LIST_END) { /* List !Empty */ \ UNIT *q, *uptr; \ int32 a_event_time; \ do \ q = AIO_QUEUE_VAL; \ - while (q != AIO_QUEUE_SET(AIO_LIST_END, q)); \ - while (q != AIO_LIST_END) { /* List !Empty */ \ + while (q != AIO_QUEUE_SET(QUEUE_LIST_END, q)); \ + while (q != QUEUE_LIST_END) { /* List !Empty */ \ uptr = q; \ q = q->a_next; \ uptr->a_next = NULL; /* hygiene */ \ @@ -673,27 +756,34 @@ extern int32 sim_asynch_inst_latency; } else (void)0 #define AIO_ACTIVATE(caller, uptr, event_time) \ if (!pthread_equal ( pthread_self(), sim_asynch_main_threadid )) { \ + DEVICE *d; \ + if (sim_deb) \ + d = find_dev_from_unit(uptr); \ + sim_debug (TIMER_DBG_QUEUE, &sim_timer_dev, "asynch event on %s after %d instructions\n", d->name, event_time);\ if (uptr->a_next) { /* already queued? 
*/ \ uptr->a_activate_call = sim_activate_abs; \ } else { \ UNIT *q, *qe; \ uptr->a_event_time = event_time; \ - uptr->a_activate_call = sim_activate; \ - uptr->a_next = AIO_LIST_END; /* Mark as on list */ \ + uptr->a_activate_call = caller; \ + uptr->a_next = QUEUE_LIST_END; /* Mark as on list */ \ do { \ do \ q = AIO_QUEUE_VAL; \ - while (q != AIO_QUEUE_SET(AIO_LIST_END, q));/* Grab current list */ \ - for (qe = uptr; qe->a_next != AIO_LIST_END; qe = qe->a_next); \ + while (q != AIO_QUEUE_SET(QUEUE_LIST_END, q));/* Grab current list */\ + for (qe = uptr; qe->a_next != QUEUE_LIST_END; qe = qe->a_next); \ qe->a_next = q; /* append current list */\ do \ q = AIO_QUEUE_VAL; \ while (q != AIO_QUEUE_SET(uptr, q)); \ uptr = q; \ - } while (uptr != AIO_LIST_END); \ + } while (uptr != QUEUE_LIST_END); \ } \ - if (sim_idle_wait) \ - pthread_cond_signal (&sim_asynch_wake); \ + sim_asynch_check = 0; /* try to force check */ \ + if (sim_idle_wait) { \ + sim_debug (TIMER_DBG_IDLE, &sim_timer_dev, "waking due to event on %s after %d instructions\n", d->name, event_time);\ + pthread_cond_signal (&sim_idle_wake); \ + } \ return SCPE_OK; \ } else (void)0 #else /* !USE_AIO_INTRINSICS */ @@ -704,11 +794,11 @@ extern int32 sim_asynch_inst_latency; if (1) { \ UNIT *uptr; \ pthread_mutex_lock (&sim_asynch_lock); \ - while (sim_asynch_queue != AIO_LIST_END) { /* List !Empty */ \ + while (sim_asynch_queue != QUEUE_LIST_END) { /* List !Empty */ \ int32 a_event_time; \ uptr = sim_asynch_queue; \ sim_asynch_queue = uptr->a_next; \ - uptr->a_next = NULL; \ + uptr->a_next = NULL; /* hygiene */ \ if (uptr->a_activate_call != &sim_activate_notbefore) { \ a_event_time = uptr->a_event_time-((sim_asynch_inst_latency+1)/2); \ if (a_event_time < 0) \ @@ -716,12 +806,11 @@ extern int32 sim_asynch_inst_latency; } \ else \ a_event_time = uptr->a_event_time; \ + pthread_mutex_unlock (&sim_asynch_lock); \ uptr->a_activate_call (uptr, a_event_time); \ - if (uptr->a_check_completion) { \ - pthread_mutex_unlock (&sim_asynch_lock); \ + if (uptr->a_check_completion) \ uptr->a_check_completion (uptr); \ - pthread_mutex_lock (&sim_asynch_lock); \ - } \ + pthread_mutex_lock (&sim_asynch_lock); \ } \ pthread_mutex_unlock (&sim_asynch_lock); \ } else (void)0 @@ -737,8 +826,9 @@ extern int32 sim_asynch_inst_latency; sim_asynch_queue = uptr; \ } \ if (sim_idle_wait) \ - pthread_cond_signal (&sim_asynch_wake); \ + pthread_cond_signal (&sim_idle_wake); \ pthread_mutex_unlock (&sim_asynch_lock); \ + sim_asynch_check = 0; \ return SCPE_OK; \ } else (void)0 #endif /* USE_AIO_INTRINSICS */ @@ -761,8 +851,12 @@ extern int32 sim_asynch_inst_latency; #define AIO_CHECK_EVENT #define AIO_INIT #define AIO_CLEANUP -#define AIO_POLL_BEGIN(uptr) -#define AIO_POLL_COMPLETE(uptr) +#define AIO_LOCK +#define AIO_UNLOCK +#define AIO_CANCEL(uptr) +#define AIO_RETURN_TIME(uptr) +#define AIO_EVENT_BEGIN(uptr) +#define AIO_EVENT_COMPLETE(uptr, reason) #define AIO_SET_INTERRUPT_LATENCY(instpersec) #endif /* SIM_ASYNCH_IO */ diff --git a/sim_timer.c b/sim_timer.c index 42694a2f..1ac7a602 100644 --- a/sim_timer.c +++ b/sim_timer.c @@ -60,17 +60,20 @@ This library includes the following routines: - sim_timer_init - initialize timing system - sim_rtc_init - initialize calibration - sim_rtc_calb - calibrate clock - sim_timer_init - initialize timing system - sim_idle - virtual machine idle - sim_os_msec - return elapsed time in msec - sim_os_sleep - sleep specified number of seconds - sim_os_ms_sleep - sleep specified number of milliseconds - sim_idle_ms_sleep - sleep 
specified number of milliseconds - or until awakened by an asynchronous - event + sim_timer_init - initialize timing system + sim_rtc_init - initialize calibration + sim_rtc_calb - calibrate clock + sim_timer_init - initialize timing system + sim_idle - virtual machine idle + sim_os_msec - return elapsed time in msec + sim_os_sleep - sleep specified number of seconds + sim_os_ms_sleep - sleep specified number of milliseconds + sim_idle_ms_sleep - sleep specified number of milliseconds + or until awakened by an asynchronous + event + sim_timespec_diff subtract two timespec values + sim_timer_activate_after schedule unit for specific time + The calibration, idle, and throttle routines are OS-independent; the _os_ routines are not. @@ -78,15 +81,17 @@ #include "sim_defs.h" #include +#include -t_bool sim_idle_enab = FALSE; /* global flag */ -volatile t_bool sim_idle_wait = FALSE; /* global flag */ -int32 *sim_tmr_poll = NULL; /* global */ -int32 *sim_clk_tps = NULL; /* global */ +t_bool sim_idle_enab = FALSE; /* global flag */ +volatile t_bool sim_idle_wait = FALSE; /* global flag */ + +static int32 sim_calb_tmr = -1; /* the system calibrated timer */ static uint32 sim_idle_rate_ms = 0; static uint32 sim_os_sleep_min_ms = 0; static uint32 sim_idle_stable = SIM_IDLE_STDFLT; +static t_bool sim_idle_idled = FALSE; static uint32 sim_throt_ms_start = 0; static uint32 sim_throt_ms_stop = 0; static uint32 sim_throt_type = 0; @@ -94,6 +99,7 @@ static uint32 sim_throt_val = 0; static uint32 sim_throt_state = 0; static uint32 sim_throt_sleep_time = 0; static int32 sim_throt_wait = 0; + extern int32 sim_interval, sim_switches; extern FILE *sim_log; extern UNIT *sim_clock_queue; @@ -102,6 +108,20 @@ t_stat sim_throt_svc (UNIT *uptr); UNIT sim_throt_unit = { UDATA (&sim_throt_svc, 0, 0) }; +#define DBG_IDL TIMER_DBG_IDLE /* idling */ +#define DBG_QUE TIMER_DBG_QUEUE /* queue activities */ +#define DBG_TRC 0x004 /* tracing */ +#define DBG_CAL 0x008 /* calibration activities */ +#define DBG_TIM 0x010 /* timer thread activities */ +DEBTAB sim_timer_debug[] = { + {"TRACE", DBG_TRC}, + {"IDLE", DBG_IDL}, + {"QUEUE", DBG_QUE}, + {"CALIB", DBG_CAL}, + {"TIME", DBG_TIM}, + {0} +}; + /* OS-dependent timer and clock routines */ /* VMS */ @@ -440,12 +460,38 @@ sim_timespec_diff (struct timespec *diff, struct timespec *min, struct timespec /* move the minuend value to the difference and operate there. 
*/ *diff = *min; /* Borrow as needed for the nsec value */ -if (sub->tv_nsec > min->tv_nsec) { +while (sub->tv_nsec > diff->tv_nsec) { --diff->tv_sec; diff->tv_nsec += 1000000000; } diff->tv_nsec -= sub->tv_nsec; diff->tv_sec -= sub->tv_sec; +while (diff->tv_nsec > 1000000000) { + ++diff->tv_sec; + diff->tv_nsec -= 1000000000; + } +} + +static int sim_timespec_compare (struct timespec *a, struct timespec *b) +{ +while (a->tv_nsec > 1000000000) { + a->tv_nsec -= 1000000000; + ++a->tv_sec; + } +while (b->tv_nsec > 1000000000) { + b->tv_nsec -= 1000000000; + ++b->tv_sec; + } +if (a->tv_sec < b->tv_sec) + return -1; +if (a->tv_sec > b->tv_sec) + return 1; +if (a->tv_nsec < b->tv_nsec) + return -1; +if (a->tv_nsec > b->tv_nsec) + return 1; +else + return 0; } #if defined(SIM_ASYNCH_IO) @@ -453,20 +499,25 @@ uint32 sim_idle_ms_sleep (unsigned int msec) { uint32 start_time = sim_os_msec(); struct timespec done_time; +t_bool timedout = FALSE; clock_gettime(CLOCK_REALTIME, &done_time); done_time.tv_sec += (msec/1000); done_time.tv_nsec += 1000000*(msec%1000); if (done_time.tv_nsec > 1000000000) { - done_time.tv_sec += 1; + done_time.tv_sec += done_time.tv_nsec/1000000000; done_time.tv_nsec = done_time.tv_nsec%1000000000; } pthread_mutex_lock (&sim_asynch_lock); sim_idle_wait = TRUE; -if (!pthread_cond_timedwait (&sim_asynch_wake, &sim_asynch_lock, &done_time)) +if (!pthread_cond_timedwait (&sim_idle_wake, &sim_asynch_lock, &done_time)) sim_asynch_check = 0; /* force check of asynch queue now */ +else + timedout = TRUE; sim_idle_wait = FALSE; pthread_mutex_unlock (&sim_asynch_lock); +if (!timedout) + AIO_UPDATE_QUEUE; return sim_os_msec() - start_time; } #define SIM_IDLE_MS_SLEEP sim_idle_ms_sleep @@ -480,11 +531,14 @@ static int32 rtc_ticks[SIM_NTIMERS] = { 0 }; /* ticks */ static int32 rtc_hz[SIM_NTIMERS] = { 0 }; /* tick rate */ static uint32 rtc_rtime[SIM_NTIMERS] = { 0 }; /* real time */ static uint32 rtc_vtime[SIM_NTIMERS] = { 0 }; /* virtual time */ +static double rtc_gtime[SIM_NTIMERS] = { 0 }; /* instruction time */ static uint32 rtc_nxintv[SIM_NTIMERS] = { 0 }; /* next interval */ static int32 rtc_based[SIM_NTIMERS] = { 0 }; /* base delay */ static int32 rtc_currd[SIM_NTIMERS] = { 0 }; /* current delay */ static int32 rtc_initd[SIM_NTIMERS] = { 0 }; /* initial delay */ static uint32 rtc_elapsed[SIM_NTIMERS] = { 0 }; /* sec since init */ +static uint32 rtc_calibrations[SIM_NTIMERS] = { 0 }; /* calibration count */ +static double rtc_clock_skew_max[SIM_NTIMERS] = { 0 }; /* asynchronous max skew */ void sim_rtcn_init_all (void) { @@ -498,6 +552,7 @@ return; int32 sim_rtcn_init (int32 time, int32 tmr) { +sim_debug (DBG_CAL, &sim_timer_dev, "sim_rtcn_init(time=%d, tmr=%d)\n", time, tmr); if (time == 0) time = 1; if ((tmr < 0) || (tmr >= SIM_NTIMERS)) @@ -511,10 +566,9 @@ rtc_based[tmr] = time; rtc_currd[tmr] = time; rtc_initd[tmr] = time; rtc_elapsed[tmr] = 0; -if (!sim_tmr_poll) - sim_tmr_poll = &rtc_currd[tmr]; -if (!sim_clk_tps) - sim_clk_tps = &rtc_hz[tmr]; +rtc_calibrations[tmr] = 0; +if (sim_calb_tmr == -1) /* save first initialized clock as the system timer */ + sim_calb_tmr = tmr; return time; } @@ -522,6 +576,7 @@ int32 sim_rtcn_calb (int32 ticksper, int32 tmr) { uint32 new_rtime, delta_rtime; int32 delta_vtime; +double new_gtime; if ((tmr < 0) || (tmr >= SIM_NTIMERS)) return 10000; @@ -534,15 +589,48 @@ rtc_elapsed[tmr] = rtc_elapsed[tmr] + 1; /* count sec */ if (!rtc_avail) /* no timer? 
*/ return rtc_currd[tmr]; new_rtime = sim_os_msec (); /* wall time */ +sim_debug (DBG_TRC, &sim_timer_dev, "sim_rtcn_calb(ticksper=%d, tmr=%d) rtime=%d\n", ticksper, tmr, new_rtime); +if (sim_idle_idled) { + rtc_rtime[tmr] = new_rtime; /* save wall time */ + rtc_vtime[tmr] = rtc_vtime[tmr] + 1000; /* adv sim time */ + rtc_gtime[tmr] = sim_gtime(); /* save instruction time */ + sim_idle_idled = FALSE; /* reset idled flag */ + sim_debug (DBG_CAL, &sim_timer_dev, "skipping calibration due to idling - result: %d\n", rtc_currd[tmr]); + return rtc_currd[tmr]; /* avoid calibrating idle checks */ + } if (new_rtime < rtc_rtime[tmr]) { /* time running backwards? */ rtc_rtime[tmr] = new_rtime; /* reset wall time */ + sim_debug (DBG_CAL, &sim_timer_dev, "time running backwards - result: %d\n", rtc_currd[tmr]); return rtc_currd[tmr]; /* can't calibrate */ } +++rtc_calibrations[tmr]; /* count calibrations */ delta_rtime = new_rtime - rtc_rtime[tmr]; /* elapsed wtime */ rtc_rtime[tmr] = new_rtime; /* adv wall time */ rtc_vtime[tmr] = rtc_vtime[tmr] + 1000; /* adv sim time */ -if (delta_rtime > 30000) /* gap too big? */ +if (delta_rtime > 30000) { /* gap too big? */ + rtc_currd[tmr] = rtc_initd[tmr]; + sim_debug (DBG_CAL, &sim_timer_dev, "gap too big: delta = %d - result: %d\n", delta_rtime, rtc_initd[tmr]); return rtc_initd[tmr]; /* can't calibr */ + } +new_gtime = sim_gtime(); +if (sim_asynch_enabled) + if (rtc_elapsed[tmr] > sim_idle_stable) { + /* An asynchronous clock, merely needs to divide the number of */ + /* instructions actually executed by the clock rate. */ + rtc_currd[tmr] = (int32)((new_gtime - rtc_gtime[tmr])/ticksper); + rtc_gtime[tmr] = new_gtime; /* save instruction time */ + sim_debug (DBG_CAL, &sim_timer_dev, "asynch calibration result: %d\n", rtc_currd[tmr]); + return rtc_currd[tmr]; /* calibrated result */ + } + else { + rtc_currd[tmr] = rtc_initd[tmr]; + sim_debug (DBG_CAL, &sim_timer_dev, "asynch not stable calibration result: %d\n", rtc_initd[tmr]); + return rtc_initd[tmr]; /* initial result until stable */ + } +rtc_gtime[tmr] = new_gtime; /* save instruction time */ +/* This self regulating algorithm depends directly on the assumption */ +/* that this routine is called back after processing the number of */ +/* instructions which was returned the last time it was called. */ if (delta_rtime == 0) /* gap too small? */ rtc_based[tmr] = rtc_based[tmr] * ticksper; /* slew wide */ else rtc_based[tmr] = (int32) (((double) rtc_based[tmr] * (double) rtc_nxintv[tmr]) / @@ -559,6 +647,7 @@ if (rtc_based[tmr] <= 0) /* never negative or zer rtc_based[tmr] = 1; if (rtc_currd[tmr] <= 0) /* never negative or zero! */ rtc_currd[tmr] = 1; +sim_debug (DBG_CAL, &sim_timer_dev, "calibrated result: %d\n", rtc_currd[tmr]); AIO_SET_INTERRUPT_LATENCY(rtc_currd[tmr]*ticksper); /* set interrrupt latency */ return rtc_currd[tmr]; } @@ -579,6 +668,8 @@ return sim_rtcn_calb (ticksper, 0); t_bool sim_timer_init (void) { +sim_debug (DBG_TRC, &sim_timer_dev, "sim_timer_init()\n"); +sim_register_internal_device (&sim_timer_dev); sim_idle_enab = FALSE; /* init idle off */ sim_idle_rate_ms = sim_os_ms_sleep_init (); /* get OS timer rate */ return (sim_idle_rate_ms != 0); @@ -594,21 +685,56 @@ for (tmr=0; tmrflags & UNIT_IDLE) == 0) || /* event not idle-able? */ - (rtc_elapsed[tmr] < sim_idle_stable)) { /* timer not stable? */ +sim_idle_idled = TRUE; /* record idle attempt */ +if ((!sim_idle_enab) || /* idling disabled */ + ((sim_clock_queue == QUEUE_LIST_END) && /* or clock queue empty? 
*/ +#if defined(SIM_ASYNCH_IO) + (!sim_asynch_enabled)) || /* and not asynch? */ +#else + (TRUE)) || +#endif + ((sim_clock_queue != QUEUE_LIST_END) && + ((sim_clock_queue->flags & UNIT_IDLE) == 0))|| /* or event not idle-able? */ + (rtc_elapsed[tmr] < sim_idle_stable)) { /* or timer not stable? */ if (sin_cyc) sim_interval = sim_interval - 1; return FALSE; } +sim_debug (DBG_TRC, &sim_timer_dev, "sim_idle(tmr=%d, sin_cyc=%d)\n", tmr, sin_cyc); if (cyc_ms == 0) /* not computed yet? */ cyc_ms = (rtc_currd[tmr] * rtc_hz[tmr]) / 1000; /* cycles per msec */ if ((sim_idle_rate_ms == 0) || (cyc_ms == 0)) { /* not possible? */ if (sin_cyc) sim_interval = sim_interval - 1; + sim_debug (DBG_IDL, &sim_timer_dev, "not possible %d - %d\n", sim_idle_rate_ms, cyc_ms); return FALSE; } w_ms = (uint32) sim_interval / cyc_ms; /* ms to wait */ @@ -648,8 +784,15 @@ w_idle = w_ms / sim_idle_rate_ms; /* intervals to wait */ if (w_idle == 0) { /* none? */ if (sin_cyc) sim_interval = sim_interval - 1; + sim_debug (DBG_IDL, &sim_timer_dev, "no wait\n"); return FALSE; } +if (sim_clock_queue == QUEUE_LIST_END) + sim_debug (DBG_IDL, &sim_timer_dev, "sleeping for %d ms - pending event in %d instructions\n", w_ms, sim_interval); +else { + DEVICE *d = find_dev_from_unit(sim_clock_queue); + sim_debug (DBG_IDL, &sim_timer_dev, "sleeping for %d ms - pending event on %s in %d instructions\n", w_ms, d->name, sim_interval); + } act_ms = SIM_IDLE_MS_SLEEP (w_ms); /* wait */ act_cyc = act_ms * cyc_ms; if (act_ms < w_ms) /* awakened early? */ @@ -657,6 +800,12 @@ if (act_ms < w_ms) /* awakened early? */ if (sim_interval > act_cyc) sim_interval = sim_interval - act_cyc; /* count down sim_interval */ else sim_interval = 0; /* or fire immediately */ +if (sim_clock_queue == QUEUE_LIST_END) + sim_debug (DBG_IDL, &sim_timer_dev, "slept for %d ms - pending event in %d instructions\n", act_ms, sim_interval); +else { + DEVICE *d = find_dev_from_unit(sim_clock_queue); + sim_debug (DBG_IDL, &sim_timer_dev, "slept for %d ms - pending event on %s in %d instructions\n", act_ms, d->name, sim_interval); + } return TRUE; } @@ -729,8 +878,12 @@ if (arg == 0) { sim_throt_type = SIM_THROT_NONE; sim_throt_cancel (); } -else if (sim_idle_rate_ms == 0) +else if (sim_idle_rate_ms == 0) { + printf ("Throttling is not available, Minimum OS sleep time is %dms\n", sim_os_sleep_min_ms); + if (sim_log) + fprintf (sim_log, "Throttling is not available, Minimum OS sleep time is %dms\n", sim_os_sleep_min_ms); return SCPE_NOFNC; + } else { val = strtotv (cptr, &tptr, 10); if (cptr == tptr) @@ -898,3 +1051,316 @@ switch (sim_throt_state) { sim_activate (uptr, sim_throt_wait); /* reschedule */ return SCPE_OK; } + +#if defined(SIM_ASYNCH_IO) + +static double _timespec_to_double (struct timespec *time) +{ +return ((double)time->tv_sec)+(double)(time->tv_nsec)/1000000000.0; +} + +static void _double_to_timespec (struct timespec *time, double dtime) +{ +time->tv_sec = (time_t)floor(dtime); +time->tv_nsec = (long)((dtime-floor(dtime))*1000000000.0); +} + +double sim_timenow_double (void) +{ +struct timespec now; + +clock_gettime(CLOCK_REALTIME, &now); +return _timespec_to_double (&now); +} + +extern int32 sim_is_running; +extern UNIT * volatile sim_wallclock_queue; +extern UNIT * volatile sim_wallclock_entry; + +pthread_t sim_timer_thread; /* Wall Clock Timing Thread Id */ +pthread_cond_t sim_timer_startup_cond; +t_bool sim_timer_thread_running = FALSE; +t_bool sim_timer_event_canceled = FALSE; + +static void * +_timer_thread(void *arg) +{ +int sched_policy; +struct 
sched_param sched_priority; + +/* Boost Priority for this I/O thread vs the CPU instruction execution + thread which, in general, won't be readily yielding the processor when + this thread needs to run */ +pthread_getschedparam (pthread_self(), &sched_policy, &sched_priority); +++sched_priority.sched_priority; +pthread_setschedparam (pthread_self(), sched_policy, &sched_priority); + +sim_debug (DBG_TIM, &sim_timer_dev, "_timer_thread() - starting\n"); + +pthread_mutex_lock (&sim_timer_lock); +pthread_cond_signal (&sim_timer_startup_cond); /* Signal we're ready to go */ +while (sim_asynch_enabled && sim_is_running) { + struct timespec start_time, stop_time; + struct timespec due_time; + double wait_usec; + int32 inst_delay; + int32 inst_per_sec; + UNIT *uptr; + DEVICE *d; + + if (sim_wallclock_entry) { /* something to insert in queue? */ + UNIT *cptr, *prvptr; + + d = find_dev_from_unit(sim_wallclock_entry); + sim_debug (DBG_TIM, &sim_timer_dev, (d->numunits > 1) ? "_timer_thread() - timing %s%d for %d usec\n" : "_timer_thread() - timing %s%.0d for %d usec\n", + d->name, (int)(sim_wallclock_entry-d->units), sim_wallclock_entry->time); + + uptr = sim_wallclock_entry; + sim_wallclock_entry = NULL; + + prvptr = NULL; + for (cptr = sim_wallclock_queue; cptr != QUEUE_LIST_END; cptr = cptr->next) { + if (uptr->a_due_time < cptr->a_due_time) + break; + prvptr = cptr; + } + if (prvptr == NULL) { /* insert at head */ + cptr = uptr->next = sim_wallclock_queue; + sim_wallclock_queue = uptr; + } + else { + cptr = uptr->next = prvptr->next; /* insert at prvptr */ + prvptr->next = uptr; + } + } + + /* determine wait time */ + if (sim_wallclock_queue != QUEUE_LIST_END) { + /* due time adjusted by 1/2 a minimal sleep interval */ + /* the goal being to let the last fractional part of the due time */ + /* be done by counting instructions */ + _double_to_timespec (&due_time, sim_wallclock_queue->a_due_time-(((double)sim_idle_rate_ms)*0.0005)); + d = find_dev_from_unit(sim_wallclock_queue); /* find this before waiting */ + } + else { + due_time.tv_sec = 0x7FFFFFFF; /* Sometime when 32 bit time_t wraps */ + due_time.tv_nsec = 0; + } + clock_gettime(CLOCK_REALTIME, &start_time); + wait_usec = floor(1000000.0*(_timespec_to_double (&due_time) - _timespec_to_double (&start_time))); + if (sim_wallclock_queue == QUEUE_LIST_END) + sim_debug (DBG_TIM, &sim_timer_dev, "_timer_thread() - waiting forever\n"); + else + sim_debug (DBG_TIM, &sim_timer_dev, "_timer_thread() - waiting for %.0f usecs until %.6f\n", wait_usec, sim_wallclock_queue->a_due_time); + if ((wait_usec <= 0.0) || + (0 != pthread_cond_timedwait (&sim_timer_wake, &sim_timer_lock, &due_time))) { + if (sim_wallclock_queue == QUEUE_LIST_END) /* queue empty? */ + continue; /* wait again */ + inst_per_sec = sim_timer_inst_per_sec (); + + uptr = sim_wallclock_queue; + sim_wallclock_queue = uptr->next; + uptr->next = NULL; /* hygiene */ + + clock_gettime(CLOCK_REALTIME, &stop_time); + if (1 != sim_timespec_compare (&due_time, &stop_time)) { + inst_delay = 0; + uptr->a_last_fired_time = _timespec_to_double(&stop_time); + } + else { + inst_delay = (int32)(inst_per_sec*(_timespec_to_double(&due_time)-_timespec_to_double(&stop_time))); + uptr->a_last_fired_time = uptr->a_due_time; + } + sim_debug (DBG_TIM, &sim_timer_dev, (d->numunits > 1) ? 
"_timer_thread() - slept %.0fms - activating(%s%d,%d)\n" : "_timer_thread() - slept %.0fms - activating(%s%.0d,%d)\n", + 1000.0*(_timespec_to_double (&stop_time)-_timespec_to_double (&start_time)), d->name, (int)(uptr-d->units), inst_delay); + sim_activate (uptr, inst_delay); + } + else /* Something wants to adjust the queue */ + if (sim_timer_event_canceled) + sim_timer_event_canceled = FALSE; /* reset flag and continue */ + else + if (sim_wallclock_entry == NULL) /* nothing to insert? */ + break; /* stop processing entries */ + } +pthread_mutex_unlock (&sim_timer_lock); + +sim_debug (DBG_TIM, &sim_timer_dev, "_timer_thread() - exiting\n"); + +return NULL; +} + +#endif + +void sim_start_timer_services (void) +{ +#if defined(SIM_ASYNCH_IO) +pthread_mutex_lock (&sim_timer_lock); +if (sim_asynch_enabled) { + pthread_attr_t attr; + UNIT *cptr; + double delta_due_time; + + /* when restarting after being manually stopped the due times for all */ + /* timer events needs to slide so they fire in the future. (clock ticks */ + /* don't accumulate when the simulator is stopped) */ + for (cptr = sim_wallclock_queue; cptr != QUEUE_LIST_END; cptr = cptr->next) { + if (cptr == sim_wallclock_queue) { /* Handle first entry */ + struct timespec now; + double due_time; + + clock_gettime(CLOCK_REALTIME, &now); + due_time = _timespec_to_double(&now) + ((double)(cptr->a_usec_delay)/1000000.0); + delta_due_time = due_time - cptr->a_due_time; + } + cptr->a_due_time += delta_due_time; + } + sim_debug (DBG_TRC, &sim_timer_dev, "sim_start_timer_services() - starting\n"); + pthread_cond_init (&sim_timer_startup_cond, NULL); + pthread_attr_init (&attr); + pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM); + pthread_create (&sim_timer_thread, &attr, _timer_thread, NULL); + pthread_attr_destroy( &attr); + pthread_cond_wait (&sim_timer_startup_cond, &sim_timer_lock); /* Wait for thread to stabilize */ + pthread_cond_destroy (&sim_timer_startup_cond); + sim_timer_thread_running = TRUE; + } +pthread_mutex_unlock (&sim_timer_lock); +#endif +} + +void sim_stop_timer_services (void) +{ +#if defined(SIM_ASYNCH_IO) +pthread_mutex_lock (&sim_timer_lock); +if (sim_timer_thread_running) { + sim_debug (DBG_TRC, &sim_timer_dev, "sim_stop_timer_services() - stopping\n"); + pthread_cond_signal (&sim_timer_wake); + pthread_mutex_unlock (&sim_timer_lock); + pthread_join (sim_timer_thread, NULL); + sim_timer_thread_running = FALSE; + } +else + pthread_mutex_unlock (&sim_timer_lock); +#endif +} + +t_stat sim_timer_change_asynch (void) +{ +#if defined(SIM_ASYNCH_IO) +if (sim_asynch_enabled) + sim_start_timer_services (); +else { + UNIT *uptr; + int32 accum = 0; + + sim_stop_timer_services (); + while (1) { + uptr = sim_wallclock_queue; + if (uptr == QUEUE_LIST_END) + break; + sim_wallclock_queue = uptr->next; + accum += uptr->time; + uptr->next = NULL; + uptr->a_due_time = 0; + uptr->a_usec_delay = 0; + sim_activate_after (uptr, accum); + } + } +#endif +return SCPE_OK; +} + +int32 sim_timer_inst_per_sec (void) +{ +int32 inst_per_sec; + +if (sim_calb_tmr == -1) + return SIM_INITIAL_IPS; +inst_per_sec = rtc_currd[sim_calb_tmr]*rtc_hz[sim_calb_tmr]; +if (0 == inst_per_sec) + inst_per_sec = SIM_INITIAL_IPS; +return inst_per_sec; +} + +t_stat sim_timer_activate_after (UNIT *uptr, int32 usec_delay) +{ +int32 inst_delay; +int32 inst_per_sec; +DEVICE *d; + +AIO_VALIDATE; +if (sim_is_active_bool (uptr)) /* already active? 
*/
+    return SCPE_OK;
+inst_per_sec = sim_timer_inst_per_sec ();
+inst_delay = (int32)((((double)inst_per_sec)*usec_delay)/1000000.0);
+#if defined(SIM_ASYNCH_IO)
+if ((sim_calb_tmr == -1) ||                         /* if No timer initialized */
+    (inst_delay < rtc_currd[sim_calb_tmr]) ||       /* or sooner than next clock tick? */
+    (rtc_elapsed[sim_calb_tmr] < sim_idle_stable) ||/* or not idle stable yet */
+    (!sim_asynch_enabled)) {                        /* or asynch disabled */
+    if (sim_deb)
+        d = find_dev_from_unit(uptr);
+    sim_debug (DBG_TIM, &sim_timer_dev, (d->numunits > 1) ? "sim_timer_activate_after() - activating %s%d after %d instructions\n" : "sim_timer_activate_after() - activating %s%.0d after %d instructions\n" ,
+               d->name, (int)(uptr-d->units), inst_delay);
+    return _sim_activate (uptr, inst_delay);        /* queue it now */
+    }
+if (1) {
+    struct timespec now;
+    double d_now;
+
+    clock_gettime (CLOCK_REALTIME, &now);
+    d_now = _timespec_to_double (&now);
+    /* Determine if this is a clock tick like invocation
+       or an occasional measured device delay */
+    if ((uptr->a_usec_delay == usec_delay) &&
+        (uptr->a_due_time != 0.0) &&
+        (1)) {
+        double d_delay = ((double)usec_delay)/1000000.0;
+
+        uptr->a_due_time += d_delay;
+        if (uptr->a_due_time < (d_now + d_delay*0.1)) {     /* Accumulate lost time */
+            uptr->a_skew += (d_now + d_delay*0.1) - uptr->a_due_time;
+            uptr->a_due_time = d_now + d_delay/10.0;
+            if (uptr->a_skew > 30.0) {                      /* Gap too big? */
+                uptr->a_usec_delay = usec_delay;
+                uptr->a_skew = uptr->a_last_fired_time = 0.0;
+                uptr->a_due_time = d_now + (double)(usec_delay)/1000000.0;
+                }
+            if (uptr->a_skew > rtc_clock_skew_max[sim_calb_tmr])
+                rtc_clock_skew_max[sim_calb_tmr] = uptr->a_skew;
+            }
+        else {
+            if (uptr->a_skew > 0.0) {                       /* Lost time to make up? */
+                if (uptr->a_skew > d_delay*0.9) {
+                    uptr->a_skew -= d_delay*0.9;
+                    uptr->a_due_time -= d_delay*0.9;
+                    }
+                else {
+                    uptr->a_due_time -= uptr->a_skew;
+                    uptr->a_skew = 0.0;
+                    }
+                }
+            }
+        }
+    else {
+        uptr->a_usec_delay = usec_delay;
+        uptr->a_skew = uptr->a_last_fired_time = 0.0;
+        uptr->a_due_time = d_now + (double)(usec_delay)/1000000.0;
+        }
+    uptr->time = usec_delay;
+
+    if (sim_deb)
+        d = find_dev_from_unit(uptr);
+    sim_debug (DBG_TIM, &sim_timer_dev, (d->numunits > 1) ? 
"sim_timer_activate_after() - queue addition %s%d at %.6f\n" : "sim_timer_activate_after() - queue addition %s%.0d at %.6f\n" , + d->name, (int)(uptr-d->units), uptr->a_due_time); + } +pthread_mutex_lock (&sim_timer_lock); +sim_wallclock_entry = uptr; +pthread_mutex_unlock (&sim_timer_lock); +pthread_cond_signal (&sim_timer_wake); /* wake the timer thread to deal with it */ +return SCPE_OK; +#else +return _sim_activate (uptr, inst_delay); /* queue it now */ +#endif +} + diff --git a/sim_timer.h b/sim_timer.h index 5aab518b..46f9014e 100644 --- a/sim_timer.h +++ b/sim_timer.h @@ -58,35 +58,44 @@ int clock_gettime(int clock_id, struct timespec *tp); #endif -#define SIM_NTIMERS 8 /* # timers */ -#define SIM_TMAX 500 /* max timer makeup */ +#define SIM_NTIMERS 8 /* # timers */ +#define SIM_TMAX 500 /* max timer makeup */ -#define SIM_IDLE_CAL 10 /* ms to calibrate */ -#define SIM_IDLE_MAX 10 /* max granularity idle */ -#define SIM_IDLE_STMIN 10 /* min sec for stability */ -#define SIM_IDLE_STDFLT 20 /* dft sec for stability */ -#define SIM_IDLE_STMAX 600 /* max sec for stability */ +#define SIM_INITIAL_IPS 50000 /* uncalibrated assumption */ + /* about instructions per second */ -#define SIM_THROT_WINIT 1000 /* cycles to skip */ -#define SIM_THROT_WST 10000 /* initial wait */ -#define SIM_THROT_WMUL 4 /* multiplier */ -#define SIM_THROT_WMIN 100 /* min wait */ -#define SIM_THROT_MSMIN 10 /* min for measurement */ -#define SIM_THROT_NONE 0 /* throttle parameters */ -#define SIM_THROT_MCYC 1 /* MegaCycles Per Sec */ -#define SIM_THROT_KCYC 2 /* KiloCycles Per Sec */ -#define SIM_THROT_PCT 3 /* Max Percent of host CPU */ -#define SIM_THROT_SPC 4 /* Specific periodic Delay */ +#define SIM_IDLE_CAL 10 /* ms to calibrate */ +#define SIM_IDLE_MAX 10 /* max granularity idle */ +#define SIM_IDLE_STMIN 10 /* min sec for stability */ +#define SIM_IDLE_STDFLT 20 /* dft sec for stability */ +#define SIM_IDLE_STMAX 600 /* max sec for stability */ + +#define SIM_THROT_WINIT 1000 /* cycles to skip */ +#define SIM_THROT_WST 10000 /* initial wait */ +#define SIM_THROT_WMUL 4 /* multiplier */ +#define SIM_THROT_WMIN 100 /* min wait */ +#define SIM_THROT_MSMIN 10 /* min for measurement */ +#define SIM_THROT_NONE 0 /* throttle parameters */ +#define SIM_THROT_MCYC 1 /* MegaCycles Per Sec */ +#define SIM_THROT_KCYC 2 /* KiloCycles Per Sec */ +#define SIM_THROT_PCT 3 /* Max Percent of host CPU */ +#define SIM_THROT_SPC 4 /* Specific periodic Delay */ + +#define TIMER_DBG_IDLE 1 /* Debug Flag for Idle Debugging */ +#define TIMER_DBG_QUEUE 2 /* Debug Flag for Asynch Queue Debugging */ t_bool sim_timer_init (void); void sim_timespec_diff (struct timespec *diff, struct timespec *min, struct timespec *sub); +#if defined(SIM_ASYNCH_IO) +double sim_timenow_double (void); +#endif int32 sim_rtcn_init (int32 time, int32 tmr); void sim_rtcn_init_all (void); int32 sim_rtcn_calb (int32 ticksper, int32 tmr); int32 sim_rtc_init (int32 time); int32 sim_rtc_calb (int32 ticksper); t_stat sim_show_timers (FILE* st, DEVICE *dptr, UNIT* uptr, int32 val, char* desc); -t_bool sim_idle (uint32 tmr, t_bool sin_cyc); +t_bool sim_idle (int32 tmr, t_bool sin_cyc); t_stat sim_set_throt (int32 arg, char *cptr); t_stat sim_show_throt (FILE *st, DEVICE *dnotused, UNIT *unotused, int32 flag, char *cptr); t_stat sim_set_idle (UNIT *uptr, int32 val, char *cptr, void *desc); @@ -98,11 +107,14 @@ uint32 sim_os_msec (void); void sim_os_sleep (unsigned int sec); uint32 sim_os_ms_sleep (unsigned int msec); uint32 sim_os_ms_sleep_init (void); +void 
sim_start_timer_services (void); +void sim_stop_timer_services (void); +t_stat sim_timer_change_asynch (void); +t_stat sim_timer_activate_after (UNIT *uptr, int32 usec_delay); +int32 sim_timer_inst_per_sec (void); extern t_bool sim_idle_enab; /* idle enabled flag */ extern volatile t_bool sim_idle_wait; /* idle waiting flag */ -extern int32 *sim_tmr_poll; /* pointer to instructions per clock tick */ -extern int32 *sim_clk_tps; /* pointer to clock ticks per second */ - +extern DEVICE sim_timer_dev; #endif diff --git a/sim_tmxr.c b/sim_tmxr.c index 7542310b..ed53c06e 100644 --- a/sim_tmxr.c +++ b/sim_tmxr.c @@ -524,11 +524,13 @@ for (i = 0; i < mp->lines; i++) { /* loop thru lines */ nbytes = tmxr_send_buffered_data (lp); /* buffered bytes */ if (nbytes == 0) /* buf empty? enab line */ lp->xmte = 1; +#if defined(SIM_ASYNCH_IO) if (lp->uptr && (lp->uptr->flags & UNIT_TM_POLL) && sim_asynch_enabled && tmxr_rqln (lp)) _sim_activate (lp->uptr, 0); +#endif } /* end for */ return; } @@ -713,6 +715,8 @@ sim_con_ldsc.uptr = uptr; if (!(uptr->flags & UNIT_TM_POLL)) { uptr->flags |= UNIT_TM_POLL; /* tag as polling unit */ } +else + sim_cancel (uptr); return SCPE_OK; } @@ -727,9 +731,6 @@ pthread_cond_t sim_tmxr_poll_cond; pthread_cond_t sim_tmxr_startup_cond; int32 sim_tmxr_poll_count = 0; t_bool sim_tmxr_poll_running = FALSE; -pthread_t sim_console_poll_thread; /* Keyboard Polling Thread Id */ -pthread_cond_t sim_console_startup_cond; -t_bool sim_console_poll_running = FALSE; static void * _tmxr_poll(void *arg) @@ -751,7 +752,7 @@ pthread_getschedparam (pthread_self(), &sched_policy, &sched_priority); ++sched_priority.sched_priority; pthread_setschedparam (pthread_self(), sched_policy, &sched_priority); -sim_debug (TMXR_DBG_TRC, dptr, "_tmxr_poll() - starting\n"); +sim_debug (TMXR_DBG_ASY, dptr, "_tmxr_poll() - starting\n"); units = calloc(FD_SETSIZE, sizeof(*units)); activated = calloc(FD_SETSIZE, sizeof(*activated)); @@ -760,20 +761,26 @@ timeout_usec = 1000000; pthread_mutex_lock (&sim_tmxr_poll_lock); pthread_cond_signal (&sim_tmxr_startup_cond); /* Signal we're ready to go */ while (sim_asynch_enabled) { - int i, j, status; + int i, j, status, select_errno; fd_set readfds, errorfds; int socket_count; SOCKET max_socket_fd; TMXR *mp; DEVICE *d; + if ((tmxr_open_device_count == 0) || (!sim_is_running)) { + for (j=0; jname, (int)(activated[j]-d->units), activated[j]->a_poll_waiter_count); + --activated[j]->a_poll_waiter_count; + --sim_tmxr_poll_count; + } + break; + } /* If we started something we should wait for, let it finish before polling again */ if (wait_count) { pthread_cond_wait (&sim_tmxr_poll_cond, &sim_tmxr_poll_lock); - sim_debug (TMXR_DBG_TRC, dptr, "_tmxr_poll() - continuing with timeout of %dms\n", timeout_usec/1000); + sim_debug (TMXR_DBG_ASY, dptr, "_tmxr_poll() - continuing with timeout of %dms\n", timeout_usec/1000); } - if ((tmxr_open_device_count == 0) || (!sim_is_running)) - break; FD_ZERO (&readfds); FD_ZERO (&errorfds); for (i=max_socket_fd=socket_count=0; i 1000000) timeout_usec = 1000000; - timeout.tv_sec = 0; - timeout.tv_usec = timeout_usec; + timeout.tv_sec = timeout_usec/1000000; + timeout.tv_usec = timeout_usec%1000000; + select_errno = 0; status = select(1+(int)max_socket_fd, &readfds, NULL, &errorfds, &timeout); + select_errno = errno; wait_count=0; pthread_mutex_lock (&sim_tmxr_poll_lock); switch (status) { @@ -818,8 +827,10 @@ while (sim_asynch_enabled) { mp->uptr->a_polling_now = TRUE; mp->uptr->a_poll_waiter_count = 0; d = find_dev_from_unit(mp->uptr); - 
sim_debug (TMXR_DBG_TRC, d, "_tmxr_poll() - Activating %s%d to poll connect\n", d->name, (int)(mp->uptr-d->units)); + sim_debug (TMXR_DBG_ASY, d, "_tmxr_poll() - Activating %s%d to poll connect\n", d->name, (int)(mp->uptr-d->units)); + pthread_mutex_unlock (&sim_tmxr_poll_lock); _sim_activate (mp->uptr, 0); + pthread_mutex_lock (&sim_tmxr_poll_lock); } if (mp->txcount) { timeout_usec = 10000; /* Wait 10ms next time (this gets doubled below) */ @@ -836,18 +847,24 @@ while (sim_asynch_enabled) { mp->ldsc[j].uptr->a_polling_now = TRUE; mp->ldsc[j].uptr->a_poll_waiter_count = 0; d = find_dev_from_unit(mp->ldsc[j].uptr); - sim_debug (TMXR_DBG_TRC, d, "_tmxr_poll() - Line %d Activating %s%d to poll data: %d/%d\n", + sim_debug (TMXR_DBG_ASY, d, "_tmxr_poll() - Line %d Activating %s%d to poll data: %d/%d\n", j, d->name, (int)(mp->ldsc[j].uptr-d->units), tmxr_tqln(&mp->ldsc[j]), tmxr_rqln (&mp->ldsc[j])); + pthread_mutex_unlock (&sim_tmxr_poll_lock); _sim_activate (mp->ldsc[j].uptr, 0); + pthread_mutex_lock (&sim_tmxr_poll_lock); } } } } } - sim_debug (TMXR_DBG_TRC, dptr, "_tmxr_poll() - Poll Timeout - %dms\n", timeout_usec/1000); + sim_debug (TMXR_DBG_ASY, dptr, "_tmxr_poll() - Poll Timeout - %dms\n", timeout_usec/1000); timeout_usec *= 2; /* Double timeout time */ break; case SOCKET_ERROR: + wait_count = 0; + if (select_errno == EINTR) + break; + fprintf (stderr, "select() returned -1, errno=%d - %s\r\n", select_errno, strerror(select_errno)); abort(); break; default: @@ -867,11 +884,16 @@ while (sim_asynch_enabled) { activated[j]->a_polling_now = TRUE; activated[j]->a_poll_waiter_count = 1; d = find_dev_from_unit(activated[j]); - sim_debug (TMXR_DBG_TRC, d, "_tmxr_poll() - Activating for data %s%d\n", d->name, (int)(activated[j]-d->units)); + sim_debug (TMXR_DBG_ASY, d, "_tmxr_poll() - Activating for data %s%d\n", d->name, (int)(activated[j]-d->units)); + pthread_mutex_unlock (&sim_tmxr_poll_lock); _sim_activate (activated[j], 0); + pthread_mutex_lock (&sim_tmxr_poll_lock); } - else + else { + d = find_dev_from_unit(activated[j]); + sim_debug (TMXR_DBG_ASY, d, "_tmxr_poll() - Already Activated %s%d %d times\n", d->name, (int)(activated[j]-d->units), activated[j]->a_poll_waiter_count); ++activated[j]->a_poll_waiter_count; + } } } } @@ -886,7 +908,7 @@ free(units); free(activated); free(sockets); -sim_debug (TMXR_DBG_TRC, dptr, "_tmxr_poll() - exiting\n"); +sim_debug (TMXR_DBG_ASY, dptr, "_tmxr_poll() - exiting\n"); return NULL; } @@ -924,6 +946,20 @@ if (sim_tmxr_poll_running) { pthread_mutex_unlock (&sim_tmxr_poll_lock); pthread_join (sim_tmxr_poll_thread, NULL); sim_tmxr_poll_running = FALSE; + /* Transitioning from asynch mode so kick all polling units onto the event queue */ + if (tmxr_open_device_count) { + int i, j; + + for (i=0; iuptr) + _sim_activate (mp->uptr, 0); + for (j = 0; j < mp->lines; ++j) + if (mp->ldsc[j].uptr) + _sim_activate (mp->ldsc[j].uptr, 0); + } + } } else pthread_mutex_unlock (&sim_tmxr_poll_lock); @@ -935,8 +971,10 @@ static void _tmxr_add_to_open_list (TMXR* mux) { tmxr_open_devices = realloc(tmxr_open_devices, (tmxr_open_device_count+1)*sizeof(*tmxr_open_devices)); tmxr_open_devices[tmxr_open_device_count++] = mux; +#if defined(SIM_ASYNCH_IO) if ((tmxr_open_device_count == 1) && (sim_asynch_enabled)) tmxr_start_poll (); +#endif } static void _tmxr_remove_from_open_list (TMXR* mux) @@ -1106,6 +1144,18 @@ return _sim_activate (uptr, interval); #endif } +t_stat tmxr_activate_after (UNIT *uptr, int32 usecs_walltime) +{ +#if defined(SIM_ASYNCH_IO) +if ((!(uptr->flags & 
UNIT_TM_POLL)) ||
+    (!sim_asynch_enabled)) {
+    return _sim_activate_after (uptr, usecs_walltime);
+    }
+return SCPE_OK;
+#else
+return _sim_activate_after (uptr, usecs_walltime);
+#endif
+}

 /* Stub examine and deposit */

diff --git a/sim_tmxr.h b/sim_tmxr.h
index fa607f6a..7a793be2 100644
--- a/sim_tmxr.h
+++ b/sim_tmxr.h
@@ -53,12 +53,16 @@
 #define TMXR_DBG_XMT    0x10000                         /* Debug Transmit Data */
 #define TMXR_DBG_RCV    0x20000                         /* Debug Received Data */
-#define TMXR_DBG_TRC    0x40000                         /* Debug trace routine calls */
+#define TMXR_DBG_ASY    0x40000                         /* Debug Asynchronous Activities */
+#define TMXR_DBG_TRC    0x80000                         /* Debug trace routine calls */

 /* Unit flags */

-#define TMUF_V_NOASYNCH (UNIT_V_UF + 12)                /* Asynch Disabled unit */
-#define TMUF_NOASYNCH   (1u << TMUF_V_NOASYNCH)
+#define TMUF_V_NOASYNCH (UNIT_V_UF + 12)                /* Asynch Disabled unit */
+#define TMUF_NOASYNCH   (1u << TMUF_V_NOASYNCH)         /* This flag can be defined */
+                                                        /* statically in a unit's flag field */
+                                                        /* This will disable the unit from */
+                                                        /* supporting asynchronous mux behaviors */

 typedef struct tmln TMLN;
 typedef struct tmxr TMXR;
@@ -135,6 +139,7 @@ t_stat tmxr_show_cstat (FILE *st, UNIT *uptr, int32 val, void *desc);
 t_stat tmxr_show_lines (FILE *st, UNIT *uptr, int32 val, void *desc);
 t_stat tmxr_show_open_devices (FILE* st, DEVICE *dptr, UNIT* uptr, int32 val, char* desc);
 t_stat tmxr_activate (UNIT *uptr, int32 interval);
+t_stat tmxr_activate_after (UNIT *uptr, int32 usecs_walltime);
 t_stat tmxr_change_async (void);
 t_stat tmxr_startup (void);
 t_stat tmxr_shutdown (void);
@@ -150,6 +155,7 @@ extern FILE *sim_deb;                                   /* debug file */
 #define tmxr_attach(mp, uptr, cptr) tmxr_attach_ex(mp, uptr, cptr, TRUE)
 #if (!defined(NOT_MUX_USING_CODE))
 #define sim_activate tmxr_activate
+#define sim_activate_after tmxr_activate_after
 #endif
 #else
 #define tmxr_attach(mp, uptr, cptr) tmxr_attach_ex(mp, uptr, cptr, FALSE)
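
Example use of the new wall clock event API (an illustrative sketch only; the
routine and unit names below are hypothetical and are not part of this patch).
A device which wants its unit service routine to run again after a fixed
amount of wall clock time, rather than after a calibrated instruction count,
can simply reschedule itself with sim_activate_after().  For multiplexer
polling units built with asynch support, the sim_tmxr.h macro shown above maps
sim_activate_after to tmxr_activate_after, which skips the explicit activation
when the asynchronous polling thread is already doing the work.

    #include "sim_defs.h"                       /* UNIT, t_stat, SCPE_OK */

    /* hypothetical unit service routine which polls every 10ms of wall time */
    t_stat xyz_poll_svc (UNIT *uptr)
    {
    /* ... sample the simulated device state here ... */
    sim_activate_after (uptr, 10000);           /* next poll in 10000 usecs */
    return SCPE_OK;
    }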