SCP: Make sim_cancel more efficient by stopping search as soon as canceled

This commit is contained in:
Mark Pizzolato 2016-12-24 09:49:32 -08:00
parent 875926c271
commit 7c2d20f26f
5 changed files with 27 additions and 26 deletions

scp.c

@@ -9076,6 +9076,8 @@ t_stat sim_cancel (UNIT *uptr)
 UNIT *cptr, *nptr;

 AIO_VALIDATE;
+if ((uptr->cancel) && uptr->cancel (uptr))
+    return SCPE_OK;
 AIO_CANCEL(uptr);
 AIO_UPDATE_QUEUE;
 if (sim_clock_queue == QUEUE_LIST_END)
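
The two added lines above are the whole optimization: if a unit has installed a specialized cancel handler and that handler reports that it fully disposed of the pending event, sim_cancel returns immediately instead of walking the global clock queue. A minimal standalone sketch of the pattern (the UNIT, queue, and names here are pared-down stand-ins for illustration, not simh's real declarations):

/* Standalone sketch of the new fast path -- stand-ins, not simh code. */

#include <stdio.h>

typedef int t_stat;
typedef int t_bool;
#define SCPE_OK 0
#define TRUE    1
#define FALSE   0

typedef struct UNIT UNIT;
struct UNIT {
    UNIT   *next;                       /* event-queue linkage */
    t_bool (*cancel)(UNIT *);           /* optional specialized canceler */
};

static UNIT *clock_queue = NULL;        /* stand-in for sim_clock_queue */

static t_stat sketch_sim_cancel (UNIT *uptr)
{
    /* Fast path added by this commit: a specialized canceler that
       returns TRUE has fully disposed of the event, so the linear
       search of the global queue is skipped entirely. */
    if ((uptr->cancel) && uptr->cancel (uptr))
        return SCPE_OK;

    /* Generic path: unlink the unit from the event queue. */
    for (UNIT **pp = &clock_queue; *pp != NULL; pp = &(*pp)->next)
        if (*pp == uptr) {
            *pp = uptr->next;
            uptr->next = NULL;
            break;
        }
    return SCPE_OK;
}

/* A canceler that claims the event, making the queue search unnecessary. */
static t_bool demo_cancel (UNIT *uptr)
{
    (void)uptr;
    printf ("demo_cancel: handled without a queue search\n");
    return TRUE;
}

int main (void)
{
    UNIT u = { NULL, demo_cancel };
    sketch_sim_cancel (&u);             /* early return via the hook */
    u.cancel = NULL;
    sketch_sim_cancel (&u);             /* falls through to the search */
    return 0;
}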

sim_defs.h

@@ -541,7 +541,7 @@ struct UNIT {
     void                *up7;                   /* device specific */
     void                *up8;                   /* device specific */
     void                *tmxr;                  /* TMXR linkage */
-    void                (*cancel)(UNIT *);
+    t_bool              (*cancel)(UNIT *);
 #ifdef SIM_ASYNCH_IO
     void                (*a_check_completion)(UNIT *);
     t_bool              (*a_is_active)(UNIT *);
@@ -985,21 +985,15 @@ extern int32 sim_asynch_inst_latency;
 #define AIO_IS_ACTIVE(uptr) (((uptr)->a_is_active ? (uptr)->a_is_active (uptr) : FALSE) || ((uptr)->a_next))
 #if defined(SIM_ASYNCH_MUX)
 #define AIO_CANCEL(uptr) \
-    if ((uptr)->cancel) \
-        (uptr)->cancel (uptr); \
-    else { \
     if (((uptr)->dynflags & UNIT_TM_POLL) && \
         !((uptr)->next) && !((uptr)->a_next)) { \
         (uptr)->a_polling_now = FALSE; \
         sim_tmxr_poll_count -= (uptr)->a_poll_waiter_count; \
         (uptr)->a_poll_waiter_count = 0; \
-        } \
         }
 #endif /* defined(SIM_ASYNCH_MUX) */
 #if !defined(AIO_CANCEL)
-#define AIO_CANCEL(uptr) \
-    if ((uptr)->cancel) \
-        (uptr)->cancel (uptr)
+#define AIO_CANCEL(uptr)
 #endif /* !defined(AIO_CANCEL) */
 #define AIO_EVENT_BEGIN(uptr) \
 do { \
@@ -1179,9 +1173,7 @@ extern int32 sim_asynch_inst_latency;
 #define AIO_EVENT_BEGIN(uptr)
 #define AIO_EVENT_COMPLETE(uptr, reason)
 #define AIO_IS_ACTIVE(uptr) FALSE
-#define AIO_CANCEL(uptr) \
-    if ((uptr)->cancel) \
-        (uptr)->cancel (uptr)
+#define AIO_CANCEL(uptr)
 #define AIO_SET_INTERRUPT_LATENCY(instpersec)
 #define AIO_TLS
 #endif /* SIM_ASYNCH_IO */

sim_disk.c

@@ -239,7 +239,7 @@ if (ctx) {
 return FALSE;
 }

-static void _disk_cancel (UNIT *uptr)
+static t_bool _disk_cancel (UNIT *uptr)
 {
 struct disk_context *ctx = (struct disk_context *)uptr->disk_ctx;
@@ -252,6 +252,7 @@ if (ctx) {
         pthread_mutex_unlock (&ctx->io_lock);
         }
     }
+return FALSE;
 }
 #else
 #define AIO_CALLSETUP

sim_tape.c

@@ -335,7 +335,7 @@ if (ctx) {
 return FALSE;
 }

-static void _tape_cancel (UNIT *uptr)
+static t_bool _tape_cancel (UNIT *uptr)
 {
 struct tape_context *ctx = (struct tape_context *)uptr->tape_ctx;
@@ -348,6 +348,7 @@ if (ctx) {
         pthread_mutex_unlock (&ctx->io_lock);
         }
     }
+return FALSE;
 }
 #else
 #define AIO_CALLSETUP \
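
Both _disk_cancel and _tape_cancel now return FALSE unconditionally: they only synchronize with the unit's outstanding asynchronous I/O under ctx->io_lock, so the generic queue removal in sim_cancel must still run afterwards. A sketch of that shape under simplified stand-in types (my_io_context, my_io_cancel, and the condition-variable handshake are assumptions for illustration, not the real disk/tape context):

#include <pthread.h>

typedef int t_bool;
enum { FALSE, TRUE };

struct my_io_context {                  /* stand-in for disk/tape_context */
    pthread_mutex_t io_lock;
    pthread_cond_t  io_done;
    int             io_in_flight;
};

typedef struct UNIT UNIT;
struct UNIT { struct my_io_context *ctx; };

/* The shape both cancelers share after this commit: drain the unit's
   in-flight asynchronous I/O under the lock, then return FALSE so that
   sim_cancel still performs the ordinary event-queue removal. */
static t_bool my_io_cancel (UNIT *uptr)
{
    struct my_io_context *ctx = uptr->ctx;

    if (ctx) {
        pthread_mutex_lock (&ctx->io_lock);
        while (ctx->io_in_flight)       /* wait for the I/O thread */
            pthread_cond_wait (&ctx->io_done, &ctx->io_lock);
        pthread_mutex_unlock (&ctx->io_lock);
    }
    return FALSE;                       /* generic removal still needed */
}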

sim_timer.c

@@ -683,8 +683,8 @@ static double _timespec_to_double (struct timespec *time);
 static void _double_to_timespec (struct timespec *time, double dtime);
 static t_bool _rtcn_tick_catchup_check (int32 tmr, int32 time);
 static void _rtcn_configure_calibrated_clock (int32 newtmr);
-static void _sim_coschedule_cancel(UNIT *uptr);
-static void _sim_wallclock_cancel (UNIT *uptr);
+static t_bool _sim_coschedule_cancel(UNIT *uptr);
+static t_bool _sim_wallclock_cancel (UNIT *uptr);
 static t_bool _sim_wallclock_is_active (UNIT *uptr);
 t_stat sim_timer_show_idle_mode (FILE* st, UNIT* uptr, int32 val, CONST void * desc);
@@ -1762,7 +1762,7 @@ rtc_calib_tick_time[tmr] += rtc_clock_tick_size[tmr];
  * non-success status, while co-schedule activities might, so they are
  * queued to run from sim_process_event
  */
-sim_debug (DBG_QUE, &sim_timer_dev, "sim_timer_tick_svc - scheduling %s - cosched interval: %d\n", sim_uname (sim_clock_unit[tmr]), sim_cosched_interval[tmr]);
+sim_debug (DBG_QUE, &sim_timer_dev, "sim_timer_tick_svc(tmr=%d) - scheduling %s - cosched interval: %d\n", tmr, sim_uname (sim_clock_unit[tmr]), sim_cosched_interval[tmr]);
 if (sim_clock_unit[tmr]->action == NULL)
     return SCPE_IERR;
 stat = sim_clock_unit[tmr]->action (sim_clock_unit[tmr]);
@@ -2171,6 +2171,7 @@ for (tmr=0; tmr<=SIM_NTIMERS; tmr++) {
         accum += cptr->time;
         _sim_activate (cptr, accum*rtc_currd[tmr]);
         }
+    sim_cosched_interval[tmr] = 0;
     }
 }
 sim_cancel (&SIM_INTERNAL_UNIT);                /* Make sure Internal Timer is stopped */
@@ -2416,7 +2417,7 @@ else {
 for (cptr = sim_clock_cosched_queue[tmr]; cptr != QUEUE_LIST_END; cptr = cptr->next) {
     if (ticks < (accum + cptr->time))
         break;
-    accum = accum + cptr->time;
+    accum += cptr->time;
     prvptr = cptr;
     }
 if (prvptr == NULL) {
@@ -2443,7 +2444,7 @@ return sim_clock_coschedule_tmr (uptr, tmr, ticks);
 }

 /* Cancel a unit on the coschedule queue */
-static void _sim_coschedule_cancel (UNIT *uptr)
+static t_bool _sim_coschedule_cancel (UNIT *uptr)
 {
 AIO_UPDATE_QUEUE;
 if (uptr->next) {                               /* On a queue? */
@@ -2474,11 +2475,12 @@ if (uptr->next) {                            /* On a queue? */
             if (nptr != QUEUE_LIST_END)
                 nptr->time += uptr->time;
             sim_debug (DBG_QUE, &sim_timer_dev, "Canceled Clock Coscheduled Event for %s\n", sim_uname(uptr));
-            return;
+            return TRUE;
             }
         }
     }
 }
+return FALSE;
 }

 t_bool sim_timer_is_active (UNIT *uptr)
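
_sim_coschedule_cancel searches the per-timer coschedule queues, which store relative (delta) times: each entry's time field counts ticks after its predecessor. Removing an entry therefore folds its remaining delta into its successor (nptr->time += uptr->time) so everything behind it keeps its absolute schedule, and returning TRUE tells the new sim_cancel fast path that no further searching is needed. A self-contained sketch of that unlink (simplified: the real code uses a QUEUE_LIST_END sentinel rather than NULL and one queue per timer):

typedef int t_bool;
enum { FALSE, TRUE };

typedef struct UNIT UNIT;
struct UNIT {
    UNIT *next;
    int   time;                 /* ticks relative to the previous entry */
};

/* Unlink uptr from a delta-time queue.  The removed entry's remaining
   delta is folded into its successor so every entry behind it still
   fires on the same absolute tick.  Returns TRUE only when the unit
   was actually found and removed. */
static t_bool delta_queue_cancel (UNIT **queue, UNIT *uptr)
{
    UNIT *prvptr = NULL;

    for (UNIT *cptr = *queue; cptr != NULL; prvptr = cptr, cptr = cptr->next)
        if (cptr == uptr) {
            UNIT *nptr = uptr->next;
            if (prvptr)
                prvptr->next = nptr;
            else
                *queue = nptr;
            if (nptr != NULL)
                nptr->time += uptr->time;   /* preserve the schedule */
            uptr->next = NULL;
            return TRUE;
        }
    return FALSE;
}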
@@ -2495,9 +2497,10 @@ return FALSE;
 }

 #if defined(SIM_ASYNCH_CLOCKS)
-static void _sim_wallclock_cancel (UNIT *uptr)
+static t_bool _sim_wallclock_cancel (UNIT *uptr)
 {
 int32 tmr;
+t_bool b_return = FALSE;

 AIO_UPDATE_QUEUE;
 pthread_mutex_lock (&sim_timer_lock);
@@ -2542,9 +2545,11 @@ if (uptr->a_next) {
             sim_clock_unit[tmr]->cancel = NULL;
             sim_clock_unit[tmr]->a_is_active = NULL;
             }
+        b_return = TRUE;
         }
     }
 pthread_mutex_unlock (&sim_timer_lock);
+return b_return;
 }

 static t_bool _sim_wallclock_is_active (UNIT *uptr)
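
Taken together, every specialized canceler touched by this commit (_disk_cancel, _tape_cancel, _sim_coschedule_cancel, _sim_wallclock_cancel) now reports whether it fully handled the cancellation, which is what lets sim_cancel stop searching as soon as one of them answers TRUE.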