Added missing sim_cancel support for device simulation code that uses the sim_disk and sim_tape libraries and performs asynchronous I/O, for the case where asynchronous I/O is active

Mark Pizzolato 2012-12-12 10:58:19 -08:00
parent e541a3408f
commit 2b5ceae2be
5 changed files with 110 additions and 6 deletions
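A minimal sketch of what this enables on the device-simulator side (the routine and names below are illustrative, not part of the commit): cancelling a unit now also cancels, or waits for, I/O that is still running on the unit's asynchronous I/O thread.

/* Sketch: a hypothetical detach routine for a device built on sim_disk.
   With this change, sim_cancel() reaches the unit's new a_cancel callback
   via AIO_CANCEL, so detach cannot race the asynchronous I/O thread. */
t_stat my_disk_detach (UNIT *uptr)              /* illustrative name */
{
sim_cancel (uptr);                              /* drains any pending async disk I/O */
return sim_disk_detach (uptr);                  /* then release the container file */
}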

scp.c

@@ -3144,7 +3144,7 @@ for (i = 0; (dptr = sim_devices[i]) != NULL; i++) { /* loop thru devices */
WRITE_I (dptr->flags); /* [V2.10] flags */
for (j = 0; j < dptr->numunits; j++) {
uptr = dptr->units + j;
t = sim_is_active (uptr);
t = sim_activate_time (uptr);
WRITE_I (j); /* unit number */
WRITE_I (t); /* activation time */
WRITE_I (uptr->u3); /* unit specific */
@@ -5221,6 +5221,7 @@ return SCPE_OK;
sim_cancel remove entry from event queue
sim_process_event process entries on event queue
sim_is_active see if entry is on event queue
sim_activate_time return time until activation
sim_atime return absolute time for an entry
sim_gtime return global time
sim_qcount return event queue entry count
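The save-file hunk above shows the intended pairing: record the remaining activation time with sim_activate_time rather than the sim_is_active value. A hedged sketch of the restore side, following the +1/0 convention documented here (variable names are illustrative):

/* Sketch: re-queue a unit from a saved activation time.
   sim_activate_time returns 0 for an idle unit, otherwise time-until-event + 1. */
int32 t = sim_activate_time (uptr);
if (t > 0)
    sim_activate (uptr, t - 1);                 /* restore the pending event */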
@@ -5357,7 +5358,6 @@ uint32 rtimenow, urtime = (uint32)rtime;
AIO_ACTIVATE (sim_activate_notbefore, uptr, rtime);
sim_cancel (uptr);
rtimenow = sim_grtime();
sim_cancel (uptr);
if (0x80000000 <= urtime-rtimenow)
return sim_activate (uptr, 0);
else
@@ -5378,9 +5378,13 @@ t_stat sim_cancel (UNIT *uptr)
UNIT *cptr, *nptr;
AIO_VALIDATE;
AIO_CANCEL(uptr);
AIO_UPDATE_QUEUE;
if (sim_clock_queue == NULL)
return SCPE_OK;
UPDATE_SIM_TIME (sim_clock_queue->time); /* update sim time */
if (!sim_is_active_bool (uptr))
return SCPE_OK;
nptr = NULL;
if (sim_clock_queue == uptr)
nptr = sim_clock_queue = uptr->next;
@@ -5415,6 +5419,49 @@ int32 sim_is_active (UNIT *uptr)
UNIT *cptr;
int32 accum;
AIO_VALIDATE;
AIO_UPDATE_QUEUE;
accum = 0;
for (cptr = sim_clock_queue; cptr != NULL; cptr = cptr->next) {
if (cptr == sim_clock_queue) {
if (sim_interval > 0)
accum = accum + sim_interval;
}
else accum = accum + cptr->time;
if (cptr == uptr)
return accum + 1;
}
return 0;
}
/* sim_is_active_bool - test for entry in queue
Inputs:
uptr = pointer to unit
Outputs:
result = TRUE if unit is busy, FALSE inactive
*/
t_bool sim_is_active_bool (UNIT *uptr)
{
AIO_VALIDATE;
AIO_UPDATE_QUEUE;
return ((uptr->next) || AIO_IS_ACTIVE(uptr));
}
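A brief contrast may help (a sketch, not text from the commit): sim_is_active reports a time-based value for event-queue entries, while sim_is_active_bool also reflects work still parked on an asynchronous I/O thread, so the two can disagree for an async unit.

/* Sketch: the two tests viewed side by side for one unit. */
int32 t = sim_is_active (uptr);              /* queue time + 1, or 0 if not queued */
t_bool busy = sim_is_active_bool (uptr);     /* also TRUE while AIO_IS_ACTIVE(uptr) */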
/* sim_activate_time - return activation time
Inputs:
uptr = pointer to unit
Outputs:
result = absolute activation time + 1, 0 if inactive
*/
int32 sim_activate_time (UNIT *uptr)
{
UNIT *cptr;
int32 accum;
AIO_VALIDATE;
accum = 0;
for (cptr = sim_clock_queue; cptr != NULL; cptr = cptr->next) {

scp.h

@@ -90,6 +90,8 @@ t_stat sim_activate_abs (UNIT *uptr, int32 interval);
t_stat sim_activate_notbefore (UNIT *uptr, int32 rtime);
t_stat sim_cancel (UNIT *uptr);
int32 sim_is_active (UNIT *uptr);
t_bool sim_is_active_bool (UNIT *uptr);
int32 sim_activate_time (UNIT *uptr);
double sim_gtime (void);
uint32 sim_grtime (void);
int32 sim_qcount (void);

sim_defs.h

@@ -368,6 +368,8 @@ struct sim_unit {
void *up8; /* device specific */
#ifdef SIM_ASYNCH_IO
void (*a_check_completion)(struct sim_unit *);
t_bool (*a_is_active)(struct sim_unit *);
void (*a_cancel)(struct sim_unit *);
struct sim_unit *a_next; /* next asynch active */
int32 a_event_time;
t_stat (*a_activate_call)(struct sim_unit *, int32);
@@ -609,7 +611,8 @@ extern int32 sim_asynch_inst_latency;
pthread_mutex_destroy(&sim_asynch_lock); \
pthread_cond_destroy(&sim_asynch_wake); \
}
#define AIO_IS_ACTIVE(uptr) (((uptr)->a_is_active ? (uptr)->a_is_active (uptr) : FALSE) || ((uptr)->a_next))
#define AIO_CANCEL(uptr) if ((uptr)->a_cancel) (uptr)->a_cancel (uptr); else (void)0
#if defined(__DECC_VER)
#include <builtins>
#if defined(__IA64)
@@ -750,6 +753,8 @@ extern int32 sim_asynch_inst_latency;
#define AIO_CHECK_EVENT
#define AIO_INIT
#define AIO_CLEANUP
#define AIO_IS_ACTIVE(uptr) FALSE
#define AIO_CANCEL(uptr)
#define AIO_SET_INTERRUPT_LATENCY(instpersec)
#endif /* SIM_ASYNCH_IO */

sim_disk.c

@@ -108,6 +108,7 @@ struct disk_context {
pthread_t io_thread; /* I/O Thread Id */
pthread_mutex_t io_lock;
pthread_cond_t io_cond;
pthread_cond_t io_done;
pthread_cond_t startup_cond;
int io_dop;
uint8 *buf;
@@ -196,6 +197,7 @@ while (ctx->asynch_io) {
}
pthread_mutex_lock (&ctx->io_lock);
ctx->io_dop = DOP_DONE;
pthread_cond_signal (&ctx->io_done);
sim_activate (uptr, ctx->asynch_io_latency);
}
pthread_mutex_unlock (&ctx->io_lock);
@@ -209,7 +211,7 @@ return NULL;
processing events for any unit. It is only called when an asynchronous
thread has called sim_activate() to activate a unit. The job of this
routine is to put the unit in proper condition to digest what may have
occurred in the asynchrcondition thread.
occurred in the asynchronous thread.
Since disk processing only handles a single I/O at a time to a
particular disk device (due to using stdio for the SimH Disk format
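Because at most one operation is outstanding per unit, the cancel hook added below only has to wait out that single operation. A sketch of the resulting behavior from the caller's side (an assumption-laden illustration, not part of the commit):

/* Sketch: sim_cancel -> AIO_CANCEL -> _disk_cancel, which blocks on io_done
   until the I/O thread sets io_dop back to DOP_DONE. */
if (sim_is_active_bool (uptr))               /* TRUE while disk I/O is in flight */
    sim_cancel (uptr);                       /* returns only once that I/O is done */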
@@ -231,6 +233,25 @@ if (ctx->callback && ctx->io_dop == DOP_DONE) {
callback (uptr, ctx->io_status);
}
}
static t_bool _disk_is_active (UNIT *uptr)
{
struct disk_context *ctx = (struct disk_context *)uptr->disk_ctx;
sim_debug (ctx->dbit, ctx->dptr, "_disk_is_active(unit=%d, dop=%d)\n", uptr-ctx->dptr->units, ctx->io_dop);
return (ctx->io_dop != DOP_DONE);
}
static void _disk_cancel (UNIT *uptr)
{
struct disk_context *ctx = (struct disk_context *)uptr->disk_ctx;
sim_debug (ctx->dbit, ctx->dptr, "_disk_cancel(unit=%d, dop=%d)\n", uptr-ctx->dptr->units, ctx->io_dop);
pthread_mutex_lock (&ctx->io_lock);
while (ctx->io_dop != DOP_DONE)
pthread_cond_wait (&ctx->io_done, &ctx->io_lock);
pthread_mutex_unlock (&ctx->io_lock);
}
#else
#define AIO_CALLSETUP
#define AIO_CALL(op, _lba, _buf, _rsects, _sects, _callback) \
@@ -425,6 +446,7 @@ ctx->asynch_io_latency = latency;
if (ctx->asynch_io) {
pthread_mutex_init (&ctx->io_lock, NULL);
pthread_cond_init (&ctx->io_cond, NULL);
pthread_cond_init (&ctx->io_done, NULL);
pthread_cond_init (&ctx->startup_cond, NULL);
pthread_attr_init(&attr);
pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM);
@@ -434,8 +456,10 @@ if (ctx->asynch_io) {
pthread_cond_wait (&ctx->startup_cond, &ctx->io_lock); /* Wait for thread to stabilize */
pthread_mutex_unlock (&ctx->io_lock);
pthread_cond_destroy (&ctx->startup_cond);
uptr->a_check_completion = _disk_completion_dispatch;
}
uptr->a_check_completion = _disk_completion_dispatch;
uptr->a_is_active = _disk_is_active;
uptr->a_cancel = _disk_cancel;
#endif
return SCPE_OK;
}
@@ -460,6 +484,7 @@ if (ctx->asynch_io) {
pthread_join (ctx->io_thread, NULL);
pthread_mutex_destroy (&ctx->io_lock);
pthread_cond_destroy (&ctx->io_cond);
pthread_cond_destroy (&ctx->io_done);
}
return SCPE_OK;
#endif

sim_tape.c

@@ -125,6 +125,7 @@ struct tape_context {
pthread_t io_thread; /* I/O Thread Id */
pthread_mutex_t io_lock;
pthread_cond_t io_cond;
pthread_cond_t io_done;
pthread_cond_t startup_cond;
int io_top;
uint8 *buf;
@@ -273,6 +274,7 @@ struct tape_context *ctx = (struct tape_context *)uptr->tape_ctx;
}
pthread_mutex_lock (&ctx->io_lock);
ctx->io_top = TOP_DONE;
pthread_cond_signal (&ctx->io_done);
sim_activate (uptr, ctx->asynch_io_latency);
}
pthread_mutex_unlock (&ctx->io_lock);
@@ -286,7 +288,7 @@ struct tape_context *ctx = (struct tape_context *)uptr->tape_ctx;
processing events for any unit. It is only called when an asynchronous
thread has called sim_activate() to activate a unit. The job of this
routine is to put the unit in proper condition to digest what may have
occurred in the asynchrcondition thread.
occurred in the asynchronous thread.
Since tape processing only handles a single I/O at a time to a
particular tape device, we have the opportunity to possibly detect
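The matching tape hooks added below make such a unit report busy while io_top is anything other than TOP_DONE. A short sketch of the visible effect (illustrative, not part of the commit):

/* Sketch: a controller polling a tape unit now sees asynchronous work too. */
if (sim_is_active_bool (uptr))               /* includes async tape I/O via a_is_active */
    return SCPE_OK;                          /* treat the drive as still busy */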
@@ -306,6 +308,25 @@ if (ctx->callback && ctx->io_top == TOP_DONE) {
callback (uptr, ctx->io_status);
}
}
static t_bool _tape_is_active (UNIT *uptr)
{
struct tape_context *ctx = (struct tape_context *)uptr->tape_ctx;
sim_debug (ctx->dbit, ctx->dptr, "_tape_is_active(unit=%d, top=%d)\n", uptr-ctx->dptr->units, ctx->io_top);
return (ctx->io_top != TOP_DONE);
}
static void _tape_cancel (UNIT *uptr)
{
struct tape_context *ctx = (struct tape_context *)uptr->tape_ctx;
sim_debug (ctx->dbit, ctx->dptr, "_tape_cancel(unit=%d, top=%d)\n", uptr-ctx->dptr->units, ctx->io_top);
pthread_mutex_lock (&ctx->io_lock);
while (ctx->io_top != TOP_DONE)
pthread_cond_wait (&ctx->io_done, &ctx->io_lock);
pthread_mutex_unlock (&ctx->io_lock);
}
#else
#define AIO_CALLSETUP
#define AIO_CALL(op, _buf, _fc, _bc, _max, _vbc, _gaplen, _bpi, _obj, _callback) \
@@ -333,6 +354,7 @@ ctx->asynch_io_latency = latency;
if (ctx->asynch_io) {
pthread_mutex_init (&ctx->io_lock, NULL);
pthread_cond_init (&ctx->io_cond, NULL);
pthread_cond_init (&ctx->io_done, NULL);
pthread_cond_init (&ctx->startup_cond, NULL);
pthread_attr_init(&attr);
pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM);
@@ -344,6 +366,8 @@ if (ctx->asynch_io) {
pthread_cond_destroy (&ctx->startup_cond);
}
uptr->a_check_completion = _tape_completion_dispatch;
uptr->a_is_active = _tape_is_active;
uptr->a_cancel = _tape_cancel;
#endif
return SCPE_OK;
}
@@ -368,6 +392,7 @@ if (ctx->asynch_io) {
pthread_join (ctx->io_thread, NULL);
pthread_mutex_destroy (&ctx->io_lock);
pthread_cond_destroy (&ctx->io_cond);
pthread_cond_destroy (&ctx->io_done);
}
return SCPE_OK;
#endif