- Changed asynch queue insertion and removal to use a lock-free algorithm based only on InterlockedCompareExchangePointer; this lock-free approach can now also be used on IA64 host systems (a simplified sketch of the insertion/removal pattern follows the change list below).
- Removed flawed logic which assumed that sim_interval was meaningful when referenced by an asynchronous thread.
- Adjusted the event_time of events removed from the asynch queue to account for the average time spent on the queue before the event was noticed by the instruction execution thread.
- Added a sim_activate_notbefore function which takes an rtime argument specifying the earliest time the event should fire.
- Changed the 'wakeup from idle' logic to force an immediate asynch queue check if the wakeup was not due to a timeout (i.e. it was due to an asynch queue insertion).
- Fixed descrip.mms to build asynchronous support on AXP and IA64 VMS with kernel threads enabled.
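For orientation, here is a minimal sketch of the compare-and-swap insertion/removal pattern this change adopts. It assumes a host with the GCC __sync_val_compare_and_swap builtin (which the sim_defs.h diff below maps InterlockedCompareExchangePointer onto for non-Windows builds). It is an illustration only, not the actual AIO_ACTIVATE/AIO_UPDATE_QUEUE macros; the node type and the helper names aio_push, aio_drain, queue_cas, and LIST_END are invented for the example.

/* Minimal sketch of a CAS-only producer/consumer list (illustrative names). */
#include <stddef.h>

typedef struct node {
    struct node *next;
    int payload;
} node_t;

#define LIST_END ((node_t *)1)          /* deliberately not a valid pointer */

static node_t *queue_head = LIST_END;   /* shared list head */

/* Atomic compare-and-swap on the head; returns the value that was seen,
   matching the way the AIO_QUEUE_VAL/AIO_QUEUE_SET macros are used. */
static node_t *queue_cas (node_t *expected, node_t *replacement)
{
return __sync_val_compare_and_swap (&queue_head, expected, replacement);
}

/* Producer (I/O completion thread): push one entry onto the head. */
void aio_push (node_t *n)
{
node_t *old;

do {
    old = queue_cas (NULL, NULL);               /* read current head */
    n->next = old;                              /* link in front of it */
    } while (queue_cas (old, n) != old);        /* retry if we lost the race */
}

/* Consumer (instruction execution thread): detach the whole list with one
   successful CAS, then process the detached entries without contention. */
void aio_drain (void (*process)(node_t *))
{
node_t *q, *next;

do
    q = queue_cas (NULL, NULL);                 /* read current head */
while (queue_cas (q, LIST_END) != q);           /* swap in the empty marker */
for (; q != LIST_END; q = next) {
    next = q->next;
    q->next = NULL;                             /* entry is no longer queued */
    process (q);
    }
}

The point mirrored from the real code is that the consumer detaches the entire pending list with a single successful compare-and-swap against the list-end marker, so producers never block and the instruction thread processes the detached entries without further traffic on the shared head.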
parent 7ac3557524
commit ab3af3062d

8 changed files with 149 additions and 89 deletions
@@ -1794,15 +1794,13 @@ return 0; /* success! */
void rq_io_complete (UNIT *uptr, t_stat status)
{
MSC *cp = rq_ctxmap[uptr->cnum];
int32 elapsed = sim_grtime()-uptr->iostarttime;

sim_debug (DBG_TRC, rq_devmap[cp->cnum], "rq_io_complete(status=%d)\n", status);

uptr->io_status = status;
uptr->io_complete = 1;
/* Reschedule for the appropriate delay */
if (elapsed <= rq_xtime)
    sim_activate_abs (uptr, rq_xtime-elapsed);
sim_activate_notbefore (uptr, uptr->iostarttime+rq_xtime);
}

/* Unit service for data transfer commands */
@@ -1280,15 +1280,13 @@ return ST_SUC; /* success! */
void tq_io_complete (UNIT *uptr, t_stat status)
{
struct tq_req_results *res = (struct tq_req_results *)uptr->results;
int32 elapsed = sim_grtime()-uptr->iostarttime;

sim_debug(DBG_TRC, &tq_dev, "tq_io_complete(status=%d)\n", status);

res->io_status = status;
res->io_complete = 1;
/* Reschedule for the appropriate delay */
if (elapsed <= tq_xtime)
    sim_activate_abs (uptr, tq_xtime-elapsed);
sim_activate_notbefore (uptr, uptr->iostarttime+tq_xtime);
}
@@ -626,7 +626,7 @@ for ( ;; ) {
    }
fault_PC = PC;
recqptr = 0; /* clr recovery q */
AIO_CHECK_EVENT;
AIO_CHECK_EVENT; /* queue async events */
if (sim_interval <= 0) { /* chk clock queue */
    temp = sim_process_event ();
    if (temp)

descrip.mms (24 changed lines)

@@ -85,7 +85,6 @@
CC_DEBUG = /DEBUG

.IFDEF DEBUG
LINK_DEBUG = /DEBUG/TRACEBACK
CC_OPTIMIZE = /NOOPTIMIZE
NEST_DEBUG = ,DEBUG=1
@@ -95,27 +94,37 @@ CC_FLAGS = /PREF=ALL
.IFDEF NOASYNCH
ARCH = AXP-NOASYNCH-DBG
CC_DEFS = "_LARGEFILE"
LINK_DEBUG = /DEBUG/TRACEBACK
.ELSE
ARCH = AXP-DBG
CC_DEFS = "_LARGEFILE","SIM_ASYNCH_IO=1"
LINK_DEBUG = /DEBUG/TRACEBACK/THREADS_ENABLE
.ENDIF
.ENDIF

.IFDEF MMSIA64
ALPHA_OR_IA64 = 1
CC_FLAGS = /PREF=ALL
.IFDEF NOASYNCH
ARCH = I64-NOASYNCH-DBG
CC_DEFS = "_LARGEFILE"
LINK_DEBUG = /DEBUG/TRACEBACK
.ELSE
ARCH = I64-DBG
CC_DEFS = "_LARGEFILE","SIM_ASYNCH_IO=1"
LINK_DEBUG = /DEBUG/TRACEBACK/THREADS_ENABLE
.ENDIF
.ENDIF

.IFDEF MMSVAX
CC_FLAGS = $(CC_FLAGS)
ARCH = VAX-DBG
CC_DEFS = "__VAX"
LINK_DEBUG = /DEBUG/TRACEBACK
.ENDIF

.ELSE
LINK_DEBUG = /NODEBUG/NOTRACEBACK
# !DEBUG

.IFDEF MMSALPHA
ALPHA_OR_IA64 = 1
@@ -124,9 +133,11 @@ CC_FLAGS = /PREF=ALL
.IFDEF NOASYNCH
ARCH = AXP-NOASYNCH
CC_DEFS = "_LARGEFILE"
LINK_DEBUG = /NODEBUG/NOTRACEBACK
.ELSE
ARCH = AXP
CC_DEFS = "_LARGEFILE","SIM_ASYNCH_IO=1"
LINK_DEBUG = /NODEBUG/NOTRACEBACK/THREADS_ENABLE
.ENDIF
LINK_SECTION_BINDING = /SECTION_BINDING
.ENDIF
@@ -135,8 +146,15 @@ LINK_SECTION_BINDING = /SECTION_BINDING
ALPHA_OR_IA64 = 1
CC_OPTIMIZE = /OPT=(LEV=5)
CC_FLAGS = /PREF=ALL
.IFDEF NOASYNCH
ARCH = I64-NOASYNCH
CC_DEFS = "_LARGEFILE"
LINK_DEBUG = /NODEBUG/NOTRACEBACK
.ELSE
ARCH = I64
CC_DEFS = "_LARGEFILE","SIM_ASYNCH_IO=1"
LINK_DEBUG = /NODEBUG/NOTRACEBACK/THREADS_ENABLE
.ENDIF
.ENDIF

.IFDEF MMSVAX
@@ -144,10 +162,12 @@ CC_OPTIMIZE = /OPTIMIZE
CC_FLAGS = $(CC_FLAGS)
ARCH = VAX
CC_DEFS = "__VAX"
LINK_DEBUG = /NODEBUG/NOTRACEBACK
.ENDIF

.ENDIF


# Define Our Compiler Flags & Define The Compile Command
OUR_CC_FLAGS = $(CC_FLAGS)$(CC_DEBUG)$(CC_OPTIMIZE) \
        /NEST=PRIMARY/NAME=(AS_IS,SHORT)

scp.c (30 changed lines)

@@ -1961,17 +1961,17 @@ for (uptr = sim_clock_queue; uptr != NULL; uptr = uptr->next) {
#if defined (SIM_ASYNCH_IO)
pthread_mutex_lock (&sim_asynch_lock);
fprintf (st, "asynchronous pending event queue\n");
if (sim_asynch_queue == (void *)-1)
if (sim_asynch_queue == AIO_LIST_END)
    fprintf (st, "Empty\n");
else {
    for (uptr = sim_asynch_queue; uptr != (void *)-1; uptr = uptr->a_next) {
    for (uptr = sim_asynch_queue; uptr != AIO_LIST_END; uptr = uptr->a_next) {
        if ((dptr = find_dev_from_unit (uptr)) != NULL) {
            fprintf (st, " %s", sim_dname (dptr));
            if (dptr->numunits > 1) fprintf (st, " unit %d",
                (int32) (uptr - dptr->units));
            }
        else fprintf (st, " Unknown");
        fprintf (st, " event delay %d, queue time %d\n", uptr->a_event_time, uptr->a_sim_interval);
        fprintf (st, " event delay %d\n", uptr->a_event_time);
        }
    }
fprintf (st, "asynch latency: %d nanoseconds\n", sim_asynch_latency);
@@ -4914,6 +4914,30 @@ sim_cancel (uptr);
return sim_activate (uptr, event_time);
}

/* sim_activate_notbefore - activate (queue) event even if event already scheduled
        but not before the specified time

   Inputs:
        uptr    = pointer to unit
        rtime   = relative timeout
   Outputs:
        reason  = result (SCPE_OK if ok)
*/

t_stat sim_activate_notbefore (UNIT *uptr, int32 rtime)
{
uint32 rtimenow, urtime = (uint32)rtime;

AIO_ACTIVATE (sim_activate_notbefore, uptr, rtime);
sim_cancel (uptr);
rtimenow = sim_grtime();
sim_cancel (uptr);
if (0x80000000 <= urtime-rtimenow)
    return sim_activate (uptr, 0);
else
    return sim_activate (uptr, urtime-rtimenow);
}

/* sim_cancel - cancel (dequeue) event

   Inputs:

scp.h (1 changed line)

@@ -83,6 +83,7 @@ t_stat echo_cmd (int32 flag, char *ptr);
t_stat sim_process_event (void);
t_stat sim_activate (UNIT *uptr, int32 interval);
t_stat sim_activate_abs (UNIT *uptr, int32 interval);
t_stat sim_activate_notbefore (UNIT *uptr, int32 rtime);
t_stat sim_cancel (UNIT *uptr);
int32 sim_is_active (UNIT *uptr);
double sim_gtime (void);

sim_defs.h (108 changed lines)

@@ -368,7 +368,6 @@ struct sim_unit {
void (*a_check_completion)(struct sim_unit *);
struct sim_unit *a_next; /* next asynch active */
int32 a_event_time;
int32 a_sim_interval;
t_stat (*a_activate_call)(struct sim_unit *, int32);
#endif
};
@@ -564,35 +563,40 @@ extern pthread_mutex_t sim_asynch_lock;
extern pthread_cond_t sim_asynch_wake;
extern pthread_t sim_asynch_main_threadid;
extern struct sim_unit *sim_asynch_queue;
extern t_bool sim_idle_wait;
extern volatile t_bool sim_idle_wait;
extern t_bool sim_asynch_enabled;
extern int32 sim_asynch_check;
extern int32 sim_asynch_latency;
extern int32 sim_asynch_inst_latency;

#define AIO_LIST_END ((void *)1) /* Chosen to deliberately not be a valid pointer (alignment) */
#define AIO_INIT \
if (1) { \
sim_asynch_main_threadid = pthread_self(); \
/* Empty list/list end uses the point value (void *)-1. \
/* Empty list/list end uses the point value (void *)1. \
This allows NULL in an entry's a_next pointer to \
indicate that the entry is not currently in any list */ \
sim_asynch_queue = (void *)-1; \
sim_asynch_queue = AIO_LIST_END; \
}
#define AIO_CLEANUP \
if (1) { \
pthread_mutex_destroy(&sim_asynch_lock); \
pthread_cond_destroy(&sim_asynch_wake); \
}

#if defined(__DECC_VER)
#include <builtins>
#if defined(__IA64)
#define USE_AIO_INTRINSICS 1
#endif
#endif
#if defined(_WIN32) || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
#define USE_AIO_INTRINSICS 1
#endif
#ifdef USE_AIO_INTRINSICS
/* This approach uses intrinsics to manage access to the link list head */
/* sim_asynch_queue. However, once the list head state has been determined */
/* a lock is used to manage the list update and entry removal. */
/* This approach avoids the ABA issues with a completly lock free approach */
/* since the ABA problem is very likely to happen with this use model, and */
/* it avoids the lock overhead for the simple list head checking. */
/* sim_asynch_queue. This implementation is a completely lock free design */
/* which avoids the potential ABA issues. */
#ifdef _WIN32
#include <winsock2.h>
#ifdef ERROR
@@ -600,66 +604,81 @@ extern int32 sim_asynch_inst_latency;
#endif /* ERROR */
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
#define InterlockedCompareExchangePointer(Destination, Exchange, Comparand) __sync_val_compare_and_swap(Destination, Comparand, Exchange)
#define InterlockedExchangePointer(Destination, value) __sync_lock_test_and_set(Destination, value)
#elif defined(__DECC_VER)
#define InterlockedCompareExchangePointer(Destination, Exchange, Comparand) (void *)((int32)_InterlockedCompareExchange64_rel(Destination, Exchange, Comparand))
#else
#error "Implementation of functions InterlockedCompareExchangePointer() and InterlockedExchangePointer() are needed to build with USE_AIO_INTRINSICS"
#error "Implementation of function InterlockedCompareExchangePointer() is needed to build with USE_AIO_INTRINSICS"
#endif
#define AIO_QUEUE_VAL InterlockedCompareExchangePointer(&sim_asynch_queue, sim_asynch_queue, NULL)
#define AIO_QUEUE_SET(val) InterlockedExchangePointer(&sim_asynch_queue, val)
#define AIO_QUEUE_SET(val, queue) InterlockedCompareExchangePointer(&sim_asynch_queue, val, queue)
#define AIO_UPDATE_QUEUE \
if (1) { \
UNIT *uptr; \
if (AIO_QUEUE_VAL != (void *)-1) { \
pthread_mutex_lock (&sim_asynch_lock); \
while ((uptr = AIO_QUEUE_VAL) != (void *)-1) { \
if (AIO_QUEUE_VAL != AIO_LIST_END) { /* List !Empty */ \
UNIT *q, *uptr; \
int32 a_event_time; \
AIO_QUEUE_SET(uptr->a_next); \
do \
q = AIO_QUEUE_VAL; \
while (q != AIO_QUEUE_SET(AIO_LIST_END, q)); \
while (q != AIO_LIST_END) { /* List !Empty */ \
uptr = q; \
q = q->a_next; \
uptr->a_next = NULL; /* hygiene */ \
a_event_time = uptr->a_event_time-(uptr->a_sim_interval-sim_interval); \
if (a_event_time < 0) a_event_time = 0; \
if (uptr->a_activate_call != &sim_activate_notbefore) { \
a_event_time = uptr->a_event_time-((sim_asynch_inst_latency+1)/2); \
if (a_event_time < 0) \
a_event_time = 0; \
} \
else \
a_event_time = uptr->a_event_time; \
uptr->a_activate_call (uptr, a_event_time); \
if (uptr->a_check_completion) { \
pthread_mutex_unlock (&sim_asynch_lock); \
if (uptr->a_check_completion) \
uptr->a_check_completion (uptr); \
pthread_mutex_lock (&sim_asynch_lock); \
} \
} \
pthread_mutex_unlock (&sim_asynch_lock); \
} \
sim_asynch_check = sim_asynch_inst_latency; \
}
} else 0
#define AIO_ACTIVATE(caller, uptr, event_time) \
if (!pthread_equal ( pthread_self(), sim_asynch_main_threadid )) { \
pthread_mutex_lock (&sim_asynch_lock); \
if (uptr->a_next) { \
uptr->a_activate_call = sim_activate_abs; \
} else { \
uptr->a_next = AIO_QUEUE_VAL; \
UNIT *q, *qe; \
uptr->a_event_time = event_time; \
uptr->a_sim_interval = sim_interval; \
uptr->a_activate_call = caller; \
AIO_QUEUE_SET(uptr); \
uptr->a_activate_call = sim_activate; \
uptr->a_next = AIO_LIST_END; /* Mark as on list */ \
do { \
do \
q = AIO_QUEUE_VAL; \
while (q != AIO_QUEUE_SET(AIO_LIST_END, q));/* Grab current list */ \
for (qe = uptr; qe->a_next != AIO_LIST_END; qe = qe->a_next); \
qe->a_next = q; /* append current list */\
do \
q = AIO_QUEUE_VAL; \
while (q != AIO_QUEUE_SET(uptr, q)); \
uptr = q; \
} while (uptr != AIO_LIST_END); \
} \
if (sim_idle_wait) \
pthread_cond_signal (&sim_asynch_wake); \
pthread_mutex_unlock (&sim_asynch_lock); \
return SCPE_OK; \
}
} else 0
#else /* !USE_AIO_INTRINSICS */
/* This approach uses a pthread mutex to manage access to the link list */
/* head sim_asynch_queue. It will always work, but may be slower than the */
/* partially lock free approach when using USE_AIO_INTRINSICS */
/* lock free approach when using USE_AIO_INTRINSICS */
#define AIO_UPDATE_QUEUE \
if (1) { \
UNIT *uptr; \
pthread_mutex_lock (&sim_asynch_lock); \
while (sim_asynch_queue != (void *)-1) { /* List !Empty */ \
while (sim_asynch_queue != AIO_LIST_END) { /* List !Empty */ \
int32 a_event_time; \
uptr = sim_asynch_queue; \
sim_asynch_queue = uptr->a_next; \
uptr->a_next = NULL; \
a_event_time = uptr->a_event_time-(uptr->a_sim_interval-sim_interval); \
if (a_event_time < 0) a_event_time = 0; \
if (uptr->a_activate_call != &sim_activate_notbefore) { \
a_event_time = uptr->a_event_time-((sim_asynch_inst_latency+1)/2); \
if (a_event_time < 0) \
a_event_time = 0; \
} \
else \
a_event_time = uptr->a_event_time; \
uptr->a_activate_call (uptr, a_event_time); \
if (uptr->a_check_completion) { \
pthread_mutex_unlock (&sim_asynch_lock); \
@@ -668,8 +687,7 @@ extern int32 sim_asynch_inst_latency;
} \
} \
pthread_mutex_unlock (&sim_asynch_lock); \
sim_asynch_check = sim_asynch_inst_latency; \
}
} else 0
#define AIO_ACTIVATE(caller, uptr, event_time) \
if (!pthread_equal ( pthread_self(), sim_asynch_main_threadid )) { \
pthread_mutex_lock (&sim_asynch_lock); \
@@ -678,7 +696,6 @@ extern int32 sim_asynch_inst_latency;
} else { \
uptr->a_next = sim_asynch_queue; \
uptr->a_event_time = event_time; \
uptr->a_sim_interval = sim_interval; \
uptr->a_activate_call = caller; \
sim_asynch_queue = uptr; \
} \
@@ -686,19 +703,20 @@ extern int32 sim_asynch_inst_latency;
pthread_cond_signal (&sim_asynch_wake); \
pthread_mutex_unlock (&sim_asynch_lock); \
return SCPE_OK; \
}
} else 0
#endif /* USE_AIO_INTRINSICS */
#define AIO_VALIDATE if (!pthread_equal ( pthread_self(), sim_asynch_main_threadid )) abort()
#define AIO_CHECK_EVENT \
if (0 > --sim_asynch_check) { \
AIO_UPDATE_QUEUE; \
}
sim_asynch_check = sim_asynch_inst_latency; \
} else 0
#define AIO_SET_INTERRUPT_LATENCY(instpersec) \
if (1) { \
sim_asynch_inst_latency = (int32)((((double)(instpersec))*sim_asynch_latency)/1000000000);\
if (sim_asynch_inst_latency == 0) \
sim_asynch_inst_latency = 1; \
}
} else 0
#else /* !SIM_ASYNCH_IO */
#define AIO_UPDATE_QUEUE
#define AIO_ACTIVATE(caller, uptr, event_time)
@@ -80,7 +80,7 @@
#include <ctype.h>

t_bool sim_idle_enab = FALSE; /* global flag */
t_bool sim_idle_wait = FALSE; /* global flag */
volatile t_bool sim_idle_wait = FALSE; /* global flag */

static uint32 sim_idle_rate_ms = 0;
static uint32 sim_idle_stable = SIM_IDLE_STDFLT;
@@ -457,7 +457,8 @@ if (done_time.tv_nsec > 1000000000) {
    }
pthread_mutex_lock (&sim_asynch_lock);
sim_idle_wait = TRUE;
pthread_cond_timedwait (&sim_asynch_wake, &sim_asynch_lock, &done_time);
if (!pthread_cond_timedwait (&sim_asynch_wake, &sim_asynch_lock, &done_time))
    sim_asynch_check = 0; /* force check of asynch queue now */
sim_idle_wait = FALSE;
pthread_mutex_unlock (&sim_asynch_lock);
return sim_os_msec() - start_time;