3b2: Fix minor MMU paging bug

On a full cache miss, the MMU should only bring a page
descriptor into cache if the segment descriptor does NOT
have the 'contiguous' bit set.
Seth Morabito 2018-04-13 13:42:30 -07:00
parent ca77941ce0
commit afffe300ee

@@ -664,7 +664,7 @@ t_stat mmu_decode_va(uint32 va, uint8 r_acc, t_bool fc, uint32 *pa)
         return SCPE_NXM;
     }
-    if (mmu_get_pd(va, r_acc, fc, sd0, sd1, &pd, &pd_acc) != SCPE_OK) {
+    if (SD_PAGED(sd0) && mmu_get_pd(va, r_acc, fc, sd0, sd1, &pd, &pd_acc) != SCPE_OK) {
         sim_debug(EXECUTE_MSG, &mmu_dev,
                   "[%08x] Could not get PD (full miss). r_acc=%d, fc=%d, va=%08x\n",
                   R[NUM_PC], r_acc, fc, va);
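
For context, the sketch below is a self-contained toy model of the behavior this commit fixes: on a full cache miss, a page descriptor is fetched only when the segment is paged, while a contiguous segment is translated directly from its segment descriptor and has no page descriptor to fetch or cache. Only SD_PAGED and mmu_get_pd's role come from the diff above; the bit position, helper names (get_pd, decode_va), and address arithmetic here are illustrative assumptions, not the simulator's actual definitions.

/* Toy model (NOT the simulator's code) of the fixed full-miss path.
 * The 'contiguous' bit position and all addresses are made up. */
#include <stdint.h>
#include <stdio.h>

#define SD_CONTIG(sd0) ((sd0) & 0x04u)   /* hypothetical 'contiguous' bit */
#define SD_PAGED(sd0)  (!SD_CONTIG(sd0)) /* paged iff NOT contiguous      */

/* Stand-in for a page-descriptor fetch; pretend it always succeeds. */
static int get_pd(uint32_t va, uint32_t *pd)
{
    *pd = 0x2000u | (va >> 12);
    return 0;
}

static int decode_va(uint32_t va, uint32_t sd0, uint32_t *pa)
{
    uint32_t pd;
    if (SD_PAGED(sd0)) {
        /* Paged segment: translation goes through a page descriptor,
         * so it must be fetched (and cached) on a full miss. */
        if (get_pd(va, &pd) != 0)
            return -1;
        *pa = (pd << 12) | (va & 0xfffu);   /* page base + page offset */
    } else {
        /* Contiguous segment: the segment descriptor alone maps the
         * whole segment; fetching a PD here would be wrong. */
        *pa = 0x100000u + (va & 0x1ffffu);  /* toy segment base + offset */
    }
    return 0;
}

int main(void)
{
    uint32_t pa;
    decode_va(0x00012345u, 0x0u, &pa);  /* paged segment      */
    printf("paged:  pa=%08x\n", (unsigned)pa);
    decode_va(0x00012345u, 0x4u, &pa);  /* contiguous segment */
    printf("contig: pa=%08x\n", (unsigned)pa);
    return 0;
}

The pre-fix code took the paged path unconditionally, which is why guarding the mmu_get_pd call with SD_PAGED(sd0) is the entire change.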