3b2: Fix minor MMU paging bug
On a full cache miss, the MMU should only bring a page descriptor into cache if the segment descriptor does NOT have the 'contiguous' bit set.
This commit is contained in:
parent
ca77941ce0
commit
afffe300ee
1 changed file with 1 addition and 1 deletion
|
@ -664,7 +664,7 @@ t_stat mmu_decode_va(uint32 va, uint8 r_acc, t_bool fc, uint32 *pa)
|
||||||
return SCPE_NXM;
|
return SCPE_NXM;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (mmu_get_pd(va, r_acc, fc, sd0, sd1, &pd, &pd_acc) != SCPE_OK) {
|
if (SD_PAGED(sd0) && mmu_get_pd(va, r_acc, fc, sd0, sd1, &pd, &pd_acc) != SCPE_OK) {
|
||||||
sim_debug(EXECUTE_MSG, &mmu_dev,
|
sim_debug(EXECUTE_MSG, &mmu_dev,
|
||||||
"[%08x] Could not get PD (full miss). r_acc=%d, fc=%d, va=%08x\n",
|
"[%08x] Could not get PD (full miss). r_acc=%d, fc=%d, va=%08x\n",
|
||||||
R[NUM_PC], r_acc, fc, va);
|
R[NUM_PC], r_acc, fc, va);
|
||||||
|
|
Loading…
Add table
Reference in a new issue