This page displays common pseudocode functions shared by many pages.
// AArch32.VCRMatch()
// ==================

boolean AArch32.VCRMatch(bits(32) vaddress)

    if UsingAArch32() && ELUsingAArch32(EL1) && IsZero(vaddress<1:0>) && PSTATE.EL != EL2 then
        // Each bit position in this string corresponds to a bit in DBGVCR and an exception vector.
        match_word = Zeros(32);

        if vaddress<31:5> == ExcVectorBase()<31:5> then
            if HaveEL(EL3) && !IsSecure() then
                match_word<UInt(vaddress<4:2>) + 24> = '1';    // Non-secure vectors
            else
                match_word<UInt(vaddress<4:2>) + 0> = '1';     // Secure vectors (or no EL3)

        if HaveEL(EL3) && ELUsingAArch32(EL3) && IsSecure() && vaddress<31:5> == MVBAR<31:5> then
            match_word<UInt(vaddress<4:2>) + 8> = '1';         // Monitor vectors

        // Mask out bits not corresponding to vectors.
        if !HaveEL(EL3) then
            mask = '00000000':'00000000':'00000000':'11011110';    // DBGVCR[31:8] are RES0
        elsif !ELUsingAArch32(EL3) then
            mask = '11011110':'00000000':'00000000':'11011110';    // DBGVCR[15:8] are RES0
        else
            mask = '11011110':'00000000':'11011100':'11011110';

        match_word = match_word AND DBGVCR AND mask;
        match = !IsZero(match_word);

        // Check for UNPREDICTABLE case - match on Prefetch Abort and Data Abort vectors
        if !IsZero(match_word<28:27,12:11,4:3>) && DebugTarget() == PSTATE.EL then
            match = ConstrainUnpredictableBool(Unpredictable_VCMATCHDAPA);
    else
        match = FALSE;

    return match;
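The match word above is a one-hot encoding of which DBGVCR-controlled vector the fetched address hit. As an informal illustration (not part of the specification), the same bit arithmetic can be sketched in C with the system-register reads replaced by plain parameters; vector_base, dbgvcr, mask and group_offset are hypothetical stand-ins for ExcVectorBase()/MVBAR, DBGVCR and the Secure (0), Monitor (8) or Non-secure (24) bit groups:

#include <stdint.h>
#include <stdbool.h>

/* Hypothetical sketch of the DBGVCR match-word computation above.
   vaddress must be word aligned; bits <4:2> select one of the eight
   vectors, and group_offset selects the Secure, Monitor or
   Non-secure row of DBGVCR. */
static bool vcr_match(uint32_t vaddress, uint32_t vector_base,
                      uint32_t dbgvcr, uint32_t mask, unsigned group_offset)
{
    uint32_t match_word = 0;
    if ((vaddress >> 5) == (vector_base >> 5))
        match_word = 1u << (((vaddress >> 2) & 7u) + group_offset);
    return (match_word & dbgvcr & mask) != 0;
}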
// AArch32.SelfHostedSecurePrivilegedInvasiveDebugEnabled()
// ========================================================

boolean AArch32.SelfHostedSecurePrivilegedInvasiveDebugEnabled()
    // The definition of this function is IMPLEMENTATION DEFINED.
    // In the recommended interface, AArch32.SelfHostedSecurePrivilegedInvasiveDebugEnabled returns
    // the state of the (DBGEN AND SPIDEN) signal.
    if !HaveEL(EL3) && !IsSecure() then return FALSE;
    return DBGEN == HIGH && SPIDEN == HIGH;
// AArch32.BreakpointMatch()
// =========================
// Breakpoint matching in an AArch32 translation regime.

(boolean,boolean) AArch32.BreakpointMatch(integer n, bits(32) vaddress, integer size)
    assert ELUsingAArch32(S1TranslationRegime());
    assert n <= UInt(DBGDIDR.BRPs);

    enabled = DBGBCR[n].E == '1';
    ispriv = PSTATE.EL != EL0;
    linked = DBGBCR[n].BT == '0x01';
    isbreakpnt = TRUE;
    linked_to = FALSE;

    state_match = AArch32.StateMatch(DBGBCR[n].SSC, DBGBCR[n].HMC, DBGBCR[n].PMC,
                                     linked, DBGBCR[n].LBN, isbreakpnt, ispriv);
    (value_match, value_mismatch) = AArch32.BreakpointValueMatch(n, vaddress, linked_to);

    if size == 4 then                        // Check second halfword
        // If the breakpoint address and BAS of an Address breakpoint match the address of the
        // second halfword of an instruction, but not the address of the first halfword, it is
        // CONSTRAINED UNPREDICTABLE whether or not this breakpoint generates a Breakpoint debug
        // event.
        (match_i, mismatch_i) = AArch32.BreakpointValueMatch(n, vaddress + 2, linked_to);
        if !value_match && match_i then
            value_match = ConstrainUnpredictableBool(Unpredictable_BPMATCHHALF);
        if value_mismatch && !mismatch_i then
            value_mismatch = ConstrainUnpredictableBool(Unpredictable_BPMISMATCHHALF);

    if vaddress<1> == '1' && DBGBCR[n].BAS == '1111' then
        // The above notwithstanding, if DBGBCR[n].BAS == '1111', then it is CONSTRAINED
        // UNPREDICTABLE whether or not a Breakpoint debug event is generated for an instruction
        // at the address DBGBVR[n]+2.
        if value_match then
            value_match = ConstrainUnpredictableBool(Unpredictable_BPMATCHHALF);
        if !value_mismatch then
            value_mismatch = ConstrainUnpredictableBool(Unpredictable_BPMISMATCHHALF);

    match = value_match && state_match && enabled;
    mismatch = value_mismatch && state_match && enabled;

    return (match, mismatch);
// AArch32.BreakpointValueMatch()
// ==============================
// The first result is whether an Address Match or Context breakpoint is programmed on the
// instruction at "address". The second result is whether an Address Mismatch breakpoint is
// programmed on the instruction, that is, whether the instruction should be stepped.

(boolean,boolean) AArch32.BreakpointValueMatch(integer n, bits(32) vaddress, boolean linked_to)

    // "n" is the identity of the breakpoint unit to match against.
    // "vaddress" is the current instruction address, ignored if linked_to is TRUE and for Context
    //   matching breakpoints.
    // "linked_to" is TRUE if this is a call from StateMatch for linking.

    // If a non-existent breakpoint then it is CONSTRAINED UNPREDICTABLE whether this gives
    // no match or the breakpoint is mapped to another UNKNOWN implemented breakpoint.
    if n > UInt(DBGDIDR.BRPs) then
        (c, n) = ConstrainUnpredictableInteger(0, UInt(DBGDIDR.BRPs), Unpredictable_BPNOTIMPL);
        assert c IN {Constraint_DISABLED, Constraint_UNKNOWN};
        if c == Constraint_DISABLED then return (FALSE,FALSE);

    // If this breakpoint is not enabled, it cannot generate a match. (This could also happen on a
    // call from StateMatch for linking).
    if DBGBCR[n].E == '0' then return (FALSE,FALSE);

    context_aware = (n >= UInt(DBGDIDR.BRPs) - UInt(DBGDIDR.CTX_CMPs));

    // If BT is set to a reserved type, behaves either as disabled or as a not-reserved type.
    dbgtype = DBGBCR[n].BT;

    if ((dbgtype IN {'011x','11xx'} && !HaveVirtHostExt()) ||          // Context matching
          (dbgtype == '010x' && HaltOnBreakpointOrWatchpoint()) ||     // Address mismatch
          (dbgtype != '0x0x' && !context_aware) ||                     // Context matching
          (dbgtype == '1xxx' && !HaveEL(EL2))) then                    // EL2 extension
        (c, dbgtype) = ConstrainUnpredictableBits(Unpredictable_RESBPTYPE);
        assert c IN {Constraint_DISABLED, Constraint_UNKNOWN};
        if c == Constraint_DISABLED then return (FALSE,FALSE);
        // Otherwise the value returned by ConstrainUnpredictableBits must be a not-reserved value

    // Determine what to compare against.
    match_addr = (dbgtype == '0x0x');
    mismatch   = (dbgtype == '010x');
    match_vmid = (dbgtype == '10xx');
    match_cid1 = (dbgtype == 'xx1x');
    match_cid2 = (dbgtype == '11xx');
    linked     = (dbgtype == 'xxx1');

    // If this is a call from StateMatch, return FALSE if the breakpoint is not programmed for a
    // VMID and/or context ID match, or if not context-aware. The above assertions mean that the
    // code can just test for match_addr == TRUE to confirm all these things.
    if linked_to && (!linked || match_addr) then return (FALSE,FALSE);

    // If called from BreakpointMatch return FALSE for Linked context ID and/or VMID matches.
    if !linked_to && linked && !match_addr then return (FALSE,FALSE);

    // Do the comparison.
    if match_addr then
        byte = UInt(vaddress<1:0>);
        assert byte IN {0,2};                    // "vaddress" is halfword aligned
        byte_select_match = (DBGBCR[n].BAS<byte> == '1');
        BVR_match = vaddress<31:2> == DBGBVR[n]<31:2> && byte_select_match;
    elsif match_cid1 then
        BVR_match = (PSTATE.EL != EL2 && CONTEXTIDR == DBGBVR[n]<31:0>);
    if match_vmid then
        if ELUsingAArch32(EL2) then
            vmid = ZeroExtend(VTTBR.VMID, 16);
            bvr_vmid = ZeroExtend(DBGBXVR[n]<7:0>, 16);
        elsif !Have16bitVMID() || VTCR_EL2.VS == '0' then
            vmid = ZeroExtend(VTTBR_EL2.VMID<7:0>, 16);
            bvr_vmid = ZeroExtend(DBGBXVR[n]<7:0>, 16);
        else
            vmid = VTTBR_EL2.VMID;
            bvr_vmid = DBGBXVR[n]<15:0>;
        BXVR_match = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() && vmid == bvr_vmid);
    elsif match_cid2 then
        BXVR_match = (!IsSecure() && HaveVirtHostExt() && !ELUsingAArch32(EL2) &&
                      DBGBXVR[n]<31:0> == CONTEXTIDR_EL2);

    bvr_match_valid = (match_addr || match_cid1);
    bxvr_match_valid = (match_vmid || match_cid2);

    match = (!bxvr_match_valid || BXVR_match) && (!bvr_match_valid || BVR_match);

    return (match && !mismatch, !match && mismatch);
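For an Address match, DBGBCR[n].BAS selects which halfwords of the word-aligned DBGBVR[n] address are armed. A minimal C sketch of that comparison (informal, assuming hypothetical bas/bvr parameters in place of the real debug registers):

#include <stdint.h>
#include <stdbool.h>

/* Hypothetical sketch of the address-match comparison above: the word
   addresses must agree, and the BAS bit for the accessed halfword
   (byte offset 0 or 2) must be set. */
static bool bp_addr_match(uint32_t vaddress, uint32_t bvr, uint8_t bas)
{
    unsigned byte = vaddress & 3u;              /* halfword aligned: 0 or 2 */
    bool byte_select_match = (bas >> byte) & 1u;
    return ((vaddress >> 2) == (bvr >> 2)) && byte_select_match;
}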
// AArch32.StateMatch()
// ====================
// Determine whether a breakpoint or watchpoint is enabled in the current mode and state.

boolean AArch32.StateMatch(bits(2) SSC, bit HMC, bits(2) PxC, boolean linked, bits(4) LBN,
                           boolean isbreakpnt, boolean ispriv)
    // "SSC", "HMC", "PxC" are the control fields from the DBGBCR[n] or DBGWCR[n] register.
    // "linked" is TRUE if this is a linked breakpoint/watchpoint type.
    // "LBN" is the linked breakpoint number from the DBGBCR[n] or DBGWCR[n] register.
    // "isbreakpnt" is TRUE for breakpoints, FALSE for watchpoints.
    // "ispriv" is valid for watchpoints, and selects between privileged and unprivileged accesses.

    // If parameters are set to a reserved type, behaves as either disabled or a defined type
    (c, SSC, HMC, PxC) = CheckValidStateMatch(SSC, HMC, PxC, isbreakpnt);
    if c == Constraint_DISABLED then return FALSE;
    // Otherwise the HMC,SSC,PxC values are either valid or the values returned by
    // CheckValidStateMatch are valid.

    PL2_match = HaveEL(EL2) && HMC == '1';
    PL1_match = PxC<0> == '1';
    PL0_match = PxC<1> == '1';
    SSU_match = isbreakpnt && HMC == '0' && PxC == '00' && SSC != '11';

    el = PSTATE.EL;
    if !ispriv && !isbreakpnt then
        priv_match = PL0_match;
    elsif SSU_match then
        priv_match = PSTATE.M IN {M32_User,M32_Svc,M32_System};
    else
        case el of
            when EL3  priv_match = PL1_match;    // EL3 and EL1 are both PL1
            when EL2  priv_match = PL2_match;
            when EL1  priv_match = PL1_match;
            when EL0  priv_match = PL0_match;

    case SSC of
        when '00'  security_state_match = TRUE;           // Both
        when '01'  security_state_match = !IsSecure();    // Non-secure only
        when '10'  security_state_match = IsSecure();     // Secure only
        when '11'  security_state_match = TRUE;           // Both

    if linked then
        // "LBN" must be an enabled context-aware breakpoint unit. If it is not context-aware then
        // it is CONSTRAINED UNPREDICTABLE whether this gives no match, or LBN is mapped to some
        // UNKNOWN breakpoint that is context-aware.
        lbn = UInt(LBN);
        first_ctx_cmp = (UInt(DBGDIDR.BRPs) - UInt(DBGDIDR.CTX_CMPs));
        last_ctx_cmp = UInt(DBGDIDR.BRPs);
        if (lbn < first_ctx_cmp || lbn > last_ctx_cmp) then
            (c, lbn) = ConstrainUnpredictableInteger(first_ctx_cmp, last_ctx_cmp,
                                                     Unpredictable_BPNOTCTXCMP);
            assert c IN {Constraint_DISABLED, Constraint_NONE, Constraint_UNKNOWN};
            case c of
                when Constraint_DISABLED  return FALSE;      // Disabled
                when Constraint_NONE      linked = FALSE;    // No linking
                // Otherwise ConstrainUnpredictableInteger returned a context-aware breakpoint

    if linked then
        vaddress = bits(32) UNKNOWN;
        linked_to = TRUE;
        (linked_match,-) = AArch32.BreakpointValueMatch(lbn, vaddress, linked_to);

    return priv_match && security_state_match && (!linked || linked_match);
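The SSC field therefore acts as a security-state filter on top of the privilege-level match. A small C sketch of that filter (informal, assuming the decoded SSC value and current security state are already available):

#include <stdbool.h>

/* Hypothetical sketch of the SSC security-state filter above: SSC selects
   whether the breakpoint/watchpoint applies in Secure state, Non-secure
   state, or both. */
static bool security_state_match(unsigned ssc, bool is_secure)
{
    switch (ssc & 3u) {
    case 0:  return true;        /* both security states */
    case 1:  return !is_secure;  /* Non-secure only */
    case 2:  return is_secure;   /* Secure only */
    default: return true;        /* both security states */
    }
}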
// CheckValidStateMatch()
// ======================
// Checks for an invalid state match that will generate Constrained Unpredictable behaviour,
// otherwise returns Constraint_NONE.

(Constraint, bits(2), bit, bits(2)) CheckValidStateMatch(bits(2) SSC, bit HMC, bits(2) PxC, boolean isbreakpnt)
    boolean reserved = FALSE;

    // Match 'Usr/Sys/Svc' only valid for AArch32 breakpoints
    if (!isbreakpnt || !HaveAArch32EL(EL1)) && HMC:PxC == '000' && SSC != '11' then
        reserved = TRUE;

    // Both EL3 and EL2 are not implemented
    if !HaveEL(EL3) && !HaveEL(EL2) && (HMC != '0' || SSC != '00') then
        reserved = TRUE;

    // EL3 is not implemented
    if !HaveEL(EL3) && SSC IN {'01','10'} && HMC:SSC:PxC != '10100' then
        reserved = TRUE;

    // EL3 using AArch64 only
    if (!HaveEL(EL3) || HighestELUsingAArch32()) && HMC:SSC:PxC == '11000' then
        reserved = TRUE;

    // EL2 is not implemented
    if !HaveEL(EL2) && HMC:SSC:PxC == '11100' then
        reserved = TRUE;

    // Secure EL2 is not implemented
    if !HaveSecureEL2Ext() && (HMC:SSC:PxC) IN {'01100','10100','x11x1'} then
        reserved = TRUE;

    // Values that are not allocated in any architecture version
    if (HMC:SSC:PxC) IN {'01110','100x0','10110','11x10'} then
        reserved = TRUE;

    if reserved then
        // If parameters are set to a reserved type, behaves as either disabled or a defined type
        (c, <HMC,SSC,PxC>) = ConstrainUnpredictableBits(Unpredictable_RESBPWPCTRL);
        assert c IN {Constraint_DISABLED, Constraint_UNKNOWN};
        if c == Constraint_DISABLED then
            return (c, bits(2) UNKNOWN, bit UNKNOWN, bits(2) UNKNOWN);
        // Otherwise the value returned by ConstrainUnpredictableBits must be a not-reserved value

    return (Constraint_NONE, SSC, HMC, PxC);
// AArch32.GenerateDebugExceptions()
// =================================

boolean AArch32.GenerateDebugExceptions()
    return AArch32.GenerateDebugExceptionsFrom(PSTATE.EL, IsSecure());

// AArch32.GenerateDebugExceptionsFrom()
// =====================================

boolean AArch32.GenerateDebugExceptionsFrom(bits(2) from, boolean secure)

    if from == EL0 && !ELStateUsingAArch32(EL1, secure) then
        mask = bit UNKNOWN;    // PSTATE.D mask, unused for EL0 case
        return AArch64.GenerateDebugExceptionsFrom(from, secure, mask);

    if DBGOSLSR.OSLK == '1' || DoubleLockStatus() || Halted() then
        return FALSE;

    if HaveEL(EL3) && secure then
        spd = if ELUsingAArch32(EL3) then SDCR.SPD else MDCR_EL3.SPD32;
        if spd<1> == '1' then
            enabled = spd<0> == '1';
        else
            // SPD == 0b01 is reserved, but behaves the same as 0b00.
            enabled = AArch32.SelfHostedSecurePrivilegedInvasiveDebugEnabled();
        if from == EL0 then enabled = enabled || SDER.SUIDEN == '1';
    else
        enabled = from != EL2;

    return enabled;
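The SPD/SPD32 decode above treats bit 1 as a software-override flag: when it is set, bit 0 alone decides whether Secure privileged debug is enabled; otherwise the authentication interface is consulted. An informal C sketch of this decode, with the recommended DBGEN/SPIDEN interface passed in as booleans:

#include <stdbool.h>

/* Hypothetical sketch of the SPD decode above. spd is the 2-bit
   SDCR.SPD / MDCR_EL3.SPD32 field. */
static bool secure_debug_enabled(unsigned spd, bool dbgen, bool spiden)
{
    if (spd & 2u)                 /* SPD<1>: software control is in force */
        return (spd & 1u) != 0;   /* SPD<0>: enable/disable */
    return dbgen && spiden;       /* recommended DBGEN AND SPIDEN interface */
}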
// AArch32.CheckForPMUOverflow()
// =============================
// Signal Performance Monitors overflow IRQ and CTI overflow events

boolean AArch32.CheckForPMUOverflow()

    if !ELUsingAArch32(EL1) then return AArch64.CheckForPMUOverflow();

    pmuirq = PMCR.E == '1' && PMINTENSET<31> == '1' && PMOVSSET<31> == '1';
    for n = 0 to UInt(PMCR.N) - 1
        if HaveEL(EL2) then
            hpmn = if !ELUsingAArch32(EL2) then MDCR_EL2.HPMN else HDCR.HPMN;
            hpme = if !ELUsingAArch32(EL2) then MDCR_EL2.HPME else HDCR.HPME;
            E = (if n < UInt(hpmn) then PMCR.E else hpme);
        else
            E = PMCR.E;
        if E == '1' && PMINTENSET<n> == '1' && PMOVSSET<n> == '1' then pmuirq = TRUE;

    SetInterruptRequestLevel(InterruptID_PMUIRQ, if pmuirq then HIGH else LOW);

    CTI_SetEventLevel(CrossTriggerIn_PMUOverflow, if pmuirq then HIGH else LOW);

    // The request remains set until the condition is cleared. (For example, an interrupt handler
    // or cross-triggered event handler clears the overflow status flag by writing to PMOVSCLR_EL0.)
    return pmuirq;
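An informal C sketch of the overflow scan above, with the registers flattened into word parameters; n_counters stands for PMCR.N and hpmn for the boundary below which counters remain under PMCR.E control:

#include <stdint.h>
#include <stdbool.h>

/* Hypothetical sketch of the overflow-IRQ computation above: a counter
   raises the PMU interrupt request when it is enabled (PMCR.E or, for
   counters reserved for EL2, HPME), its interrupt is enabled in
   PMINTENSET and its overflow flag is set in PMOVSSET. */
static bool pmu_overflow_irq(unsigned n_counters, unsigned hpmn,
                             bool pmcr_e, bool hpme,
                             uint32_t intenset, uint32_t ovsset)
{
    /* Bit 31 is the cycle counter, controlled by PMCR.E. */
    bool pmuirq = pmcr_e && (intenset >> 31) && (ovsset >> 31);
    for (unsigned n = 0; n < n_counters; n++) {
        bool e = (n < hpmn) ? pmcr_e : hpme;
        if (e && ((intenset >> n) & 1u) && ((ovsset >> n) & 1u))
            pmuirq = true;
    }
    return pmuirq;
}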
// AArch32.CountEvents()
// =====================
// Return TRUE if counter "n" should count its event. For the cycle counter, n == 31.

boolean AArch32.CountEvents(integer n)
    assert n == 31 || n < UInt(PMCR.N);

    if !ELUsingAArch32(EL1) then return AArch64.CountEvents(n);

    // Event counting is disabled in Debug state
    debug = Halted();

    // In Non-secure state, some counters are reserved for EL2
    if HaveEL(EL2) then
        hpmn = if !ELUsingAArch32(EL2) then MDCR_EL2.HPMN else HDCR.HPMN;
        hpme = if !ELUsingAArch32(EL2) then MDCR_EL2.HPME else HDCR.HPME;
        E = if n < UInt(hpmn) || n == 31 then PMCR.E else hpme;
    else
        E = PMCR.E;
    enabled = E == '1' && PMCNTENSET<n> == '1';

    if !IsSecure() then
        // Event counting in Non-secure state is allowed unless all of:
        // * EL2 and the HPMD Extension are implemented
        // * Executing at EL2
        // * PMNx is not reserved for EL2
        // * HDCR.HPMD == 1
        if HaveHPMDExt() && PSTATE.EL == EL2 && (n < UInt(hpmn) || n == 31) then
            hpmd = if !ELUsingAArch32(EL2) then MDCR_EL2.HPMD else HDCR.HPMD;
            prohibited = (hpmd == '1');
        else
            prohibited = FALSE;
    else
        // Event counting in Secure state is prohibited unless any one of:
        // * EL3 is not implemented
        // * EL3 is using AArch64 and MDCR_EL3.SPME == 1
        // * EL3 is using AArch32 and SDCR.SPME == 1
        // * Executing at EL0, and SDER.SUNIDEN == 1.
        spme = (if ELUsingAArch32(EL3) then SDCR.SPME else MDCR_EL3.SPME);
        prohibited = HaveEL(EL3) && spme == '0' && (PSTATE.EL != EL0 || SDER.SUNIDEN == '0');

    // The IMPLEMENTATION DEFINED authentication interface might override software controls
    if prohibited && !HaveNoSecurePMUDisableOverride() then
        prohibited = !ExternalSecureNoninvasiveDebugEnabled();

    // For the cycle counter, PMCR.DP enables counting when otherwise prohibited
    if prohibited && n == 31 then prohibited = (PMCR.DP == '1');

    // Event counting can be filtered by the {P, U, NSK, NSU, NSH} bits
    filter = if n == 31 then PMCCFILTR else PMEVTYPER[n];

    P   = filter<31>;
    U   = filter<30>;
    NSK = if HaveEL(EL3) then filter<29> else '0';
    NSU = if HaveEL(EL3) then filter<28> else '0';
    NSH = if HaveEL(EL2) then filter<27> else '0';

    case PSTATE.EL of
        when EL0  filtered = if IsSecure() then U == '1' else U != NSU;
        when EL1  filtered = if IsSecure() then P == '1' else P != NSK;
        when EL2  filtered = (NSH == '0');
        when EL3  filtered = (P == '1');

    return !debug && enabled && !prohibited && !filtered;
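The {P, U, NSK, NSU, NSH} filter bits occupy PMEVTYPER/PMCCFILTR bits 31 down to 27. A C sketch of the filtering case statement above (informal, assuming el and is_secure describe the current state):

#include <stdint.h>
#include <stdbool.h>

/* Hypothetical sketch of the PMEVTYPER/PMCCFILTR filtering above:
   returns true when the event is filtered out at the current
   Exception level. */
static bool event_filtered(uint32_t filter, unsigned el, bool is_secure)
{
    bool p   = (filter >> 31) & 1u;
    bool u   = (filter >> 30) & 1u;
    bool nsk = (filter >> 29) & 1u;
    bool nsu = (filter >> 28) & 1u;
    bool nsh = (filter >> 27) & 1u;
    switch (el) {
    case 0:  return is_secure ? u : (u != nsu);
    case 1:  return is_secure ? p : (p != nsk);
    case 2:  return !nsh;
    default: return p;    /* EL3 */
    }
}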
// AArch32.EnterHypModeInDebugState()
// ==================================
// Take an exception in Debug state to Hyp mode.

AArch32.EnterHypModeInDebugState(ExceptionRecord exception)
    SynchronizeContext();
    assert HaveEL(EL2) && !IsSecure() && ELUsingAArch32(EL2);

    AArch32.ReportHypEntry(exception);
    AArch32.WriteMode(M32_Hyp);
    SPSR[] = bits(32) UNKNOWN;
    ELR_hyp = bits(32) UNKNOWN;
    // In Debug state, the PE always execute T32 instructions when in AArch32 state, and
    // PSTATE.{SS,A,I,F} are not observable so behave as UNKNOWN.
    PSTATE.T = '1';                             // PSTATE.J is RES0
    PSTATE.<SS,A,I,F> = bits(4) UNKNOWN;
    DLR = bits(32) UNKNOWN;
    DSPSR = bits(32) UNKNOWN;
    PSTATE.E = HSCTLR.EE;
    PSTATE.IL = '0';
    PSTATE.IT = '00000000';
    if HaveSSBSExt() then PSTATE.SSBS = bit UNKNOWN;
    EDSCR.ERR = '1';
    UpdateEDSCRFields();
    EndOfInstruction();
// AArch32.EnterModeInDebugState()
// ===============================
// Take an exception in Debug state to a mode other than Monitor and Hyp mode.

AArch32.EnterModeInDebugState(bits(5) target_mode)
    SynchronizeContext();
    assert ELUsingAArch32(EL1) && PSTATE.EL != EL2;

    if PSTATE.M == M32_Monitor then SCR.NS = '0';
    AArch32.WriteMode(target_mode);
    SPSR[] = bits(32) UNKNOWN;
    R[14] = bits(32) UNKNOWN;
    // In Debug state, the PE always execute T32 instructions when in AArch32 state, and
    // PSTATE.{SS,A,I,F} are not observable so behave as UNKNOWN.
    PSTATE.T = '1';                             // PSTATE.J is RES0
    PSTATE.<SS,A,I,F> = bits(4) UNKNOWN;
    DLR = bits(32) UNKNOWN;
    DSPSR = bits(32) UNKNOWN;
    PSTATE.E = SCTLR.EE;
    PSTATE.IL = '0';
    PSTATE.IT = '00000000';
    if HavePANExt() && SCTLR.SPAN == '0' then PSTATE.PAN = '1';
    if HaveSSBSExt() then PSTATE.SSBS = bit UNKNOWN;
    EDSCR.ERR = '1';
    UpdateEDSCRFields();                        // Update EDSCR processor state flags.
    EndOfInstruction();
// AArch32.EnterMonitorModeInDebugState()
// ======================================
// Take an exception in Debug state to Monitor mode.

AArch32.EnterMonitorModeInDebugState()
    SynchronizeContext();
    assert HaveEL(EL3) && ELUsingAArch32(EL3);
    from_secure = IsSecure();
    if PSTATE.M == M32_Monitor then SCR.NS = '0';
    AArch32.WriteMode(M32_Monitor);
    SPSR[] = bits(32) UNKNOWN;
    R[14] = bits(32) UNKNOWN;
    // In Debug state, the PE always execute T32 instructions when in AArch32 state, and
    // PSTATE.{SS,A,I,F} are not observable so behave as UNKNOWN.
    PSTATE.T = '1';                             // PSTATE.J is RES0
    PSTATE.<SS,A,I,F> = bits(4) UNKNOWN;
    PSTATE.E = SCTLR.EE;
    PSTATE.IL = '0';
    PSTATE.IT = '00000000';
    if HavePANExt() then
        if !from_secure then
            PSTATE.PAN = '0';
        elsif SCTLR.SPAN == '0' then
            PSTATE.PAN = '1';
    if HaveSSBSExt() then PSTATE.SSBS = bit UNKNOWN;
    DLR = bits(32) UNKNOWN;
    DSPSR = bits(32) UNKNOWN;
    EDSCR.ERR = '1';
    UpdateEDSCRFields();                        // Update EDSCR processor state flags.
    EndOfInstruction();
// AArch32.WatchpointByteMatch()
// =============================

boolean AArch32.WatchpointByteMatch(integer n, bits(32) vaddress)

    bottom = if DBGWVR[n]<2> == '1' then 2 else 3;             // Word or doubleword
    byte_select_match = (DBGWCR[n].BAS<UInt(vaddress<bottom-1:0>)> != '0');
    mask = UInt(DBGWCR[n].MASK);

    // If DBGWCR[n].MASK is non-zero value and DBGWCR[n].BAS is not set to '11111111', or
    // DBGWCR[n].BAS specifies a non-contiguous set of bytes behavior is CONSTRAINED
    // UNPREDICTABLE.
    if mask > 0 && !IsOnes(DBGWCR[n].BAS) then
        byte_select_match = ConstrainUnpredictableBool(Unpredictable_WPMASKANDBAS);
    else
        LSB = (DBGWCR[n].BAS AND NOT(DBGWCR[n].BAS - 1));  MSB = (DBGWCR[n].BAS + LSB);
        if !IsZero(MSB AND (MSB - 1)) then                 // Not contiguous
            byte_select_match = ConstrainUnpredictableBool(Unpredictable_WPBASCONTIGUOUS);
            bottom = 3;                                    // For the whole doubleword

    // If the address mask is set to a reserved value, the behavior is CONSTRAINED UNPREDICTABLE.
    if mask > 0 && mask <= 2 then
        (c, mask) = ConstrainUnpredictableInteger(3, 31, Unpredictable_RESWPMASK);
        assert c IN {Constraint_DISABLED, Constraint_NONE, Constraint_UNKNOWN};
        case c of
            when Constraint_DISABLED  return FALSE;        // Disabled
            when Constraint_NONE      mask = 0;            // No masking
            // Otherwise the value returned by ConstrainUnpredictableInteger is a not-reserved value

    if mask > bottom then
        WVR_match = (vaddress<31:mask> == DBGWVR[n]<31:mask>);
        // If masked bits of DBGWVR_EL1[n] are not zero, the behavior is CONSTRAINED UNPREDICTABLE.
        if WVR_match && !IsZero(DBGWVR[n]<mask-1:bottom>) then
            WVR_match = ConstrainUnpredictableBool(Unpredictable_WPMASKEDBITS);
    else
        WVR_match = vaddress<31:bottom> == DBGWVR[n]<31:bottom>;

    return WVR_match && byte_select_match;
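The contiguity test above relies on the classic lowest-set-bit trick: adding the isolated LSB to a contiguous run of ones carries out to a single bit, so a power-of-two test on the sum detects contiguity. A standalone C version (informal, assuming an 8-bit BAS value):

#include <stdint.h>
#include <stdbool.h>

/* Hypothetical helper mirroring the BAS contiguity check above.
   lsb isolates the lowest set bit of BAS; msb then has at most one
   bit set exactly when the set bits of BAS form one contiguous run.
   BAS == 0 (no bytes selected) is trivially contiguous here. */
static bool bas_is_contiguous(uint8_t bas)
{
    uint8_t lsb = bas & (uint8_t)-bas;    /* lowest set bit */
    uint8_t msb = bas + lsb;              /* carries out of a contiguous run */
    return (msb & (uint8_t)(msb - 1)) == 0;
}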
// AArch32.WatchpointMatch()
// =========================
// Watchpoint matching in an AArch32 translation regime.

boolean AArch32.WatchpointMatch(integer n, bits(32) vaddress, integer size, boolean ispriv,
                                boolean iswrite)
    assert ELUsingAArch32(S1TranslationRegime());
    assert n <= UInt(DBGDIDR.WRPs);

    // "ispriv" is FALSE for LDRT/STRT instructions executed at EL1 and all
    // load/stores at EL0, TRUE for all other load/stores. "iswrite" is TRUE for stores, FALSE for
    // loads.
    enabled = DBGWCR[n].E == '1';
    linked = DBGWCR[n].WT == '1';
    isbreakpnt = FALSE;

    state_match = AArch32.StateMatch(DBGWCR[n].SSC, DBGWCR[n].HMC, DBGWCR[n].PAC,
                                     linked, DBGWCR[n].LBN, isbreakpnt, ispriv);

    ls_match = (DBGWCR[n].LSC<(if iswrite then 1 else 0)> == '1');

    value_match = FALSE;
    for byte = 0 to size - 1
        value_match = value_match || AArch32.WatchpointByteMatch(n, vaddress + byte);

    return value_match && state_match && ls_match && enabled;
// AArch32.Abort()
// ===============
// Abort and Debug exception handling in an AArch32 translation regime.

AArch32.Abort(bits(32) vaddress, FaultRecord fault)

    // Check if routed to AArch64 state
    route_to_aarch64 = PSTATE.EL == EL0 && !ELUsingAArch32(EL1);

    if !route_to_aarch64 && EL2Enabled() && !ELUsingAArch32(EL2) then
        route_to_aarch64 = (HCR_EL2.TGE == '1' || IsSecondStage(fault) ||
                            (HaveRASExt() && HCR2.TEA == '1' && IsExternalAbort(fault)) ||
                            (IsDebugException(fault) && MDCR_EL2.TDE == '1'));

    if !route_to_aarch64 && HaveEL(EL3) && !ELUsingAArch32(EL3) then
        route_to_aarch64 = SCR_EL3.EA == '1' && IsExternalAbort(fault);

    if route_to_aarch64 then
        AArch64.Abort(ZeroExtend(vaddress), fault);
    elsif fault.acctype == AccType_IFETCH then
        AArch32.TakePrefetchAbortException(vaddress, fault);
    else
        AArch32.TakeDataAbortException(vaddress, fault);
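An informal C sketch of the routing decision above, with each register test reduced to a boolean input (parameter names are illustrative, not architectural):

#include <stdbool.h>

/* Hypothetical sketch of the abort-routing checks above: an abort is
   routed to AArch64 if EL1 is using AArch64 and we are at EL0, or if
   an AArch64 EL2/EL3 claims it via TGE, stage-2, TEA or TDE controls. */
static bool abort_routed_to_aarch64(bool el0_with_aarch64_el1,
                                    bool el2_aarch64, bool tge,
                                    bool second_stage, bool tea_external,
                                    bool debug_tde,
                                    bool el3_aarch64, bool scr_ea_external)
{
    bool route = el0_with_aarch64_el1;
    if (!route && el2_aarch64)
        route = tge || second_stage || tea_external || debug_tde;
    if (!route && el3_aarch64)
        route = scr_ea_external;
    return route;
}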
// AArch32.AbortSyndrome()
// =======================
// Creates an exception syndrome record for Abort exceptions taken to Hyp mode
// from an AArch32 translation regime.

ExceptionRecord AArch32.AbortSyndrome(Exception exceptype, FaultRecord fault, bits(32) vaddress)
    exception = ExceptionSyndrome(exceptype);

    d_side = exceptype == Exception_DataAbort;

    exception.syndrome = AArch32.FaultSyndrome(d_side, fault);
    exception.vaddress = ZeroExtend(vaddress);
    if IPAValid(fault) then
        exception.ipavalid = TRUE;
        exception.NS = fault.ipaddress.NS;
        exception.ipaddress = ZeroExtend(fault.ipaddress.address);
    else
        exception.ipavalid = FALSE;

    return exception;
// AArch32.CheckPCAlignment()
// ==========================

AArch32.CheckPCAlignment()

    bits(32) pc = ThisInstrAddr();
    if (CurrentInstrSet() == InstrSet_A32 && pc<1> == '1') || pc<0> == '1' then
        if AArch32.GeneralExceptionsToAArch64() then AArch64.PCAlignmentFault();

        // Generate an Alignment fault Prefetch Abort exception
        vaddress = pc;
        acctype = AccType_IFETCH;
        iswrite = FALSE;
        secondstage = FALSE;
        AArch32.Abort(vaddress, AArch32.AlignmentFault(acctype, iswrite, secondstage));
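A32 instructions must be word aligned and T32 instructions halfword aligned, which is all the alignment test above checks. As a one-line C sketch (informal):

#include <stdint.h>
#include <stdbool.h>

/* Hypothetical sketch of the PC alignment test above. */
static bool pc_misaligned(uint32_t pc, bool is_a32)
{
    return (is_a32 && (pc & 2u)) || (pc & 1u);
}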
// AArch32.ReportDataAbort()
// =========================
// Report syndrome information for aborts taken to modes other than Hyp mode.

AArch32.ReportDataAbort(boolean route_to_monitor, FaultRecord fault, bits(32) vaddress)

    // The encoding used in the IFSR or DFSR can be Long-descriptor format or Short-descriptor
    // format. Normally, the current translation table format determines the format. For an abort
    // from Non-secure state to Monitor mode, the IFSR or DFSR uses the Long-descriptor format if
    // any of the following applies:
    // * The Secure TTBCR.EAE is set to 1.
    // * The abort is synchronous and either:
    //   - It is taken from Hyp mode.
    //   - It is taken from EL1 or EL0, and the Non-secure TTBCR.EAE is set to 1.
    long_format = FALSE;
    if route_to_monitor && !IsSecure() then
        long_format = TTBCR_S.EAE == '1';
        if !IsSErrorInterrupt(fault) && !long_format then
            long_format = PSTATE.EL == EL2 || TTBCR.EAE == '1';
    else
        long_format = TTBCR.EAE == '1';
    d_side = TRUE;
    if long_format then
        syndrome = AArch32.FaultStatusLD(d_side, fault);
    else
        syndrome = AArch32.FaultStatusSD(d_side, fault);

    if fault.acctype == AccType_IC then
        if (!long_format &&
            boolean IMPLEMENTATION_DEFINED "Report I-cache maintenance fault in IFSR") then
            i_syndrome = syndrome;
            syndrome<10,3:0> = EncodeSDFSC(Fault_ICacheMaint, 1);
        else
            i_syndrome = bits(32) UNKNOWN;
        if route_to_monitor then
            IFSR_S = i_syndrome;
        else
            IFSR = i_syndrome;

    if route_to_monitor then
        DFSR_S = syndrome;
        DFAR_S = vaddress;
    else
        DFSR = syndrome;
        DFAR = vaddress;

    return;
// AArch32.ReportPrefetchAbort()
// =============================
// Report syndrome information for aborts taken to modes other than Hyp mode.

AArch32.ReportPrefetchAbort(boolean route_to_monitor, FaultRecord fault, bits(32) vaddress)
    // The encoding used in the IFSR can be Long-descriptor format or Short-descriptor format.
    // Normally, the current translation table format determines the format. For an abort from
    // Non-secure state to Monitor mode, the IFSR uses the Long-descriptor format if any of the
    // following applies:
    // * The Secure TTBCR.EAE is set to 1.
    // * It is taken from Hyp mode.
    // * It is taken from EL1 or EL0, and the Non-secure TTBCR.EAE is set to 1.
    long_format = FALSE;
    if route_to_monitor && !IsSecure() then
        long_format = TTBCR_S.EAE == '1' || PSTATE.EL == EL2 || TTBCR.EAE == '1';
    else
        long_format = TTBCR.EAE == '1';

    d_side = FALSE;
    if long_format then
        fsr = AArch32.FaultStatusLD(d_side, fault);
    else
        fsr = AArch32.FaultStatusSD(d_side, fault);

    if route_to_monitor then
        IFSR_S = fsr;
        IFAR_S = vaddress;
    else
        IFSR = fsr;
        IFAR = vaddress;

    return;
// AArch32.TakeDataAbortException()
// ================================

AArch32.TakeDataAbortException(bits(32) vaddress, FaultRecord fault)
    route_to_monitor = HaveEL(EL3) && SCR.EA == '1' && IsExternalAbort(fault);
    route_to_hyp = (HaveEL(EL2) && !IsSecure() && PSTATE.EL IN {EL0, EL1} &&
                    (HCR.TGE == '1' || IsSecondStage(fault) ||
                     (HaveRASExt() && HCR2.TEA == '1' && IsExternalAbort(fault)) ||
                     (IsDebugException(fault) && HDCR.TDE == '1')));
    bits(32) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x10;
    lr_offset = 8;

    if IsDebugException(fault) then DBGDSCRext.MOE = fault.debugmoe;
    if route_to_monitor then
        AArch32.ReportDataAbort(route_to_monitor, fault, vaddress);
        AArch32.EnterMonitorMode(preferred_exception_return, lr_offset, vect_offset);
    elsif PSTATE.EL == EL2 || route_to_hyp then
        exception = AArch32.AbortSyndrome(Exception_DataAbort, fault, vaddress);
        if PSTATE.EL == EL2 then
            AArch32.EnterHypMode(exception, preferred_exception_return, vect_offset);
        else
            AArch32.EnterHypMode(exception, preferred_exception_return, 0x14);
    else
        AArch32.ReportDataAbort(route_to_monitor, fault, vaddress);
        AArch32.EnterMode(M32_Abort, preferred_exception_return, lr_offset, vect_offset);
// AArch32.TakePrefetchAbortException()
// ====================================

AArch32.TakePrefetchAbortException(bits(32) vaddress, FaultRecord fault)
    route_to_monitor = HaveEL(EL3) && SCR.EA == '1' && IsExternalAbort(fault);
    route_to_hyp = (HaveEL(EL2) && !IsSecure() && PSTATE.EL IN {EL0, EL1} &&
                    (HCR.TGE == '1' || IsSecondStage(fault) ||
                     (HaveRASExt() && HCR2.TEA == '1' && IsExternalAbort(fault)) ||
                     (IsDebugException(fault) && HDCR.TDE == '1')));
    bits(32) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x0C;
    lr_offset = 4;

    if IsDebugException(fault) then DBGDSCRext.MOE = fault.debugmoe;
    if route_to_monitor then
        AArch32.ReportPrefetchAbort(route_to_monitor, fault, vaddress);
        AArch32.EnterMonitorMode(preferred_exception_return, lr_offset, vect_offset);
    elsif PSTATE.EL == EL2 || route_to_hyp then
        if fault.statuscode == Fault_Alignment then             // PC Alignment fault
            exception = ExceptionSyndrome(Exception_PCAlignment);
            exception.vaddress = ThisInstrAddr();
        else
            exception = AArch32.AbortSyndrome(Exception_InstructionAbort, fault, vaddress);
        if PSTATE.EL == EL2 then
            AArch32.EnterHypMode(exception, preferred_exception_return, vect_offset);
        else
            AArch32.EnterHypMode(exception, preferred_exception_return, 0x14);
    else
        AArch32.ReportPrefetchAbort(route_to_monitor, fault, vaddress);
        AArch32.EnterMode(M32_Abort, preferred_exception_return, lr_offset, vect_offset);
// BranchTargetException
// =====================
// Raise branch target exception.

AArch64.BranchTargetException(bits(52) vaddress)

    route_to_el2 = PSTATE.EL == EL0 && EL2Enabled() && HCR_EL2.TGE == '1';
    bits(64) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x0;

    exception = ExceptionSyndrome(Exception_BranchTarget);
    exception.syndrome<1:0>  = PSTATE.BTYPE;
    exception.syndrome<24:2> = Zeros();          // RES0

    if UInt(PSTATE.EL) > UInt(EL1) then
        AArch64.TakeException(PSTATE.EL, exception, preferred_exception_return, vect_offset);
    elsif route_to_el2 then
        AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
    else
        AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);
// AArch32.TakePhysicalFIQException()
// ==================================

AArch32.TakePhysicalFIQException()

    // Check if routed to AArch64 state
    route_to_aarch64 = PSTATE.EL == EL0 && !ELUsingAArch32(EL1);
    if !route_to_aarch64 && EL2Enabled() && !ELUsingAArch32(EL2) then
        route_to_aarch64 = HCR_EL2.TGE == '1' || (HCR_EL2.FMO == '1' && !IsInHost());
    if !route_to_aarch64 && HaveEL(EL3) && !ELUsingAArch32(EL3) then
        route_to_aarch64 = SCR_EL3.FIQ == '1';

    if route_to_aarch64 then AArch64.TakePhysicalFIQException();

    route_to_monitor = HaveEL(EL3) && SCR.FIQ == '1';
    route_to_hyp = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() &&
                    (HCR.TGE == '1' || HCR.FMO == '1'));
    bits(32) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x1C;
    lr_offset = 4;
    if route_to_monitor then
        AArch32.EnterMonitorMode(preferred_exception_return, lr_offset, vect_offset);
    elsif PSTATE.EL == EL2 || route_to_hyp then
        exception = ExceptionSyndrome(Exception_FIQ);
        AArch32.EnterHypMode(exception, preferred_exception_return, vect_offset);
    else
        AArch32.EnterMode(M32_FIQ, preferred_exception_return, lr_offset, vect_offset);
// AArch32.TakePhysicalIRQException()
// ==================================
// Take an enabled physical IRQ exception.

AArch32.TakePhysicalIRQException()

    // Check if routed to AArch64 state
    route_to_aarch64 = PSTATE.EL == EL0 && !ELUsingAArch32(EL1);
    if !route_to_aarch64 && EL2Enabled() && !ELUsingAArch32(EL2) then
        route_to_aarch64 = HCR_EL2.TGE == '1' || (HCR_EL2.IMO == '1' && !IsInHost());
    if !route_to_aarch64 && HaveEL(EL3) && !ELUsingAArch32(EL3) then
        route_to_aarch64 = SCR_EL3.IRQ == '1';

    if route_to_aarch64 then AArch64.TakePhysicalIRQException();

    route_to_monitor = HaveEL(EL3) && SCR.IRQ == '1';
    route_to_hyp = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() &&
                    (HCR.TGE == '1' || HCR.IMO == '1'));
    bits(32) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x18;
    lr_offset = 4;
    if route_to_monitor then
        AArch32.EnterMonitorMode(preferred_exception_return, lr_offset, vect_offset);
    elsif PSTATE.EL == EL2 || route_to_hyp then
        exception = ExceptionSyndrome(Exception_IRQ);
        AArch32.EnterHypMode(exception, preferred_exception_return, vect_offset);
    else
        AArch32.EnterMode(M32_IRQ, preferred_exception_return, lr_offset, vect_offset);
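For reference, the AArch32 vector table offsets and LR adjustments used by the exception entries in this section, collected into a C enum (informal, derived from the handlers above):

/* AArch32 vector table offsets used by the handlers in this section. */
enum aarch32_vect_offset {
    VECT_PREFETCH_ABORT = 0x0C,   /* lr_offset 4 */
    VECT_DATA_ABORT     = 0x10,   /* lr_offset 8; also SError reporting */
    VECT_HYP_TRAP       = 0x14,   /* exceptions routed to Hyp from EL0/EL1 */
    VECT_IRQ            = 0x18,   /* lr_offset 4 */
    VECT_FIQ            = 0x1C    /* lr_offset 4 */
};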
// AArch32.TakePhysicalSErrorException()
// =====================================

AArch32.TakePhysicalSErrorException(boolean parity, bit extflag, bits(2) errortype,
                                    boolean impdef_syndrome, bits(24) full_syndrome)

    ClearPendingPhysicalSError();
    // Check if routed to AArch64 state
    route_to_aarch64 = PSTATE.EL == EL0 && !ELUsingAArch32(EL1);
    if !route_to_aarch64 && EL2Enabled() && !ELUsingAArch32(EL2) then
        route_to_aarch64 = (HCR_EL2.TGE == '1' || (!IsInHost() && HCR_EL2.AMO == '1'));
    if !route_to_aarch64 && HaveEL(EL3) && !ELUsingAArch32(EL3) then
        route_to_aarch64 = SCR_EL3.EA == '1';

    if route_to_aarch64 then
        AArch64.TakePhysicalSErrorException(impdef_syndrome, full_syndrome);

    route_to_monitor = HaveEL(EL3) && SCR.EA == '1';
    route_to_hyp = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() &&
                    (HCR.TGE == '1' || HCR.AMO == '1'));
    bits(32) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x10;
    lr_offset = 8;
    fault = AArch32.AsynchExternalAbort(parity, errortype, extflag);
    vaddress = bits(32) UNKNOWN;
    if route_to_monitor then
        AArch32.ReportDataAbort(route_to_monitor, fault, vaddress);
        AArch32.EnterMonitorMode(preferred_exception_return, lr_offset, vect_offset);
    elsif PSTATE.EL == EL2 || route_to_hyp then
        exception = AArch32.AbortSyndrome(Exception_DataAbort, fault, vaddress);
        if PSTATE.EL == EL2 then
            AArch32.EnterHypMode(exception, preferred_exception_return, vect_offset);
        else
            AArch32.EnterHypMode(exception, preferred_exception_return, 0x14);
    else
        AArch32.ReportDataAbort(route_to_monitor, fault, vaddress);
        AArch32.EnterMode(M32_Abort, preferred_exception_return, lr_offset, vect_offset);
// AArch32.TakeVirtualFIQException()
// =================================

AArch32.TakeVirtualFIQException()
    assert PSTATE.EL IN {EL0, EL1} && EL2Enabled();

    if ELUsingAArch32(EL2) then    // Virtual IRQ enabled if TGE==0 and FMO==1
        assert HCR.TGE == '0' && HCR.FMO == '1';
    else
        assert HCR_EL2.TGE == '0' && HCR_EL2.FMO == '1';

    // Check if routed to AArch64 state
    if PSTATE.EL == EL0 && !ELUsingAArch32(EL1) then AArch64.TakeVirtualFIQException();

    bits(32) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x1C;
    lr_offset = 4;

    AArch32.EnterMode(M32_FIQ, preferred_exception_return, lr_offset, vect_offset);
// AArch32.TakeVirtualIRQException()
// =================================

AArch32.TakeVirtualIRQException()
    assert PSTATE.EL IN {EL0, EL1} && EL2Enabled();

    if ELUsingAArch32(EL2) then    // Virtual IRQs enabled if TGE==0 and IMO==1
        assert HCR.TGE == '0' && HCR.IMO == '1';
    else
        assert HCR_EL2.TGE == '0' && HCR_EL2.IMO == '1';

    // Check if routed to AArch64 state
    if PSTATE.EL == EL0 && !ELUsingAArch32(EL1) then AArch64.TakeVirtualIRQException();

    bits(32) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x18;
    lr_offset = 4;

    AArch32.EnterMode(M32_IRQ, preferred_exception_return, lr_offset, vect_offset);
// AArch32.TakeVirtualSErrorException()
// ====================================

AArch32.TakeVirtualSErrorException(bit extflag, bits(2) errortype, boolean impdef_syndrome,
                                   bits(24) full_syndrome)

    assert PSTATE.EL IN {EL0, EL1} && EL2Enabled();

    if ELUsingAArch32(EL2) then    // Virtual SError enabled if TGE==0 and AMO==1
        assert HCR.TGE == '0' && HCR.AMO == '1';
    else
        assert HCR_EL2.TGE == '0' && HCR_EL2.AMO == '1';

    // Check if routed to AArch64 state
    if PSTATE.EL == EL0 && !ELUsingAArch32(EL1) then
        AArch64.TakeVirtualSErrorException(impdef_syndrome, full_syndrome);

    route_to_monitor = FALSE;

    bits(32) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x10;
    lr_offset = 8;
    vaddress = bits(32) UNKNOWN;
    parity = FALSE;
    if HaveRASExt() then
        if ELUsingAArch32(EL2) then
            fault = AArch32.AsynchExternalAbort(FALSE, VDFSR.AET, VDFSR.ExT);
        else
            fault = AArch32.AsynchExternalAbort(FALSE, VSESR_EL2.AET, VSESR_EL2.ExT);
    else
        fault = AArch32.AsynchExternalAbort(parity, errortype, extflag);
    ClearPendingVirtualSError();

    AArch32.ReportDataAbort(route_to_monitor, fault, vaddress);
    AArch32.EnterMode(M32_Abort, preferred_exception_return, lr_offset, vect_offset);
// AArch32.SoftwareBreakpoint()
// ============================

AArch32.SoftwareBreakpoint(bits(16) immediate)

    if (EL2Enabled() && !ELUsingAArch32(EL2) &&
        (HCR_EL2.TGE == '1' || MDCR_EL2.TDE == '1')) || !ELUsingAArch32(EL1) then
        AArch64.SoftwareBreakpoint(immediate);
    vaddress = bits(32) UNKNOWN;
    acctype = AccType_IFETCH;            // Take as a Prefetch Abort
    iswrite = FALSE;
    entry = DebugException_BKPT;

    fault = AArch32.DebugFault(acctype, iswrite, entry);
    AArch32.Abort(vaddress, fault);
constant bits(4) DebugException_Breakpoint  = '0001';
constant bits(4) DebugException_BKPT        = '0011';
constant bits(4) DebugException_VectorCatch = '0101';
constant bits(4) DebugException_Watchpoint  = '1010';
// AArch32.CheckAdvSIMDOrFPRegisterTraps()
// =======================================
// Check if an instruction that accesses an Advanced SIMD and
// floating-point System register is trapped by an appropriate HCR.TIDx
// ID group trap control.

AArch32.CheckAdvSIMDOrFPRegisterTraps(bits(4) reg)

    if PSTATE.EL == EL1 && EL2Enabled() then
        tid0 = if ELUsingAArch32(EL2) then HCR.TID0 else HCR_EL2.TID0;
        tid3 = if ELUsingAArch32(EL2) then HCR.TID3 else HCR_EL2.TID3;

        if (tid0 == '1' && reg == '0000')                             // FPSID
          || (tid3 == '1' && reg IN {'0101', '0110', '0111'}) then    // MVFRx
            if ELUsingAArch32(EL2) then
                AArch32.SystemAccessTrap(M32_Hyp, 0x8);               // Exception_AdvSIMDFPAccessTrap
            else
                AArch64.AArch32SystemAccessTrap(EL2, 0x8);            // Exception_AdvSIMDFPAccessTrap
// AArch32.CheckAdvSIMDOrFPRegisterTraps()
// =======================================
// Check if an instruction that accesses an Advanced SIMD and
// floating-point System register is trapped by an appropriate HCR.TIDx
// ID group trap control.// AArch32.ExceptionClass()
// ========================
// Returns the Exception Class and Instruction Length fields to be reported in HSR
(integer,bit)
AArch32.CheckAdvSIMDOrFPRegisterTraps(bits(4) reg)
if PSTATE.EL ==AArch32.ExceptionClass( EL1Exception &&exceptype)
il = if EL2EnabledThisInstrLength() then
tid0 = if() == 32 then '1' else '0';
case exceptype of
when ELUsingAArch32Exception_Uncategorized(ec = 0x00; il = '1';
whenEL2Exception_WFxTrap) then HCR.TID0 else HCR_EL2.TID0;
tid3 = ifec = 0x01;
when ELUsingAArch32Exception_CP15RTTrap(ec = 0x03;
whenEL2Exception_CP15RRTTrap) then HCR.TID3 else HCR_EL2.TID3;
if (tid0 == '1' && reg == '0000') // FPSID
|| (tid3 == '1' && reg IN {'0101', '0110', '0111'}) then // MVFRx
ifec = 0x04;
when ELUsingAArch32Exception_CP14RTTrap(ec = 0x05;
whenEL2Exception_CP14DTTrap) thenec = 0x06;
when
AArch32.SystemAccessTrapException_AdvSIMDFPAccessTrap(ec = 0x07;
whenM32_HypException_FPIDTrap, 0x8); // Exception_AdvSIMDFPAccessTrap
elseec = 0x08;
when
ec = 0x09;
when Exception_CP14RRTTrap ec = 0x0C;
when Exception_BranchTarget ec = 0x0D;
when Exception_IllegalState ec = 0x0E; il = '1';
when Exception_SupervisorCall ec = 0x11;
when Exception_HypervisorCall ec = 0x12;
when Exception_MonitorCall ec = 0x13;
when Exception_ERetTrap ec = 0x1A;
when Exception_InstructionAbort ec = 0x20; il = '1';
when Exception_PCAlignment ec = 0x22; il = '1';
when Exception_DataAbort ec = 0x24;
when Exception_NV2DataAbort ec = 0x25;
when Exception_FPTrappedException ec = 0x28;
otherwise UnreachableAArch64.AArch32SystemAccessTrapException_PACTrap(();
if ec IN {0x20,0x24} && PSTATE.EL ==EL2, 0x8); // Exception_AdvSIMDFPAccessTrapthen
ec = ec + 1;
return (ec,il);
// AArch32.ExceptionClass()
// ========================
// Returns the Exception Class and Instruction Length fields to be reported in HSR
// AArch32.GeneralExceptionsToAArch64()
// ====================================
// Returns TRUE if exceptions normally routed to EL1 are being handled at an Exception
// level using AArch64, because either EL1 is using AArch64 or TGE is in force and EL2
// is using AArch64.
(integer,bit)boolean AArch32.ExceptionClass(AArch32.GeneralExceptionsToAArch64()
return ((PSTATE.EL ==ExceptionEL0 exceptype)
il = if&& ! ThisInstrLengthELUsingAArch32() == 32 then '1' else '0';
case exceptype of
when( Exception_UncategorizedEL1 ec = 0x00; il = '1';
when)) ||
( Exception_WFxTrapEL2Enabled ec = 0x01;
when() && ! Exception_CP15RTTrapELUsingAArch32 ec = 0x03;
when Exception_CP15RRTTrap ec = 0x04;
when Exception_CP14RTTrap ec = 0x05;
when Exception_CP14DTTrap ec = 0x06;
when Exception_AdvSIMDFPAccessTrap ec = 0x07;
when Exception_FPIDTrap ec = 0x08;
when Exception_PACTrap ec = 0x09;
when Exception_CP14RRTTrap ec = 0x0C;
when Exception_BranchTarget ec = 0x0D;
when Exception_IllegalState ec = 0x0E; il = '1';
when Exception_SupervisorCall ec = 0x11;
when Exception_HypervisorCall ec = 0x12;
when Exception_MonitorCall ec = 0x13;
when Exception_ERetTrap ec = 0x1A;
when Exception_InstructionAbort ec = 0x20; il = '1';
when Exception_PCAlignment ec = 0x22; il = '1';
when Exception_DataAbort ec = 0x24;
when Exception_NV2DataAbort ec = 0x25;
when Exception_FPTrappedException ec = 0x28;
otherwise Unreachable();
if ec IN {0x20,0x24} && PSTATE.EL ==( EL2 then
ec = ec + 1;
return (ec,il);) && HCR_EL2.TGE == '1'));
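As a non-normative illustration (Python, not part of the architecture pseudocode): the EC and IL values returned above are packed into HSR<31:26> and HSR<25> by AArch32.ReportHypEntry(). The hsr() helper and the partial EC table below are ad hoc names used only in this sketch.

# Ad hoc sketch of HSR assembly from (EC, IL, ISS); EC values copied from the
# case statement above (partial table only).
EC = {"Uncategorized": 0x00, "WFxTrap": 0x01, "CP15RTTrap": 0x03,
      "SupervisorCall": 0x11, "HypervisorCall": 0x12, "DataAbort": 0x24}

def hsr(exceptype, instr_len, iss):
    ec = EC[exceptype]
    il = 1 if instr_len == 32 else 0
    if exceptype == "Uncategorized":      # the pseudocode forces IL to 1 here
        il = 1
    return (ec << 26) | (il << 25) | (iss & 0x1FFFFFF)

# A 16-bit T32 SVC reports EC=0x11 with IL=0:
assert hsr("SupervisorCall", 16, 0) >> 26 == 0x11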
// AArch32.GeneralExceptionsToAArch64()
// ====================================
// Returns TRUE if exceptions normally routed to EL1 are being handled at an Exception
// level using AArch64, because either EL1 is using AArch64 or TGE is in force and EL2
// is using AArch64.

boolean AArch32.GeneralExceptionsToAArch64()
    return ((PSTATE.EL == EL0 && !ELUsingAArch32(EL1)) ||
            (EL2Enabled() && !ELUsingAArch32(EL2) && HCR_EL2.TGE == '1'));

// AArch32.ReportHypEntry()
// ========================
// Report syndrome information to Hyp mode registers.

AArch32.ReportHypEntry(ExceptionRecord exception)

    Exception exceptype = exception.exceptype;
    (ec,il) = AArch32.ExceptionClass(exceptype);
    iss = exception.syndrome;

    // IL is not valid for Data Abort exceptions without valid instruction syndrome information
    if ec IN {0x24,0x25} && iss<24> == '0' then
        il = '1';

    HSR = ec<5:0>:il:iss;

    if exceptype IN {Exception_InstructionAbort, Exception_PCAlignment} then
        HIFAR = exception.vaddress<31:0>;
        HDFAR = bits(32) UNKNOWN;
    elsif exceptype == Exception_DataAbort then
        HIFAR = bits(32) UNKNOWN;
        HDFAR = exception.vaddress<31:0>;

    if exception.ipavalid then
        HPFAR<31:4> = exception.ipaddress<39:12>;
    else
        HPFAR<31:4> = bits(28) UNKNOWN;

    return;

// Resets System registers and memory-mapped control registers that have architecturally-defined
// reset values to those values.
AArch32.ResetControlRegisters(boolean cold_reset);

// AArch32.TakeReset()
// ===================
// Reset into AArch32 state

AArch32.TakeReset(boolean cold_reset)
    assert HighestELUsingAArch32();

    // Enter the highest implemented Exception level in AArch32 state
    if HaveEL(EL3) then
        AArch32.WriteMode(M32_Svc);
        SCR.NS = '0';                 // Secure state
    elsif HaveEL(EL2) then
        AArch32.WriteMode(M32_Hyp);
    else
        AArch32.WriteMode(M32_Svc);

    // Reset the CP14 and CP15 registers and other system components
    AArch32.ResetControlRegisters(cold_reset);
    FPEXC.EN = '0';

    // Reset all other PSTATE fields, including instruction set and endianness according to the
    // SCTLR values produced by the above call to ResetControlRegisters()
    PSTATE.<A,I,F> = '111';           // All asynchronous exceptions masked
    PSTATE.IT = '00000000';           // IT block state reset
    PSTATE.T = SCTLR.TE;              // Instruction set: TE=0: A32, TE=1: T32. PSTATE.J is RES0.
    PSTATE.E = SCTLR.EE;              // Endianness: EE=0: little-endian, EE=1: big-endian
    PSTATE.IL = '0';                  // Clear Illegal Execution state bit

    // All registers, bits and fields not reset by the above pseudocode or by the BranchTo() call
    // below are UNKNOWN bitstrings after reset. In particular, the return information registers
    // R14 or ELR_hyp and SPSR have UNKNOWN values, so that it
    // is impossible to return from a reset in an architecturally defined way.
    AArch32.ResetGeneralRegisters();
    AArch32.ResetSIMDFPRegisters();
    AArch32.ResetSpecialRegisters();
    ResetExternalDebugRegisters(cold_reset);

    bits(32) rv;                      // IMPLEMENTATION DEFINED reset vector
    if HaveEL(EL3) then
        if MVBAR<0> == '1' then       // Reset vector in MVBAR
            rv = MVBAR<31:1>:'0';
        else
            rv = bits(32) IMPLEMENTATION_DEFINED "reset vector address";
    else
        rv = RVBAR<31:1>:'0';

    // The reset vector must be correctly aligned
    assert rv<0> == '0' && (PSTATE.T == '1' || rv<1> == '0');
    BranchTo(rv, BranchType_RESET);
// ExcVectorBase()
// ===============

bits(32) ExcVectorBase()
    if SCTLR.V == '1' then            // Hivecs selected, base = 0xFFFF0000
        return Ones(16):Zeros(16);
    else
        return VBAR<31:5>:Zeros(5);
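A minimal non-normative Python sketch of the same selection, where the hypothetical vbar argument stands for the VBAR register value:

def exc_vector_base(sctlr_v, vbar):
    if sctlr_v == 1:                 # Hivecs: fixed high vectors at 0xFFFF0000
        return 0xFFFF0000
    return vbar & ~0x1F              # VBAR<31:5> : Zeros(5)

assert exc_vector_base(1, 0x80000000) == 0xFFFF0000
assert exc_vector_base(0, 0x8000003F) == 0x80000020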
// AArch32.FPTrappedException()
// ============================

AArch32.FPTrappedException(bits(8) accumulated_exceptions)
    if AArch32.GeneralExceptionsToAArch64() then
        is_ase = FALSE;
        element = 0;
        AArch64.FPTrappedException(is_ase, element, accumulated_exceptions);
    FPEXC.DEX = '1';
    FPEXC.TFV = '1';
    FPEXC<7,4:0> = accumulated_exceptions<7,4:0>;   // IDF,IXF,UFF,OFF,DZF,IOF
    FPEXC<10:8> = '111';                            // VECITR is RES1
    AArch32.TakeUndefInstrException();

// AArch32.CallHypervisor()
// ========================
// Performs a HVC call

AArch32.CallHypervisor(bits(16) immediate)
    assert HaveEL(EL2);

    if !ELUsingAArch32(EL2) then
        AArch64.CallHypervisor(immediate);
    else
        AArch32.TakeHVCException(immediate);
// AArch32.CallSupervisor()
// ========================
// Calls the Supervisor

AArch32.CallSupervisor(bits(16) immediate)

    if AArch32.CurrentCond() != '1110' then
        immediate = bits(16) UNKNOWN;
    if AArch32.GeneralExceptionsToAArch64() then
        AArch64.CallSupervisor(immediate);
    else
        AArch32.TakeSVCException(immediate);

// AArch32.TakeHVCException()
// ==========================

AArch32.TakeHVCException(bits(16) immediate)
    assert HaveEL(EL2) && ELUsingAArch32(EL2);

    AArch32.ITAdvance();
    SSAdvance();
    bits(32) preferred_exception_return = NextInstrAddr();
    vect_offset = 0x08;

    exception = ExceptionSyndrome(Exception_HypervisorCall);
    exception.syndrome<15:0> = immediate;

    if PSTATE.EL == EL2 then
        AArch32.EnterHypMode(exception, preferred_exception_return, vect_offset);
    else
        AArch32.EnterHypMode(exception, preferred_exception_return, 0x14);
// AArch32.TakeSMCException()
// ==========================

AArch32.TakeSMCException()
    assert HaveEL(EL3) && ELUsingAArch32(EL3);
    AArch32.ITAdvance();
    SSAdvance();
    bits(32) preferred_exception_return = NextInstrAddr();
    vect_offset = 0x08;
    lr_offset = 0;

    AArch32.EnterMonitorMode(preferred_exception_return, lr_offset, vect_offset);
// AArch32.TakeSVCException()
// ==========================

AArch32.TakeSVCException(bits(16) immediate)

    AArch32.ITAdvance();
    SSAdvance();
    route_to_hyp = PSTATE.EL == EL0 && EL2Enabled() && HCR.TGE == '1';

    bits(32) preferred_exception_return = NextInstrAddr();
    vect_offset = 0x08;
    lr_offset = 0;

    if PSTATE.EL == EL2 || route_to_hyp then
        exception = ExceptionSyndrome(Exception_SupervisorCall);
        exception.syndrome<15:0> = immediate;
        if PSTATE.EL == EL2 then
            AArch32.EnterHypMode(exception, preferred_exception_return, vect_offset);
        else
            AArch32.EnterHypMode(exception, preferred_exception_return, 0x14);
    else
        AArch32.EnterMode(M32_Svc, preferred_exception_return, lr_offset, vect_offset);
// AArch32.EnterHypMode()
// ======================
// Take an exception to Hyp mode.

AArch32.EnterHypMode(ExceptionRecord exception, bits(32) preferred_exception_return,
                     integer vect_offset)
    SynchronizeContext();
    assert HaveEL(EL2) && !IsSecure() && ELUsingAArch32(EL2);

    spsr = GetPSRFromPSTATE();
    if !(exception.exceptype IN {Exception_IRQ, Exception_FIQ}) then
        AArch32.ReportHypEntry(exception);
    AArch32.WriteMode(M32_Hyp);
    SPSR[] = spsr;
    ELR_hyp = preferred_exception_return;
    PSTATE.T = HSCTLR.TE;             // PSTATE.J is RES0
    PSTATE.SS = '0';
    if !HaveEL(EL3) || SCR_GEN[].EA == '0' then PSTATE.A = '1';
    if !HaveEL(EL3) || SCR_GEN[].IRQ == '0' then PSTATE.I = '1';
    if !HaveEL(EL3) || SCR_GEN[].FIQ == '0' then PSTATE.F = '1';
    PSTATE.E = HSCTLR.EE;
    PSTATE.IL = '0';
    PSTATE.IT = '00000000';
    if HaveSSBSExt() then PSTATE.SSBS = HSCTLR.DSSBS;

    BranchTo(HVBAR<31:5>:vect_offset<4:0>, BranchType_EXCEPTION);
    EndOfInstruction();
// AArch32.EnterMode()
// ===================
// Take an exception to a mode other than Monitor and Hyp mode.

AArch32.EnterMode(bits(5) target_mode, bits(32) preferred_exception_return, integer lr_offset,
                  integer vect_offset)
    SynchronizeContext();
    assert ELUsingAArch32(EL1) && PSTATE.EL != EL2;

    spsr = GetPSRFromPSTATE();
    if PSTATE.M == M32_Monitor then SCR.NS = '0';
    AArch32.WriteMode(target_mode);
    SPSR[] = spsr;
    R[14] = preferred_exception_return + lr_offset;
    PSTATE.T = SCTLR.TE;              // PSTATE.J is RES0
    PSTATE.SS = '0';
    if target_mode == M32_FIQ then
        PSTATE.<A,I,F> = '111';
    elsif target_mode IN {M32_Abort, M32_IRQ} then
        PSTATE.<A,I> = '11';
    else
        PSTATE.I = '1';
    PSTATE.E = SCTLR.EE;
    PSTATE.IL = '0';
    PSTATE.IT = '00000000';
    if HavePANExt() && SCTLR.SPAN == '0' then PSTATE.PAN = '1';
    if HaveSSBSExt() then PSTATE.SSBS = SCTLR.DSSBS;

    BranchTo(ExcVectorBase()<31:5>:vect_offset<4:0>, BranchType_EXCEPTION);
    EndOfInstruction();
// AArch32.EnterMonitorMode()
// ==========================
// Take an exception to Monitor mode.

AArch32.EnterMonitorMode(bits(32) preferred_exception_return, integer lr_offset,
                         integer vect_offset)
    SynchronizeContext();
    assert HaveEL(EL3) && ELUsingAArch32(EL3);
    from_secure = IsSecure();

    spsr = GetPSRFromPSTATE();
    if PSTATE.M == M32_Monitor then SCR.NS = '0';
    AArch32.WriteMode(M32_Monitor);
    SPSR[] = spsr;
    R[14] = preferred_exception_return + lr_offset;
    PSTATE.T = SCTLR.TE;              // PSTATE.J is RES0
    PSTATE.SS = '0';
    PSTATE.<A,I,F> = '111';
    PSTATE.E = SCTLR.EE;
    PSTATE.IL = '0';
    PSTATE.IT = '00000000';
    if HavePANExt() then
        if !from_secure then
            PSTATE.PAN = '0';
        elsif SCTLR.SPAN == '0' then
            PSTATE.PAN = '1';
    if HaveSSBSExt() then PSTATE.SSBS = SCTLR.DSSBS;

    BranchTo(MVBAR<31:5>:vect_offset<4:0>, BranchType_EXCEPTION);
    EndOfInstruction();
// AArch32.CheckAdvSIMDOrFPEnabled()
// =================================
// Check against CPACR, FPEXC, HCPTR, NSACR, and CPTR_EL3.

AArch32.CheckAdvSIMDOrFPEnabled(boolean fpexc_check, boolean advsimd)
    if PSTATE.EL == EL0 && (!HaveEL(EL2) || (!ELUsingAArch32(EL2) && HCR_EL2.TGE == '0')) && !ELUsingAArch32(EL1) then
        // The PE behaves as if FPEXC.EN is 1
        AArch64.CheckFPAdvSIMDEnabled();
    elsif PSTATE.EL == EL0 && HaveEL(EL2) && !ELUsingAArch32(EL2) && HCR_EL2.TGE == '1' && !ELUsingAArch32(EL1) then
        if fpexc_check && HCR_EL2.RW == '0' then
            fpexc_en = bits(1) IMPLEMENTATION_DEFINED "FPEXC.EN value when TGE==1 and RW==0";
            if fpexc_en == '0' then UNDEFINED;
        AArch64.CheckFPAdvSIMDEnabled();
    else
        cpacr_asedis = CPACR.ASEDIS;
        cpacr_cp10 = CPACR.cp10;

        if HaveEL(EL3) && ELUsingAArch32(EL3) && !IsSecure() then
            // Check if access disabled in NSACR
            if NSACR.NSASEDIS == '1' then cpacr_asedis = '1';
            if NSACR.cp10 == '0' then cpacr_cp10 = '00';

        if PSTATE.EL != EL2 then
            // Check if Advanced SIMD disabled in CPACR
            if advsimd && cpacr_asedis == '1' then UNDEFINED;

            if cpacr_cp10 == '10' then
                (c, cpacr_cp10) = ConstrainUnpredictableBits(Unpredictable_RESCPACR);

            // Check if access disabled in CPACR
            case cpacr_cp10 of
                when '00' disabled = TRUE;
                when '01' disabled = PSTATE.EL == EL0;
                when '11' disabled = FALSE;
            if disabled then UNDEFINED;

        // If required, check FPEXC enabled bit.
        if fpexc_check && FPEXC.EN == '0' then UNDEFINED;

        AArch32.CheckFPAdvSIMDTrap(advsimd);    // Also check against HCPTR and CPTR_EL3
// AArch32.CheckFPAdvSIMDTrap()
// ============================
// Check against CPTR_EL2 and CPTR_EL3.

AArch32.CheckFPAdvSIMDTrap(boolean advsimd)
    if EL2Enabled() && !ELUsingAArch32(EL2) then
        AArch64.CheckFPAdvSIMDTrap();
    else
        if HaveEL(EL2) && !IsSecure() then
            hcptr_tase = HCPTR.TASE;
            hcptr_cp10 = HCPTR.TCP10;

            if HaveEL(EL3) && ELUsingAArch32(EL3) && !IsSecure() then
                // Check if access disabled in NSACR
                if NSACR.NSASEDIS == '1' then hcptr_tase = '1';
                if NSACR.cp10 == '0' then hcptr_cp10 = '1';

            // Check if access disabled in HCPTR
            if (advsimd && hcptr_tase == '1') || hcptr_cp10 == '1' then
                exception = ExceptionSyndrome(Exception_AdvSIMDFPAccessTrap);
                exception.syndrome<24:20> = ConditionSyndrome();
                if advsimd then
                    exception.syndrome<5> = '1';
                else
                    exception.syndrome<5> = '0';
                    exception.syndrome<3:0> = '1010';   // coproc field, always 0xA
                if PSTATE.EL == EL2 then
                    AArch32.TakeUndefInstrException(exception);
                else
                    AArch32.TakeHypTrapException(exception);

        if HaveEL(EL3) && !ELUsingAArch32(EL3) then
            // Check if access disabled in CPTR_EL3
            if CPTR_EL3.TFP == '1' then AArch64.AdvSIMDFPAccessTrap(EL3);
    return;
// AArch32.CheckForSMCUndefOrTrap()
// ================================
// Check for UNDEFINED or trap on SMC instruction

AArch32.CheckForSMCUndefOrTrap()
    if !HaveEL(EL3) || PSTATE.EL == EL0 then
        UNDEFINED;
    if EL2Enabled() && !ELUsingAArch32(EL2) then
        AArch64.CheckForSMCUndefOrTrap(Zeros(16));
    else
        route_to_hyp = HaveEL(EL2) && !IsSecure() && PSTATE.EL == EL1 && HCR.TSC == '1';
        if route_to_hyp then
            exception = ExceptionSyndrome(Exception_MonitorCall);
            AArch32.TakeHypTrapException(exception);
// AArch32.CheckForWFxTrap()
// =========================
// Check for trap on WFE or WFI instruction

AArch32.CheckForWFxTrap(bits(2) target_el, boolean is_wfe)
    assert HaveEL(target_el);

    // Check for routing to AArch64
    if !ELUsingAArch32(target_el) then
        AArch64.CheckForWFxTrap(target_el, is_wfe);
        return;
    case target_el of
        when EL1 trap = (if is_wfe then SCTLR.nTWE else SCTLR.nTWI) == '0';
        when EL2 trap = (if is_wfe then HCR.TWE else HCR.TWI) == '1';
        when EL3 trap = (if is_wfe then SCR.TWE else SCR.TWI) == '1';
    if trap then
        if target_el == EL1 && EL2Enabled() && !ELUsingAArch32(EL2) && HCR_EL2.TGE == '1' then
            AArch64.WFxTrap(target_el, is_wfe);
        if target_el == EL3 then
            AArch32.TakeMonitorTrapException();
        elsif target_el == EL2 then
            exception = ExceptionSyndrome(Exception_WFxTrap);
            exception.syndrome<24:20> = ConditionSyndrome();
            exception.syndrome<0> = if is_wfe then '1' else '0';
            AArch32.TakeHypTrapException(exception);
        else
            AArch32.TakeUndefInstrException();
// AArch32.CheckITEnabled()
// ========================
// Check whether the T32 IT instruction is disabled.

AArch32.CheckITEnabled(bits(4) mask)
    if PSTATE.EL == EL2 then
        it_disabled = HSCTLR.ITD;
    else
        it_disabled = (if ELUsingAArch32(EL1) then SCTLR.ITD else SCTLR[].ITD);
    if it_disabled == '1' then
        if mask != '1000' then UNDEFINED;

        // Otherwise whether the IT block is allowed depends on hw1 of the next instruction.
        next_instr = AArch32.MemSingle[NextInstrAddr(), 2, AccType_IFETCH, TRUE];

        if next_instr IN {'11xxxxxxxxxxxxxx', '1011xxxxxxxxxxxx', '10100xxxxxxxxxxx',
                          '01001xxxxxxxxxxx', '010001xxx1111xxx', '010001xx1xxxx111'} then
            // It is IMPLEMENTATION DEFINED whether the Undefined Instruction exception is
            // taken on the IT instruction or the next instruction. This is not reflected in
            // the pseudocode, which always takes the exception on the IT instruction. This
            // also does not take into account cases where the next instruction is UNPREDICTABLE.
            UNDEFINED;
    return;
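A non-normative Python sketch of the 'x'-wildcard matching that the ASL IN {...} operator performs on the halfword patterns above; matches() and IT_NEXT_UNDEF are ad hoc names for this sketch.

def matches(hw, pattern):
    """pattern is 16 chars of '0', '1' or 'x', MSB first."""
    for i, ch in enumerate(pattern):
        bit = (hw >> (15 - i)) & 1
        if ch != 'x' and bit != int(ch):
            return False
    return True

IT_NEXT_UNDEF = ['11xxxxxxxxxxxxxx', '1011xxxxxxxxxxxx', '10100xxxxxxxxxxx',
                 '01001xxxxxxxxxxx', '010001xxx1111xxx', '010001xx1xxxx111']

# 0xBF00 starts '1011...', so it matches the second pattern:
assert any(matches(0xBF00, p) for p in IT_NEXT_UNDEF)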
// AArch32.CheckIllegalState()
// ===========================
// Check PSTATE.IL bit and generate Illegal Execution state exception if set.

AArch32.CheckIllegalState()
    if AArch32.GeneralExceptionsToAArch64() then
        AArch64.CheckIllegalState();
    elsif PSTATE.IL == '1' then
        route_to_hyp = PSTATE.EL == EL0 && EL2Enabled() && HCR.TGE == '1';

        bits(32) preferred_exception_return = ThisInstrAddr();
        vect_offset = 0x04;

        if PSTATE.EL == EL2 || route_to_hyp then
            exception = ExceptionSyndrome(Exception_IllegalState);
            if PSTATE.EL == EL2 then
                AArch32.EnterHypMode(exception, preferred_exception_return, vect_offset);
            else
                AArch32.EnterHypMode(exception, preferred_exception_return, 0x14);
        else
            AArch32.TakeUndefInstrException();
// AArch32.CheckSETENDEnabled()
// ============================
// Check whether the AArch32 SETEND instruction is disabled.

AArch32.CheckSETENDEnabled()
    if PSTATE.EL == EL2 then
        setend_disabled = HSCTLR.SED;
    else
        setend_disabled = (if ELUsingAArch32(EL1) then SCTLR.SED else SCTLR[].SED);
    if setend_disabled == '1' then
        UNDEFINED;
    return;

// AArch32.SystemAccessTrap()
// ==========================
// Trapped system register access.

AArch32.SystemAccessTrap(bits(5) mode, integer ec)
    (valid, target_el) = ELFromM32(mode);
    assert valid && HaveEL(target_el) && target_el != EL0 && UInt(target_el) >= UInt(PSTATE.EL);

    if target_el == EL2 then
        exception = AArch32.SystemAccessTrapSyndrome(ThisInstr(), ec);
        AArch32.TakeHypTrapException(exception);
    else
        AArch32.TakeUndefInstrException();
// AArch32.SystemAccessTrapSyndrome()
// ==================================
// Returns the syndrome information for traps on AArch32 MCR, MCRR, MRC, MRRC, and VMRS, VMSR instructions,
// other than traps that are due to HCPTR or CPACR.

ExceptionRecord AArch32.SystemAccessTrapSyndrome(bits(32) instr, integer ec)

    ExceptionRecord exception;
    case ec of
        when 0x0 exception = ExceptionSyndrome(Exception_Uncategorized);
        when 0x3 exception = ExceptionSyndrome(Exception_CP15RTTrap);
        when 0x4 exception = ExceptionSyndrome(Exception_CP15RRTTrap);
        when 0x5 exception = ExceptionSyndrome(Exception_CP14RTTrap);
        when 0x6 exception = ExceptionSyndrome(Exception_CP14DTTrap);
        when 0x7 exception = ExceptionSyndrome(Exception_AdvSIMDFPAccessTrap);
        when 0x8 exception = ExceptionSyndrome(Exception_FPIDTrap);
        when 0xC exception = ExceptionSyndrome(Exception_CP14RRTTrap);
        otherwise Unreachable();
    bits(20) iss = Zeros();

    if exception.exceptype IN {Exception_FPIDTrap, Exception_CP14RTTrap, Exception_CP15RTTrap} then
        // Trapped MRC/MCR, VMRS on FPSID
        iss<19:17> = instr<7:5>;      // opc2
        iss<16:14> = instr<23:21>;    // opc1
        iss<13:10> = instr<19:16>;    // CRn
        iss<8:5> = instr<15:12>;      // Rt
    elsif exception.exceptype IN {Exception_CP14RRTTrap, Exception_AdvSIMDFPAccessTrap, Exception_CP15RRTTrap} then
        // Trapped MRRC/MCRR, VMRS/VMSR
        iss<19:16> = instr<7:4>;      // opc1
        iss<13:10> = instr<19:16>;    // Rt2
        iss<8:5> = instr<15:12>;      // Rt
        iss<4:1> = instr<3:0>;        // CRm
    elsif exception.exceptype == Exception_CP14DTTrap then
        // Trapped LDC/STC
        iss<19:12> = instr<7:0>;      // imm8
        iss<4> = instr<23>;           // U
        iss<2:1> = instr<24,21>;      // P,W
        if instr<19:16> == '1111' then    // Rn==15, LDC(Literal addressing)/STC
            iss<8:5> = bits(4) UNKNOWN;
            iss<3> = '1';
    elsif exception.exceptype == Exception_Uncategorized then
        // Trapped for unknown reason
        iss<8:5> = instr<19:16>;      // Rn
        iss<3> = '0';
    iss<0> = instr<20>;               // Direction

    exception.syndrome<24:20> = ConditionSyndrome();
    exception.syndrome<19:0> = iss;

    return exception;
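As a non-normative aid, the Python sketch below extracts the MRC/MCR ISS fields from an A32 coprocessor instruction word following the field moves above; mrc_mcr_iss() is an ad hoc name, and the example constant is the standard A32 encoding of MRC p15, 0, r0, c1, c0, 0.

def mrc_mcr_iss(instr):
    opc2 = (instr >> 5) & 0x7      # instr<7:5>   -> iss<19:17>
    opc1 = (instr >> 21) & 0x7     # instr<23:21> -> iss<16:14>
    crn  = (instr >> 16) & 0xF     # instr<19:16> -> iss<13:10>
    rt   = (instr >> 12) & 0xF     # instr<15:12> -> iss<8:5>
    dirn = (instr >> 20) & 0x1     # instr<20>    -> iss<0> (1 = MRC, a read)
    return (opc2 << 17) | (opc1 << 14) | (crn << 10) | (rt << 5) | dirn

# MRC p15, 0, r0, c1, c0, 0 (read SCTLR) encodes as 0xEE110F10:
iss = mrc_mcr_iss(0xEE110F10)
assert iss & 1 == 1             # direction: read
assert (iss >> 10) & 0xF == 1   # CRn == c1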
// AArch32.TakeHypTrapException()
// ==============================
// Exceptions routed to Hyp mode as a Hyp Trap exception.

AArch32.TakeHypTrapException(integer ec)
    exception = AArch32.SystemAccessTrapSyndrome(ThisInstr(), ec);
    AArch32.TakeHypTrapException(exception);

// AArch32.TakeHypTrapException()
// ==============================
// Exceptions routed to Hyp mode as a Hyp Trap exception.

AArch32.TakeHypTrapException(ExceptionRecord exception)
    assert HaveEL(EL2) && !IsSecure() && ELUsingAArch32(EL2);

    bits(32) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x14;

    AArch32.EnterHypMode(exception, preferred_exception_return, vect_offset);
// AArch32.TakeMonitorTrapException()
// ==================================
// Exceptions routed to Monitor mode as a Monitor Trap exception.

AArch32.TakeMonitorTrapException()
    assert HaveEL(EL3) && ELUsingAArch32(EL3);

    bits(32) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x04;
    lr_offset = if CurrentInstrSet() == InstrSet_A32 then 4 else 2;

    AArch32.EnterMonitorMode(preferred_exception_return, lr_offset, vect_offset);
// AArch32.TakeUndefInstrException()
// =================================

AArch32.TakeUndefInstrException()
    exception = ExceptionSyndrome(Exception_Uncategorized);
    AArch32.TakeUndefInstrException(exception);

// AArch32.TakeUndefInstrException()
// =================================

AArch32.TakeUndefInstrException(ExceptionRecord exception)

    route_to_hyp = PSTATE.EL == EL0 && EL2Enabled() && HCR.TGE == '1';

    bits(32) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x04;
    lr_offset = if CurrentInstrSet() == InstrSet_A32 then 4 else 2;

    if PSTATE.EL == EL2 then
        AArch32.EnterHypMode(exception, preferred_exception_return, vect_offset);
    elsif route_to_hyp then
        AArch32.EnterHypMode(exception, preferred_exception_return, 0x14);
    else
        AArch32.EnterMode(M32_Undef, preferred_exception_return, lr_offset, vect_offset);
// AArch32.UndefinedFault()
// ========================

AArch32.UndefinedFault()
    if AArch32.GeneralExceptionsToAArch64() then AArch64.UndefinedFault();
    AArch32.TakeUndefInstrException();

// AArch32.CreateFaultRecord()
// ===========================

FaultRecord AArch32.CreateFaultRecord(Fault statuscode, bits(40) ipaddress, bits(4) domain,
                                      integer level, AccType acctype, boolean write, bit extflag,
                                      bits(4) debugmoe, bits(2) errortype, boolean secondstage, boolean s2fs1walk)

    FaultRecord fault;
    fault.statuscode = statuscode;
    if (statuscode != Fault_None && PSTATE.EL != EL2 && TTBCR.EAE == '0' && !secondstage && !s2fs1walk &&
        AArch32.DomainValid(statuscode, level)) then
        fault.domain = domain;
    else
        fault.domain = bits(4) UNKNOWN;
    fault.debugmoe = debugmoe;
    fault.errortype = errortype;
    fault.ipaddress.NS = bit UNKNOWN;
    fault.ipaddress.address = ZeroExtend(ipaddress);
    fault.level = level;
    fault.acctype = acctype;
    fault.write = write;
    fault.extflag = extflag;
    fault.secondstage = secondstage;
    fault.s2fs1walk = s2fs1walk;

    return fault;
// AArch32.DomainValid()
// =====================
// Returns TRUE if the Domain is valid for a Short-descriptor translation scheme.

boolean AArch32.DomainValid(Fault statuscode, integer level)
    assert statuscode != Fault_None;

    case statuscode of
        when Fault_Domain
            return TRUE;
        when Fault_Translation, Fault_AccessFlag, Fault_SyncExternalOnWalk, Fault_SyncParityOnWalk
            return level == 2;
        otherwise
            return FALSE;
// AArch32.FaultStatusLD()
// =======================
// Creates an exception fault status value for Abort and Watchpoint exceptions taken
// to Abort mode using AArch32 and Long-descriptor format.

bits(32) AArch32.FaultStatusLD(boolean d_side, FaultRecord fault)
    assert fault.statuscode != Fault_None;

    bits(32) fsr = Zeros();
    if HaveRASExt() && IsAsyncAbort(fault) then fsr<15:14> = fault.errortype;
    if d_side then
        if fault.acctype IN {AccType_DC, AccType_IC, AccType_AT} then
            fsr<13> = '1'; fsr<11> = '1';
        else
            fsr<11> = if fault.write then '1' else '0';
    if IsExternalAbort(fault) then fsr<12> = fault.extflag;
    fsr<9> = '1';
    fsr<5:0> = EncodeLDFSC(fault.statuscode, fault.level);

    return fsr;
// AArch32.FaultStatusSD()
// =======================
// Creates an exception fault status value for Abort and Watchpoint exceptions taken
// to Abort mode using AArch32 and Short-descriptor format.

bits(32) AArch32.FaultStatusSD(boolean d_side, FaultRecord fault)
    assert fault.statuscode != Fault_None;

    bits(32) fsr = Zeros();
    if HaveRASExt() && IsAsyncAbort(fault) then fsr<15:14> = fault.errortype;
    if d_side then
        if fault.acctype IN {AccType_DC, AccType_IC, AccType_AT} then
            fsr<13> = '1'; fsr<11> = '1';
        else
            fsr<11> = if fault.write then '1' else '0';
    if IsExternalAbort(fault) then fsr<12> = fault.extflag;
    fsr<9> = '0';
    fsr<10,3:0> = EncodeSDFSC(fault.statuscode, fault.level);
    if d_side then
        fsr<7:4> = fault.domain;      // Domain field (data fault only)

    return fsr;
// AArch32.FaultSyndrome()
// =======================
// Creates an exception syndrome value for Abort and Watchpoint exceptions taken to
// AArch32 Hyp mode.

bits(25) AArch32.FaultSyndrome(boolean d_side, FaultRecord fault)
    assert fault.statuscode != Fault_None;

    bits(25) iss = Zeros();
    if HaveRASExt() && IsAsyncAbort(fault) then iss<11:10> = fault.errortype; // AET
    if d_side then
        if IsSecondStage(fault) && !fault.s2fs1walk then iss<24:14> = LSInstructionSyndrome();
        if fault.acctype IN {AccType_DC, AccType_DC_UNPRIV, AccType_IC, AccType_AT} then
            iss<8> = '1'; iss<6> = '1';
        else
            iss<6> = if fault.write then '1' else '0';
    if IsExternalAbort(fault) then iss<9> = fault.extflag;
    iss<7> = if fault.s2fs1walk then '1' else '0';
    iss<5:0> = EncodeLDFSC(fault.statuscode, fault.level);

    return iss;
// EncodeSDFSC()
// =============
// Function that gives the Short-descriptor FSR code for different types of Fault

bits(5) EncodeSDFSC(Fault statuscode, integer level)

    bits(5) result;
    case statuscode of
        when Fault_AccessFlag
            assert level IN {1,2};
            result = if level == 1 then '00011' else '00110';
        when Fault_Alignment
            result = '00001';
        when Fault_Permission
            assert level IN {1,2};
            result = if level == 1 then '01101' else '01111';
        when Fault_Domain
            assert level IN {1,2};
            result = if level == 1 then '01001' else '01011';
        when Fault_Translation
            assert level IN {1,2};
            result = if level == 1 then '00101' else '00111';
        when Fault_SyncExternal
            result = '01000';
        when Fault_SyncExternalOnWalk
            assert level IN {1,2};
            result = if level == 1 then '01100' else '01110';
        when Fault_SyncParity
            result = '11001';
        when Fault_SyncParityOnWalk
            assert level IN {1,2};
            result = if level == 1 then '11100' else '11110';
        when Fault_AsyncParity
            result = '11000';
        when Fault_AsyncExternal
            result = '10110';
        when Fault_Debug
            result = '00010';
        when Fault_TLBConflict
            result = '10000';
        when Fault_Lockdown
            result = '10100';   // IMPLEMENTATION DEFINED
        when Fault_Exclusive
            result = '10101';   // IMPLEMENTATION DEFINED
        when Fault_ICacheMaint
            result = '00100';
        otherwise
            Unreachable();

    return result;
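A non-normative Python rendering of part of the table above, useful for decoding a DFSR status field by hand; SD_FSC and encode_sdfsc are ad hoc names, and only a subset of the fault kinds is shown.

SD_FSC = {
    ("AccessFlag", 1): 0b00011, ("AccessFlag", 2): 0b00110,
    ("Translation", 1): 0b00101, ("Translation", 2): 0b00111,
    ("Domain", 1): 0b01001, ("Domain", 2): 0b01011,
    ("Permission", 1): 0b01101, ("Permission", 2): 0b01111,
    ("Alignment", None): 0b00001, ("Debug", None): 0b00010,
}

def encode_sdfsc(kind, level=None):
    return SD_FSC[(kind, level)]

# A level-1 translation fault reports FS = 0b00101 (0x5):
assert encode_sdfsc("Translation", 1) == 0x5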
// A32ExpandImm()
// ==============

bits(32) A32ExpandImm(bits(12) imm12)

    // PSTATE.C argument to following function call does not affect the imm32 result.
    (imm32, -) = A32ExpandImm_C(imm12, PSTATE.C);

    return imm32;
// A32ExpandImm_C()
// ================

(bits(32), bit) A32ExpandImm_C(bits(12) imm12, bit carry_in)

    unrotated_value = ZeroExtend(imm12<7:0>, 32);
    (imm32, carry_out) = Shift_C(unrotated_value, SRType_ROR, 2*UInt(imm12<11:8>), carry_in);

    return (imm32, carry_out);

// DecodeImmShift()
// ================

(SRType, integer) DecodeImmShift(bits(2) srtype, bits(5) imm5)

    case srtype of
        when '00'
            shift_t = SRType_LSL; shift_n = UInt(imm5);
        when '01'
            shift_t = SRType_LSR; shift_n = if imm5 == '00000' then 32 else UInt(imm5);
        when '10'
            shift_t = SRType_ASR; shift_n = if imm5 == '00000' then 32 else UInt(imm5);
        when '11'
            if imm5 == '00000' then
                shift_t = SRType_RRX; shift_n = 1;
            else
                shift_t = SRType_ROR; shift_n = UInt(imm5);

    return (shift_t, shift_n);
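A non-normative Python sketch of A32 modified-immediate expansion: the 12-bit field is an 8-bit value rotated right by twice its 4-bit rotation field, as A32ExpandImm_C() specifies. ror32 and a32_expand_imm_c are ad hoc names; the carry is shown only for the nonzero-rotation path, matching Shift_C().

def ror32(x, n):
    n %= 32
    return ((x >> n) | (x << (32 - n))) & 0xFFFFFFFF

def a32_expand_imm_c(imm12, carry_in):
    rot = 2 * ((imm12 >> 8) & 0xF)
    imm32 = ror32(imm12 & 0xFF, rot)
    carry_out = carry_in if rot == 0 else (imm32 >> 31) & 1
    return imm32, carry_out

assert a32_expand_imm_c(0x0FF, 0)[0] == 0x000000FF   # no rotation
assert a32_expand_imm_c(0x4FF, 0)[0] == 0xFF000000   # 0xFF ROR 8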
// DecodeRegShift()
// ================

SRType DecodeRegShift(bits(2) srtype)
    case srtype of
        when '00' shift_t = SRType_LSL;
        when '01' shift_t = SRType_LSR;
        when '10' shift_t = SRType_ASR;
        when '11' shift_t = SRType_ROR;
    return shift_t;
// RRX()
// =====

bits(N) RRX(bits(N) x, bit carry_in)
    (result, -) = RRX_C(x, carry_in);
    return result;

// RRX_C()
// =======

(bits(N), bit) RRX_C(bits(N) x, bit carry_in)
    result = carry_in : x<N-1:1>;
    carry_out = x<0>;
    return (result, carry_out);
enumeration SRType {SRType_LSL, SRType_LSR, SRType_ASR, SRType_ROR, SRType_RRX};

// Shift()
// =======

bits(N) Shift(bits(N) value, SRType srtype, integer amount, bit carry_in)
    (result, -) = Shift_C(value, srtype, amount, carry_in);
    return result;
// Shift_C()
// =========

(bits(N), bit) Shift_C(bits(N) value, SRType srtype, integer amount, bit carry_in)
    assert !(srtype == SRType_RRX && amount != 1);

    if amount == 0 then
        (result, carry_out) = (value, carry_in);
    else
        case srtype of
            when SRType_LSL
                (result, carry_out) = LSL_C(value, amount);
            when SRType_LSR
                (result, carry_out) = LSR_C(value, amount);
            when SRType_ASR
                (result, carry_out) = ASR_C(value, amount);
            when SRType_ROR
                (result, carry_out) = ROR_C(value, amount);
            when SRType_RRX
                (result, carry_out) = RRX_C(value, carry_in);

    return (result, carry_out);
// T32ExpandImm()
// ==============

bits(32) T32ExpandImm(bits(12) imm12)

    // PSTATE.C argument to following function call does not affect the imm32 result.
    (imm32, -) = T32ExpandImm_C(imm12, PSTATE.C);

    return imm32;
// T32ExpandImm_C()
// ================

(bits(32), bit) T32ExpandImm_C(bits(12) imm12, bit carry_in)

    if imm12<11:10> == '00' then
        case imm12<9:8> of
            when '00'
                imm32 = ZeroExtend(imm12<7:0>, 32);
            when '01'
                imm32 = '00000000' : imm12<7:0> : '00000000' : imm12<7:0>;
            when '10'
                imm32 = imm12<7:0> : '00000000' : imm12<7:0> : '00000000';
            when '11'
                imm32 = imm12<7:0> : imm12<7:0> : imm12<7:0> : imm12<7:0>;
        carry_out = carry_in;
    else
        unrotated_value = ZeroExtend('1':imm12<6:0>, 32);
        (imm32, carry_out) = ROR_C(unrotated_value, UInt(imm12<11:7>));

    return (imm32, carry_out);
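A non-normative Python sketch of T32 modified-immediate expansion per T32ExpandImm_C(): imm12<11:10> == '00' selects one of four byte-replication forms, otherwise the value is a rotated 8-bit constant with an implied leading 1. t32_expand_imm_c is an ad hoc name.

def t32_expand_imm_c(imm12, carry_in):
    byte = imm12 & 0xFF
    if (imm12 >> 10) & 0x3 == 0:
        mode = (imm12 >> 8) & 0x3
        imm32 = [byte,
                 (byte << 16) | byte,
                 (byte << 24) | (byte << 8),
                 (byte << 24) | (byte << 16) | (byte << 8) | byte][mode]
        return imm32, carry_in
    rot = (imm12 >> 7) & 0x1F                 # rotation, always in [8..31] here
    unrotated = 0x80 | (imm12 & 0x7F)         # '1' : imm12<6:0>
    imm32 = ((unrotated >> rot) | (unrotated << (32 - rot))) & 0xFFFFFFFF
    return imm32, (imm32 >> 31) & 1           # ROR_C carry = result MSB

assert t32_expand_imm_c(0x1AB, 0) == (0x00AB00AB, 0)   # '01': 00XY00XY form
assert t32_expand_imm_c(0x4FF, 0) == (0x7F800000, 0)   # 0xFF ROR 9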
// AArch32.CheckCP15InstrCoarseTraps()
// ===================================
// Check for coarse-grained CP15 traps in HSTR and HCR.

boolean AArch32.CheckCP15InstrCoarseTraps(integer CRn, integer nreg, integer CRm)

    // Check for coarse-grained Hyp traps
    if PSTATE.EL IN {EL0, EL1} && EL2Enabled() then
        if PSTATE.EL == EL0 && !ELUsingAArch32(EL2) then
            return AArch64.CheckCP15InstrCoarseTraps(CRn, nreg, CRm);

        // Check for MCR, MRC, MCRR and MRRC disabled by HSTR<CRn/CRm>
        major = if nreg == 1 then CRn else CRm;
        if !(major IN {4,14}) && HSTR<major> == '1' then
            return TRUE;

        // Check for MRC and MCR disabled by HCR.TIDCP
        if (HCR.TIDCP == '1' && nreg == 1 &&
            ((CRn == 9  && CRm IN {0,1,2, 5,6,7,8 }) ||
             (CRn == 10 && CRm IN {0,1,    4,    8 }) ||
             (CRn == 11 && CRm IN {0,1,2,3,4,5,6,7,8,15}))) then
            return TRUE;

    return FALSE;
// AArch32.ExclusiveMonitorsPass()
// ===============================
// Return TRUE if the Exclusives monitors for the current PE include all of the addresses
// associated with the virtual address region of size bytes starting at address.
// The immediately following memory write must be to the same addresses.
boolean AArch32.ExclusiveMonitorsPass(bits(32) address, integer size)
// It is IMPLEMENTATION DEFINED whether the detection of memory aborts happens
// before or after the check on the local Exclusives monitor. As a result a failure
// of the local monitor can occur on some implementations even if the memory
// access would give a memory abort.
acctype = AccType_ATOMIC;
iswrite = TRUE;
aligned = AArch32.CheckAlignment(address, size, acctype, iswrite);
passed = AArch32.IsExclusiveVA(address, ProcessorID(), size);
if !passed then
return FALSE;
memaddrdesc = AArch32.TranslateAddress(address, acctype, iswrite, aligned, size);
// Check for aborts or debug exceptions
if IsFault(memaddrdesc) then
AArch32.Abort(address, memaddrdesc.fault);
passed = IsExclusiveLocal(memaddrdesc.paddress, ProcessorID(), size);
ClearExclusiveLocal(ProcessorID());
if passed then
if memaddrdesc.memattrs.shareable then
passed = IsExclusiveGlobal(memaddrdesc.paddress, ProcessorID(), size);
return passed;
// An optional IMPLEMENTATION DEFINED test for an exclusive access to a virtual
// address region of size bytes starting at address.
//
// It is permitted (but not required) for this function to return FALSE and
// cause a store exclusive to fail if the virtual address region is not
// totally included within the region recorded by MarkExclusiveVA().
//
// It is always safe to return TRUE which will check the physical address only.
boolean AArch32.IsExclusiveVA(bits(32) address, integer processorid, integer size);
// Optionally record an exclusive access to the virtual address region of size bytes
// starting at address for processorid.
AArch32.MarkExclusiveVA(bits(32) address, integer processorid, integer size);
// AArch32.SetExclusiveMonitors()
// ==============================
// Sets the Exclusives monitors for the current PE to record the addresses associated
// with the virtual address region of size bytes starting at address.
AArch32.SetExclusiveMonitors(bits(32) address, integer size)
acctype = AccType_ATOMIC;
iswrite = FALSE;
aligned = (address == Align(address, size));
memaddrdesc = AArch32.TranslateAddress(address, acctype, iswrite, aligned, size);
// Check for aborts or debug exceptions
if IsFault(memaddrdesc) then
return;
if memaddrdesc.memattrs.shareable then
MarkExclusiveGlobal(memaddrdesc.paddress, ProcessorID(), size);
MarkExclusiveLocal(memaddrdesc.paddress, ProcessorID(), size);
AArch32.MarkExclusiveVA(address, ProcessorID(), size);
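A toy single-PE C model of the flow above (invented names; a real implementation also involves translation, the global monitor and IMPLEMENTATION DEFINED granule sizes). mark_exclusive() plays the role of AArch32.SetExclusiveMonitors and monitors_pass() the role of AArch32.ExclusiveMonitorsPass.
    #include <stdbool.h>
    #include <stdint.h>

    static bool     monitor_open;
    static uint32_t monitor_addr;
    static unsigned monitor_size;

    static void mark_exclusive(uint32_t addr, unsigned size)
    {
        monitor_open = true;      /* load-exclusive marks the region */
        monitor_addr = addr;
        monitor_size = size;
    }

    static bool monitors_pass(uint32_t addr, unsigned size)
    {
        /* store-exclusive succeeds only while the monitor covers the region;
           the monitor is cleared whether or not the check passes */
        bool pass = monitor_open && addr == monitor_addr && size == monitor_size;
        monitor_open = false;
        return pass;
    }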
// CheckAdvSIMDEnabled()
// =====================
CheckAdvSIMDEnabled()
fpexc_check = TRUE;
advsimd = TRUE;
AArch32.CheckAdvSIMDOrFPEnabled(fpexc_check, advsimd);
// Return from CheckAdvSIMDOrFPEnabled() occurs only if Advanced SIMD access is permitted
// Make temporary copy of D registers
// _Dclone[] is used as input data for instruction pseudocode
for i = 0 to 31
_Dclone[i] = D[i];
return;
// CheckAdvSIMDOrVFPEnabled()
// ==========================
CheckAdvSIMDOrVFPEnabled(boolean include_fpexc_check, boolean advsimd)
AArch32.CheckAdvSIMDOrFPEnabled(include_fpexc_check, advsimd);
// Return from CheckAdvSIMDOrFPEnabled() occurs only if VFP access is permitted
return;
// CheckCryptoEnabled32()
// ======================
CheckCryptoEnabled32()
CheckAdvSIMDEnabled();
// Return from CheckAdvSIMDEnabled() occurs only if access is permitted
return;
// CheckVFPEnabled()
// =================
CheckVFPEnabled(boolean include_fpexc_check)
advsimd = FALSE;
AArch32.CheckAdvSIMDOrFPEnabled(include_fpexc_check, advsimd);
// Return from CheckAdvSIMDOrFPEnabled() occurs only if VFP access is permitted
return;
// FPHalvedSub()
// =============
bits(N) FPHalvedSub(bits(N) op1, bits(N) op2, FPCRType fpcr)
assert N IN {16,32,64};
rounding = FPRoundingMode(fpcr);
(type1,sign1,value1) = FPUnpack(op1, fpcr);
(type2,sign2,value2) = FPUnpack(op2, fpcr);
(done,result) = FPProcessNaNs(type1, type2, op1, op2, fpcr);
if !done then
inf1 = (type1 == FPType_Infinity); inf2 = (type2 == FPType_Infinity);
zero1 = (type1 == FPType_Zero); zero2 = (type2 == FPType_Zero);
if inf1 && inf2 && sign1 == sign2 then
result = FPDefaultNaN();
FPProcessException(FPExc_InvalidOp, fpcr);
elsif (inf1 && sign1 == '0') || (inf2 && sign2 == '1') then
result = FPInfinity('0');
elsif (inf1 && sign1 == '1') || (inf2 && sign2 == '0') then
result = FPInfinity('1');
elsif zero1 && zero2 && sign1 != sign2 then
result = FPZero(sign1);
else
result_value = (value1 - value2) / 2.0;
if result_value == 0.0 then // Sign of exact zero result depends on rounding mode
result_sign = if rounding == FPRounding_NEGINF then '1' else '0';
result = FPZero(result_sign);
else
result = FPRound(result_value, fpcr);
return result;
// FPRSqrtStep()
// =============
bits(N) FPRSqrtStep(bits(N) op1, bits(N) op2)
assert N IN {16,32};
FPCRType fpcr = StandardFPSCRValue();
(type1,sign1,value1) = FPUnpack(op1, fpcr);
(type2,sign2,value2) = FPUnpack(op2, fpcr);
(done,result) = FPProcessNaNs(type1, type2, op1, op2, fpcr);
if !done then
inf1 = (type1 == FPType_Infinity); inf2 = (type2 == FPType_Infinity);
zero1 = (type1 == FPType_Zero); zero2 = (type2 == FPType_Zero);
bits(N) product;
if (inf1 && zero2) || (zero1 && inf2) then
product = FPZero('0');
else
product = FPMul(op1, op2, fpcr);
bits(N) three = FPThree('0');
result = FPHalvedSub(three, product, fpcr);
return result;
// FPRecipStep()
// =============
bits(N) FPRecipStep(bits(N) op1, bits(N) op2)
assert N IN {16,32};
FPCRType fpcr = StandardFPSCRValue();
(type1,sign1,value1) = FPUnpack(op1, fpcr);
(type2,sign2,value2) = FPUnpack(op2, fpcr);
(done,result) = FPProcessNaNs(type1, type2, op1, op2, fpcr);
if !done then
inf1 = (type1 == FPType_Infinity); inf2 = (type2 == FPType_Infinity);
zero1 = (type1 == FPType_Zero); zero2 = (type2 == FPType_Zero);
bits(N) product;
if (inf1 && zero2) || (zero1 && inf2) then
product = FPZero('0');
else
product = FPMul(op1, op2, fpcr);
bits(N) two = FPTwo('0');
result = FPSub(two, product, fpcr);
return result;
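FPRSqrtStep and FPRecipStep are the Newton-Raphson step operations: ignoring the NaN, infinity and zero special cases handled above, they compute (3 - op1*op2)/2 and (2 - op1*op2) respectively. A plain-C sketch of the arithmetic only (invented names, default rounding, no flush-to-zero):
    /* One refinement of x ~ 1/a is x * (2 - a*x); of x ~ 1/sqrt(a) it is
       x * ((3 - a*x*x) / 2). The step instructions supply the factors. */
    static float recip_step(float op1, float op2) { return 2.0f - op1 * op2; }
    static float rsqrt_step(float op1, float op2) { return (3.0f - op1 * op2) / 2.0f; }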
// StandardFPSCRValue()
// ====================
FPCRType StandardFPSCRValue()
return '00000' : FPSCR.AHP : '110000' : FPSCR.FZ16 : '0000000000000000000';
// AArch32.CheckAlignment()
// ========================
boolean AArch32.CheckAlignment(bits(32) address, integer alignment, AccType acctype,
boolean iswrite)
if PSTATE.EL == EL0 && !ELUsingAArch32(S1TranslationRegime()) then
A = SCTLR[].A; // use AArch64 register, when higher Exception level is using AArch64
elsif PSTATE.EL == EL2 then
A = HSCTLR.A;
else
A = SCTLR.A;
aligned = (address == Align(address, alignment));
atomic = acctype IN { AccType_ATOMIC, AccType_ATOMICRW, AccType_ORDEREDATOMIC, AccType_ORDEREDATOMICRW };
ordered = acctype IN { AccType_ORDERED, AccType_ORDEREDRW, AccType_LIMITEDORDERED, AccType_ORDEREDATOMIC, AccType_ORDEREDATOMICRW };
vector = acctype == AccType_VEC;
// AccType_VEC is used for SIMD element alignment checks only
check = (atomic || ordered || vector || A == '1');
if check && !aligned then
secondstage = FALSE;
AArch32.Abort(address, AArch32.AlignmentFault(acctype, iswrite, secondstage));
return aligned;
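For the power-of-two sizes used by these accessors (1, 2, 4, 8, 16), the address == Align(address, alignment) test reduces to a mask check; an illustrative C helper (not the architecture's code):
    #include <stdbool.h>
    #include <stdint.h>

    static bool is_aligned(uint32_t address, uint32_t alignment)
    {
        return (address & (alignment - 1u)) == 0;
    }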
// AArch32.MemSingle[] - non-assignment (read) form
// ================================================
// Perform an atomic, little-endian read of 'size' bytes.
bits(size*8) AArch32.MemSingle[bits(32) address, integer size, AccType acctype, boolean wasaligned]
assert size IN {1, 2, 4, 8, 16};
assert address == Align(address, size);
AddressDescriptor memaddrdesc;
bits(size*8) value;
iswrite = FALSE;
// MMU or MPU
memaddrdesc = AArch32.TranslateAddress(address, acctype, iswrite, wasaligned, size);
// Check for aborts or debug exceptions
if IsFault(memaddrdesc) then
AArch32.Abort(address, memaddrdesc.fault);
// Memory array access
accdesc = CreateAccessDescriptor(acctype);
if HaveMTEExt() then
if AArch64.AccessIsTagChecked(ZeroExtend(address, 64), acctype) then
bits(4) ptag = AArch64.PhysicalTag(ZeroExtend(address, 64));
if !AArch64.CheckTag(memaddrdesc, ptag, iswrite) then
AArch64.TagCheckFail(ZeroExtend(address, 64), iswrite);
value = _Mem[memaddrdesc, size, accdesc];
return value;
// AArch32.MemSingle[] - assignment (write) form
// =============================================
// Perform an atomic, little-endian write of 'size' bytes.
AArch32.MemSingle[bits(32) address, integer size, AccType acctype, boolean wasaligned] = bits(size*8) value
assert size IN {1, 2, 4, 8, 16};
assert address == Align(address, size);
AddressDescriptor memaddrdesc;
iswrite = TRUE;
// MMU or MPU
memaddrdesc = AArch32.TranslateAddress(address, acctype, iswrite, wasaligned, size);
// Check for aborts or debug exceptions
if IsFault(memaddrdesc) then
AArch32.Abort(address, memaddrdesc.fault);
// Effect on exclusives
if memaddrdesc.memattrs.shareable then
ClearExclusiveByAddress(memaddrdesc.paddress, ProcessorID(), size);
// Memory array access
accdesc = CreateAccessDescriptor(acctype);
if HaveMTEExt() then
if AArch64.AccessIsTagChecked(ZeroExtend(address, 64), acctype) then
bits(4) ptag = AArch64.PhysicalTag(ZeroExtend(address, 64));
if !AArch64.CheckTag(memaddrdesc, ptag, iswrite) then
AArch64.TagCheckFail(ZeroExtend(address, 64), iswrite);
_Mem[memaddrdesc, size, accdesc] = value;
return;
Hint_PreloadData(bits(32) address);
Hint_PreloadDataForWrite(bits(32) address);
Hint_PreloadInstr(bits(32) address);
// MemA[] - non-assignment form
// ============================
bits(8*size) MemA[bits(32) address, integer size]
acctype = AccType_ATOMIC;
return Mem_with_type[address, size, acctype];
// MemA[] - assignment form
// ========================
MemA[bits(32) address, integer size] = bits(8*size) value
acctype = AccType_ATOMIC;
Mem_with_type[address, size, acctype] = value;
return;
// MemO[] - non-assignment form
// ============================
bits(8*size) MemO[bits(32) address, integer size]
acctype = AccType_ORDERED;
return Mem_with_type[address, size, acctype];
// MemO[] - assignment form
// ========================
MemO[bits(32) address, integer size] = bits(8*size) value
acctype = AccType_ORDERED;
Mem_with_type[address, size, acctype] = value;
return;
// MemU[] - non-assignment form
// ============================
bits(8*size) MemU[bits(32) address, integer size]
acctype = AccType_NORMAL;
return Mem_with_type[address, size, acctype];
// MemU[] - assignment form
// ========================
MemU[bits(32) address, integer size] = bits(8*size) value
acctype = AccType_NORMAL;
Mem_with_type[address, size, acctype] = value;
return;
// MemU_unpriv[] - non-assignment form
// ===================================
bits(8*size) MemU_unpriv[bits(32) address, integer size]
acctype = AccType_UNPRIV;
return Mem_with_type[address, size, acctype];
// MemU_unpriv[] - assignment form
// ===============================
MemU_unpriv[bits(32) address, integer size] = bits(8*size) value
acctype = AccType_UNPRIV;
Mem_with_type[address, size, acctype] = value;
return;
// Mem_with_type[] - non-assignment (read) form
// ============================================
// Perform a read of 'size' bytes. The access byte order is reversed for a big-endian access.
// Instruction fetches would call AArch32.MemSingle directly.
bits(size*8) Mem_with_type[bits(32) address, integer size, AccType acctype]
assert size IN {1, 2, 4, 8, 16};
bits(size*8) value;
boolean iswrite = FALSE;
aligned = AArch32.CheckAlignment(address, size, acctype, iswrite);
if !aligned then
assert size > 1;
value<7:0> = AArch32.MemSingle[address, 1, acctype, aligned];
// For subsequent bytes it is CONSTRAINED UNPREDICTABLE whether an unaligned Device memory
// access will generate an Alignment Fault, as to get this far means the first byte did
// not, so we must be changing to a new translation page.
c = ConstrainUnpredictable(Unpredictable_DEVPAGE2);
assert c IN {Constraint_FAULT, Constraint_NONE};
if c == Constraint_NONE then aligned = TRUE;
for i = 1 to size-1
value<8*i+7:8*i> = AArch32.MemSingle[address+i, 1, acctype, aligned];
else
value = AArch32.MemSingle[address, size, acctype, aligned];
if BigEndian() then
value = BigEndianReverse(value);
return value;
// Mem_with_type[] - assignment (write) form
// =========================================
// Perform a write of 'size' bytes. The byte order is reversed for a big-endian access.
Mem_with_type[bits(32) address, integer size, AccType acctype] = bits(size*8) value
boolean iswrite = TRUE;
if BigEndian() then
value = BigEndianReverse(value);
aligned = AArch32.CheckAlignment(address, size, acctype, iswrite);
if !aligned then
assert size > 1;
AArch32.MemSingle[address, 1, acctype, aligned] = value<7:0>;
// For subsequent bytes it is CONSTRAINED UNPREDICTABLE whether an unaligned Device memory
// access will generate an Alignment Fault, as to get this far means the first byte did
// not, so we must be changing to a new translation page.
c = ConstrainUnpredictable(Unpredictable_DEVPAGE2);
assert c IN {Constraint_FAULT, Constraint_NONE};
if c == Constraint_NONE then aligned = TRUE;
for i = 1 to size-1
AArch32.MemSingle[address+i, 1, acctype, aligned] = value<8*i+7:8*i>;
else
AArch32.MemSingle[address, size, acctype, aligned] = value;
return;
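A C sketch of the byte reversal applied by Mem_with_type when BigEndian() is TRUE, for the 4-byte case (the pseudocode BigEndianReverse is size-generic; the helper name is invented):
    #include <stdint.h>

    static uint32_t big_endian_reverse32(uint32_t v)
    {
        return (v >> 24) | ((v >> 8) & 0x0000FF00u) |
               ((v << 8) & 0x00FF0000u) | (v << 24);
    }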
// AArch32.ESBOperation()
// ======================
// Perform the AArch32 ESB operation for ESB executed in AArch32 state
AArch32.ESBOperation()
// Check if routed to AArch64 state
route_to_aarch64 = PSTATE.EL == EL0 && !ELUsingAArch32(EL1);
if !route_to_aarch64 && EL2Enabled() && !ELUsingAArch32(EL2) then
route_to_aarch64 = HCR_EL2.TGE == '1' || HCR_EL2.AMO == '1';
if !route_to_aarch64 && HaveEL(EL3) && !ELUsingAArch32(EL3) then
route_to_aarch64 = SCR_EL3.EA == '1';
if route_to_aarch64 then
AArch64.ESBOperation();
return;
route_to_monitor = HaveEL(EL3) && ELUsingAArch32(EL3) && SCR.EA == '1';
route_to_hyp = PSTATE.EL IN {EL0, EL1} && EL2Enabled() && (HCR.TGE == '1' || HCR.AMO == '1');
if route_to_monitor then
target = M32_Monitor;
elsif route_to_hyp || PSTATE.M == M32_Hyp then
target = M32_Hyp;
else
target = M32_Abort;
if IsSecure() then
mask_active = TRUE;
elsif target == M32_Monitor then
mask_active = SCR.AW == '1' && (!HaveEL(EL2) || (HCR.TGE == '0' && HCR.AMO == '0'));
else
mask_active = target == M32_Abort || PSTATE.M == M32_Hyp;
mask_set = PSTATE.A == '1';
(-, el) = ELFromM32(target);
intdis = Halted() || ExternalDebugInterruptsDisabled(el);
masked = intdis || (mask_active && mask_set);
// Check for a masked Physical SError pending
if IsPhysicalSErrorPending() && masked then
syndrome32 = AArch32.PhysicalSErrorSyndrome();
DISR = AArch32.ReportDeferredSError(syndrome32.AET, syndrome32.ExT);
ClearPendingPhysicalSError();
return;
// Return the SError syndrome
AArch32.SErrorSyndrome AArch32.PhysicalSErrorSyndrome();
// AArch32.ReportDeferredSError()
// ==============================
// Return deferred SError syndrome
bits(32) AArch32.ReportDeferredSError(bits(2) AET, bit ExT)
bits(32) target;
target<31> = '1'; // A
syndrome = Zeros(16);
if PSTATE.EL == EL2 then
syndrome<11:10> = AET; // AET
syndrome<9> = ExT; // EA
syndrome<5:0> = '010001'; // DFSC
else
syndrome<15:14> = AET; // AET
syndrome<12> = ExT; // ExT
syndrome<9> = TTBCR.EAE; // LPAE
if TTBCR.EAE == '1' then // Long-descriptor format
syndrome<5:0> = '010001'; // STATUS
else // Short-descriptor format
syndrome<10,3:0> = '10110'; // FS
if HaveAnyAArch64() then
target<24:0> = ZeroExtend(syndrome); // Any RES0 fields must be set to zero
else
target<15:0> = syndrome;
return target;
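A worked example of the short-descriptor case above (TTBCR.EAE == '0' on an AArch32-only implementation; invented helper name): bit 31 is A, bits 15:14 are AET, bit 12 is ExT, bit 9 (LPAE) is 0, and FS = '10110' is split across bits 10 and 3:0.
    #include <stdint.h>

    static uint32_t deferred_serror_short(uint32_t aet, uint32_t ext)
    {
        uint32_t target = 1u << 31;          /* A */
        target |= (aet & 0x3u) << 14;        /* AET */
        target |= (ext & 0x1u) << 12;        /* ExT */
        target |= 1u << 10;                  /* FS<4> of '10110' */
        target |= 0x6u;                      /* FS<3:0> = '0110' */
        return target;
    }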
type AArch32.SErrorSyndrome is (
bits(2) AET,
bit ExT
)
// AArch32.vESBOperation()
// =======================
// Perform the ESB operation for virtual SError interrupts executed in AArch32 state
AArch32.vESBOperation()
assert PSTATE.EL IN {EL0, EL1} && EL2Enabled();
// Check for EL2 using AArch64 state
if !ELUsingAArch32(EL2) then
AArch64.vESBOperation();
return;
// If physical SError interrupts are routed to Hyp mode, and TGE is not set, then a
// virtual SError interrupt might be pending
vSEI_enabled = HCR.TGE == '0' && HCR.AMO == '1';
vSEI_pending = vSEI_enabled && HCR.VA == '1';
vintdis = Halted() || ExternalDebugInterruptsDisabled(EL1);
vmasked = vintdis || PSTATE.A == '1';
// Check for a masked virtual SError pending
if vSEI_pending && vmasked then
VDISR = AArch32.ReportDeferredSError(VDFSR<15:14>, VDFSR<12>);
HCR.VA = '0'; // Clear pending virtual SError
return;
// AArch32.ResetGeneralRegisters()
// ===============================
AArch32.ResetGeneralRegisters()
for i = 0 to 7
R[i] = bits(32) UNKNOWN;
for i = 8 to 12
Rmode[i, M32_User] = bits(32) UNKNOWN;
Rmode[i, M32_FIQ] = bits(32) UNKNOWN;
if HaveEL(EL2) then Rmode[13, M32_Hyp] = bits(32) UNKNOWN; // No R14_hyp
for i = 13 to 14
Rmode[i, M32_User] = bits(32) UNKNOWN;
Rmode[i, M32_FIQ] = bits(32) UNKNOWN;
Rmode[i, M32_IRQ] = bits(32) UNKNOWN;
Rmode[i, M32_Svc] = bits(32) UNKNOWN;
Rmode[i, M32_Abort] = bits(32) UNKNOWN;
Rmode[i, M32_Undef] = bits(32) UNKNOWN;
if HaveEL(EL3) then Rmode[i, M32_Monitor] = bits(32) UNKNOWN;
return;
// AArch32.ResetSIMDFPRegisters()
// ==============================
AArch32.ResetSIMDFPRegisters()
for i = 0 to 15
Q[i] = bits(128) UNKNOWN;
return;
// AArch32.ResetSpecialRegisters()
// ===============================
AArch32.ResetSpecialRegisters()
// AArch32 special registers
SPSR_fiq = bits(32) UNKNOWN;
SPSR_irq = bits(32) UNKNOWN;
SPSR_svc = bits(32) UNKNOWN;
SPSR_abt = bits(32) UNKNOWN;
SPSR_und = bits(32) UNKNOWN;
if HaveEL(EL2) then
SPSR_hyp = bits(32) UNKNOWN;
ELR_hyp = bits(32) UNKNOWN;
if HaveEL(EL3) then
SPSR_mon = bits(32) UNKNOWN;
// External debug special registers
DLR = bits(32) UNKNOWN;
DSPSR = bits(32) UNKNOWN;
return;
AArch32.ResetSystemRegisters(boolean cold_reset);
// ALUExceptionReturn()
// ====================
ALUExceptionReturn(bits(32) address)
if PSTATE.EL == EL2 then
UNDEFINED;
elsif PSTATE.M IN {M32_User,M32_System} then
UNPREDICTABLE; // UNDEFINED or NOP
else
AArch32.ExceptionReturn(address, SPSR[]);
// ALUWritePC()
// ============
ALUWritePC(bits(32) address)
if CurrentInstrSet() == InstrSet_A32 then
BXWritePC(address, BranchType_INDIR);
else
BranchWritePC(address, BranchType_INDIR);
// BXWritePC()
// ===========
BXWritePC(bits(32) address, BranchType branch_type)
if address<0> == '1' then
SelectInstrSet(InstrSet_T32);
address<0> = '0';
else
SelectInstrSet(InstrSet_A32);
// For branches to an unaligned PC counter in A32 state, the processor takes the branch
// and does one of:
// * Forces the address to be aligned
// * Leaves the PC unaligned, meaning the target generates a PC Alignment fault.
if address<1> == '1' && ConstrainUnpredictableBool(Unpredictable_A32FORCEALIGNPC) then
address<1> = '0';
BranchTo(address, branch_type);
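A C sketch of the interworking rule in BXWritePC (invented state struct; the CONSTRAINED UNPREDICTABLE unaligned-A32 case is ignored): bit 0 of the target selects T32, and the selector bit is cleared before the branch is taken.
    #include <stdbool.h>
    #include <stdint.h>

    typedef struct { bool thumb; uint32_t pc; } cpu_state;

    static void bx_write_pc(cpu_state *cpu, uint32_t address)
    {
        if (address & 1u) {
            cpu->thumb = true;
            address &= ~1u;              /* address<0> = '0' */
        } else {
            cpu->thumb = false;          /* A32; address<1> handling omitted */
        }
        cpu->pc = address;
    }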
// BranchWritePC()
// ===============
BranchWritePC(bits(32) address, BranchType branch_type)
if CurrentInstrSet() == InstrSet_A32 then
address<1:0> = '00';
else
address<0> = '0';
BranchTo(address, branch_type);
// D[] - non-assignment form
// =========================
bits(64) D[integer n]
assert n >= 0 && n <= 31;
base = (n MOD 2) * 64;
bits(128) vreg = V[n DIV 2];
return vreg<base+63:base>;
// D[] - assignment form
// =====================
D[integer n] = bits(64) value
assert n >= 0 && n <= 31;
base = (n MOD 2) * 64;
bits(128) vreg = V[n DIV 2];
vreg<base+63:base> = value;
V[n DIV 2] = vreg;
return;
// Din[] - non-assignment form
// ===========================
bits(64) Din[integer n]
assert n >= 0 && n <= 31;
return _Dclone[n];
// LR - assignment form
// ====================
LR = bits(32) value
R[14] = value;
return;
// LR - non-assignment form
// ========================
bits(32) LR
return R[14];
// LoadWritePC()
// =============
LoadWritePC(bits(32) address)
BXWritePC(address, BranchType_INDIR);
// LookUpRIndex()
// ==============
integer LookUpRIndex(integer n, bits(5) mode)
assert n >= 0 && n <= 14;
case n of // Select index by mode: usr fiq irq svc abt und hyp
when 8 result = RBankSelect(mode, 8, 24, 8, 8, 8, 8, 8);
when 9 result = RBankSelect(mode, 9, 25, 9, 9, 9, 9, 9);
when 10 result = RBankSelect(mode, 10, 26, 10, 10, 10, 10, 10);
when 11 result = RBankSelect(mode, 11, 27, 11, 11, 11, 11, 11);
when 12 result = RBankSelect(mode, 12, 28, 12, 12, 12, 12, 12);
when 13 result = RBankSelect(mode, 13, 29, 17, 19, 21, 23, 15);
when 14 result = RBankSelect(mode, 14, 30, 16, 18, 20, 22, 14);
otherwise result = n;
return result;
bits(32) SP_mon;
bits(32) LR_mon;
// PC - non-assignment form
// ========================
bits(32) PC
return R[15]; // This includes the offset from AArch32 state
// PCStoreValue()
// ==============
bits(32) PCStoreValue()
// This function returns the PC value. On architecture versions before Armv7, it
// is permitted to instead return PC+4, provided it does so consistently. It is
// used only to describe A32 instructions, so it returns the address of the current
// instruction plus 8 (normally) or 12 (when the alternative is permitted).
return PC;
// Q[] - non-assignment form
// =========================
bits(128) Q[integer n]
assert n >= 0 && n <= 15;
return V[n];
// Q[] - assignment form
// =====================
Q[integer n] = bits(128) value
assert n >= 0 && n <= 15;
V[n] = value;
return;
// Qin[] - non-assignment form
// ===========================
bits(128) Qin[integer n]
assert n >= 0 && n <= 15;
return Din[2*n+1]:Din[2*n];
// R[] - assignment form
// =====================
R[integer n] = bits(32) value
Rmode[n, PSTATE.M] = value;
return;
// R[] - non-assignment form
// =========================
bits(32) R[integer n]
if n == 15 then
offset = (if CurrentInstrSet() == InstrSet_A32 then 8 else 4);
return _PC<31:0> + offset;
else
return Rmode[n, PSTATE.M];
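The R[15] read above adds a legacy pipeline offset to the address of the current instruction: +8 in A32 state, +4 in T32 state. A one-line C sketch (invented name):
    #include <stdint.h>

    static uint32_t read_r15(uint32_t instr_addr, int is_a32)
    {
        return instr_addr + (is_a32 ? 8u : 4u);
    }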
// RBankSelect()
// =============
integer RBankSelect(bits(5) mode, integer usr, integer fiq, integer irq,
integer svc, integer abt, integer und, integer hyp)
case mode of
when M32_User result = usr; // User mode
when M32_FIQ result = fiq; // FIQ mode
when M32_IRQ result = irq; // IRQ mode
when M32_Svc result = svc; // Supervisor mode
when M32_Abort result = abt; // Abort mode
when M32_Hyp result = hyp; // Hyp mode
when M32_Undef result = und; // Undefined mode
when M32_System result = usr; // System mode uses User mode registers
otherwise Unreachable(); // Monitor mode
return result;
// Rmode[] - non-assignment form
// =============================
bits(32) Rmode[integer n, bits(5) mode]
assert n >= 0 && n <= 14;
// Check for attempted use of Monitor mode in Non-secure state.
if !IsSecure() then assert mode != M32_Monitor;
assert !BadMode(mode);
if mode == M32_Monitor then
if n == 13 then return SP_mon;
elsif n == 14 then return LR_mon;
else return _R[n]<31:0>;
else
return _R[LookUpRIndex(n, mode)]<31:0>;
// Rmode[] - assignment form
// =========================
Rmode[integer n, bits(5) mode] = bits(32) value
assert n >= 0 && n <= 14;
// Check for attempted use of Monitor mode in Non-secure state.
if !IsSecure() then assert mode != M32_Monitor;
assert !BadMode(mode);
if mode == M32_Monitor then
if n == 13 then SP_mon = value;
elsif n == 14 then LR_mon = value;
else _R[n]<31:0> = value;
else
// It is CONSTRAINED UNPREDICTABLE whether the upper 32 bits of the X
// register are unchanged or set to zero. This is also tested for on
// exception entry, as this applies to all AArch32 registers.
if !HighestELUsingAArch32() && ConstrainUnpredictableBool(Unpredictable_ZEROUPPER) then
_R[LookUpRIndex(n, mode)] = ZeroExtend(value);
else
_R[LookUpRIndex(n, mode)]<31:0> = value;
return;
// S[] - non-assignment form
// =========================
bits(32) S[integer n]
assert n >= 0 && n <= 31;
base = (n MOD 4) * 32;
bits(128) vreg = V[n DIV 4];
return vreg<base+31:base>;
// S[] - assignment form
// =====================
S[integer n] = bits(32) value
assert n >= 0 && n <= 31;
base = (n MOD 4) * 32;
bits(128) vreg = V[n DIV 4];
vreg<base+31:base> = value;
V[n DIV 4] = vreg;
return;
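An illustrative C model (invented names, not the architecture's code) of the register overlap implemented by the S[], D[] and Q[] accessors: sixteen 128-bit Q registers, each holding two D views and four S views.
    #include <stdint.h>

    typedef struct { uint64_t lo, hi; } q_reg;
    static q_reg V[16];

    static uint64_t read_d(int n)   /* D[n], 0 <= n <= 31 */
    {
        const q_reg *q = &V[n / 2];
        return (n % 2) ? q->hi : q->lo;
    }

    static uint32_t read_s(int n)   /* S[n], 0 <= n <= 31 */
    {
        uint64_t d = read_d(n / 2);
        return (uint32_t)((n % 2) ? (d >> 32) : d);
    }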
// SP - assignment form
// ====================
SP = bits(32) value
R[13] = value;
return;
// SP - non-assignment form
// ========================
bits(32) SP
return R[13];
array bits(64) _Dclone[0..31];
// AArch32.ExceptionReturn()
// =========================
AArch32.ExceptionReturn(bits(32) new_pc, bits(32) spsr)
SynchronizeContext();
// Attempts to change to an illegal mode or state will invoke the Illegal Execution state
// mechanism
SetPSTATEFromPSR(spsr);
ClearExclusiveLocal(ProcessorID());
SendEventLocal();
if PSTATE.IL == '1' then
// If the exception return is illegal, PC[1:0] are UNKNOWN
new_pc<1:0> = bits(2) UNKNOWN;
else
// LR[1:0] or LR[0] are treated as being 0, depending on the target instruction set state
if PSTATE.T == '1' then
new_pc<0> = '0'; // T32
else
new_pc<1:0> = '00'; // A32
BranchTo(new_pc, BranchType_ERET);
// AArch32.ExecutingATS1xPInstr()
// ==============================
// Return TRUE if current instruction is AT S1CPR/WP
boolean AArch32.ExecutingATS1xPInstr()
if !HavePrivATExt() then return FALSE;
instr = ThisInstr();
if instr<24+:4> == '1110' && instr<8+:4> == '1110' then
op1 = instr<21+:3>;
CRn = instr<16+:4>;
CRm = instr<0+:4>;
op2 = instr<5+:3>;
return (op1 == '000' && CRn == '0111' && CRm == '1001' && op2 IN {'000','001'});
else
return FALSE;
// AArch32.ExecutingCP10or11Instr()
// ================================
boolean AArch32.ExecutingCP10or11Instr()
instr = ThisInstr();
instr_set = CurrentInstrSet();
assert instr_set IN {InstrSet_A32, InstrSet_T32};
if instr_set == InstrSet_A32 then
return ((instr<27:24> == '1110' || instr<27:25> == '110') && instr<11:8> == '101x');
else // InstrSet_T32
return (instr<31:28> == '111x' && (instr<27:24> == '1110' || instr<27:25> == '110') && instr<11:8> == '101x');
// AArch32.ExecutingLSMInstr()
// ===========================
// Returns TRUE if processor is executing a Load/Store Multiple instruction
boolean AArch32.ExecutingLSMInstr()
instr = ThisInstr();
instr_set = CurrentInstrSet();
assert instr_set IN {InstrSet_A32, InstrSet_T32};
if instr_set == InstrSet_A32 then
return (instr<28+:4> != '1111' && instr<25+:3> == '100');
else // InstrSet_T32
if ThisInstrLength() == 16 then
return (instr<12+:4> == '1100');
else
return (instr<25+:7> == '1110100' && instr<22> == '0');
// AArch32.ITAdvance()
// ===================
AArch32.ITAdvance()
if PSTATE.IT<2:0> == '000' then
PSTATE.IT = '00000000';
else
PSTATE.IT<4:0> = LSL(PSTATE.IT<4:0>, 1);
return;
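A C sketch of the same advance on an 8-bit ITSTATE value (invented name): when the low three bits are zero the IT block has finished, otherwise only the low five bits shift left and the top three (the base condition) are preserved.
    #include <stdint.h>

    static uint8_t it_advance(uint8_t itstate)
    {
        if ((itstate & 0x07u) == 0)
            return 0;
        return (uint8_t)((itstate & 0xE0u) | ((itstate << 1) & 0x1Fu));
    }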
// Read from a 32-bit AArch32 System register and return the register's contents.
bits(32) AArch32.SysRegRead(integer cp_num, bits(32) instr);
// Read from a 64-bit AArch32 System register and return the register's contents.
bits(64) AArch32.SysRegRead64(integer cp_num, bits(32) instr);
// AArch32.SysRegReadCanWriteAPSR()
// ================================
// Determines whether the AArch32 System register read instruction can write to APSR flags.
boolean AArch32.SysRegReadCanWriteAPSR(integer cp_num, bits(32) instr)
assert UsingAArch32();
assert (cp_num IN {14,15});
assert cp_num == UInt(instr<11:8>);
opc1 = UInt(instr<23:21>);
opc2 = UInt(instr<7:5>);
CRn = UInt(instr<19:16>);
CRm = UInt(instr<3:0>);
if cp_num == 14 && opc1 == 0 && CRn == 0 && CRm == 1 && opc2 == 0 then // DBGDSCRint
return TRUE;
return FALSE;
// Write to a 32-bit AArch32 System register.
AArch32.SysRegWrite(integer cp_num, bits(32) instr, bits(32) val);
// Write to a 64-bit AArch32 System register.
AArch32.SysRegWrite64(integer cp_num, bits(32) instr, bits(64) val);
// AArch32.WriteMode()
// ===================
// Function for dealing with writes to PSTATE.M from AArch32 state only.
// This ensures that PSTATE.EL and PSTATE.SP are always valid.
AArch32.WriteMode(bits(5) mode)
(valid,el) = ELFromM32(mode);
assert valid;
PSTATE.M = mode;
PSTATE.EL = el;
PSTATE.nRW = '1';
PSTATE.SP = (if mode IN {M32_User,M32_System} then '0' else '1');
return;
// AArch32.WriteModeByInstr()
// ==========================
// Function for dealing with writes to PSTATE.M from an AArch32 instruction, and ensuring that
// illegal state changes are correctly flagged in PSTATE.IL.
AArch32.WriteModeByInstr(bits(5) mode)
(valid,el) = ELFromM32(mode);
// 'valid' is set to FALSE if 'mode' is invalid for this implementation or the current value
// of SCR.NS/SCR_EL3.NS. Additionally, it is illegal for an instruction to write 'mode' to
// PSTATE.EL if it would result in any of:
// * A change to a mode that would cause entry to a higher Exception level.
if UInt(el) > UInt(PSTATE.EL) then
valid = FALSE;
// * A change to or from Hyp mode.
if (PSTATE.M == M32_Hyp || mode == M32_Hyp) && PSTATE.M != mode then
valid = FALSE;
// * When EL2 is implemented, the value of HCR.TGE is '1', a change to a Non-secure EL1 mode.
if PSTATE.M == M32_Monitor && HaveEL(EL2) && el == EL1 && SCR.NS == '1' && HCR.TGE == '1' then
valid = FALSE;
if !valid then
PSTATE.IL = '1';
else
AArch32.WriteMode(mode);
// BadMode()
// =========

boolean BadMode(bits(5) mode)
    // Return TRUE if 'mode' encodes a mode that is not valid for this implementation
    case mode of
        when M32_Monitor
            valid = HaveAArch32EL(EL3);
        when M32_Hyp
            valid = HaveAArch32EL(EL2);
        when M32_FIQ, M32_IRQ, M32_Svc, M32_Abort, M32_Undef, M32_System
            // If EL3 is implemented and using AArch32, then these modes are EL3 modes in Secure
            // state, and EL1 modes in Non-secure state. If EL3 is not implemented or is using
            // AArch64, then these modes are EL1 modes.
            // Therefore it is sufficient to test this implementation supports EL1 using AArch32.
            valid = HaveAArch32EL(EL1);
        when M32_User
            valid = HaveAArch32EL(EL0);
        otherwise
            valid = FALSE;           // Passed an illegal mode value
    return !valid;
// BankedRegisterAccessValid()
// ===========================
// Checks for MRS (Banked register) or MSR (Banked register) accesses to registers
// other than the SPSRs that are invalid. This includes ELR_hyp accesses.

BankedRegisterAccessValid(bits(5) SYSm, bits(5) mode)
    case SYSm of
        when '000xx', '00100'                 // R8_usr to R12_usr
            if mode != M32_FIQ then UNPREDICTABLE;
        when '00101'                          // SP_usr
            if mode == M32_System then UNPREDICTABLE;
        when '00110'                          // LR_usr
            if mode IN {M32_Hyp,M32_System} then UNPREDICTABLE;
        when '010xx', '0110x', '01110'        // R8_fiq to R12_fiq, SP_fiq, LR_fiq
            if mode == M32_FIQ then UNPREDICTABLE;
        when '1000x'                          // LR_irq, SP_irq
            if mode == M32_IRQ then UNPREDICTABLE;
        when '1001x'                          // LR_svc, SP_svc
            if mode == M32_Svc then UNPREDICTABLE;
        when '1010x'                          // LR_abt, SP_abt
            if mode == M32_Abort then UNPREDICTABLE;
        when '1011x'                          // LR_und, SP_und
            if mode == M32_Undef then UNPREDICTABLE;
        when '1110x'                          // LR_mon, SP_mon
            if !HaveEL(EL3) || !IsSecure() || mode == M32_Monitor then UNPREDICTABLE;
        when '11110'                          // ELR_hyp, only from Monitor or Hyp mode
            if !HaveEL(EL2) || !(mode IN {M32_Monitor,M32_Hyp}) then UNPREDICTABLE;
        when '11111'                          // SP_hyp, only from Monitor mode
            if !HaveEL(EL2) || mode != M32_Monitor then UNPREDICTABLE;
        otherwise
            UNPREDICTABLE;
    return;
// CPSRWriteByInstr()
// ==================
// Update PSTATE.<N,Z,C,V,Q,GE,E,A,I,F,M> from a CPSR value written by an MSR instruction.

CPSRWriteByInstr(bits(32) value, bits(4) bytemask)
    privileged = PSTATE.EL != EL0;      // PSTATE.<A,I,F,M> are not writable at EL0

    // Write PSTATE from 'value', ignoring bytes masked by 'bytemask'
    if bytemask<3> == '1' then
        PSTATE.<N,Z,C,V,Q> = value<31:27>;
        // Bits <26:24> are ignored

    if bytemask<2> == '1' then
        // Bit <23> is RES0
        if privileged then
            PSTATE.PAN = value<22>;
        // Bits <21:20> are RES0
        PSTATE.GE = value<19:16>;

    if bytemask<1> == '1' then
        // Bits <15:10> are RES0
        PSTATE.E = value<9>;            // PSTATE.E is writable at EL0
        if privileged then
            PSTATE.A = value<8>;

    if bytemask<0> == '1' then
        if privileged then
            PSTATE.<I,F> = value<7:6>;
            // Bit <5> is RES0
            // AArch32.WriteModeByInstr() sets PSTATE.IL to 1 if this is an illegal mode change.
            AArch32.WriteModeByInstr(value<4:0>);
    return;
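For illustration only: the bytemask gating above can be pictured as a per-byte merge of the new value into the old status word. A deliberately simplified C sketch (it collapses the field-by-field PSTATE updates into whole bytes, and ignores that PSTATE.E in byte 1 is writable at EL0 while PSTATE.A is not):

#include <stdbool.h>
#include <stdint.h>

/* Merge 'value' into 'old' one byte lane at a time, gated by 'bytemask',
   roughly as CPSRWriteByInstr() does field by field. Byte 0 (I,F and the
   mode bits) is skipped for unprivileged writers; the real function also
   routes the mode bits through AArch32.WriteModeByInstr(). */
uint32_t masked_psr_write(uint32_t old, uint32_t value, unsigned bytemask,
                          bool privileged)
{
    uint32_t result = old;
    for (int i = 0; i < 4; i++) {
        if ((bytemask & (1u << i)) == 0)
            continue;
        if (i == 0 && !privileged)
            continue;                   /* I,F,M are privileged-only */
        uint32_t lane = 0xffu << (8 * i);
        result = (result & ~lane) | (value & lane);
    }
    return result;
}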
// ConditionPassed()
// =================

boolean ConditionPassed()
    return ConditionHolds(AArch32.CurrentCond());

bits(4) AArch32.CurrentCond();
// InITBlock()
// ===========

boolean InITBlock()
    if CurrentInstrSet() == InstrSet_T32 then
        return PSTATE.IT<3:0> != '0000';
    else
        return FALSE;
// LastInITBlock()
// ===============

boolean LastInITBlock()
    return (PSTATE.IT<3:0> == '1000');

// SPSRWriteByInstr()
// ==================

SPSRWriteByInstr(bits(32) value, bits(4) bytemask)
    new_spsr = SPSR[];

    if bytemask<3> == '1' then
        new_spsr<31:24> = value<31:24>;  // N,Z,C,V,Q flags, IT[1:0],J bits

    if bytemask<2> == '1' then
        new_spsr<23:16> = value<23:16>;  // IL bit, GE[3:0] flags

    if bytemask<1> == '1' then
        new_spsr<15:8> = value<15:8>;    // IT[7:2] bits, E bit, A interrupt mask

    if bytemask<0> == '1' then
        new_spsr<7:0> = value<7:0>;      // I,F interrupt masks, T bit, Mode bits

    SPSR[] = new_spsr;                   // UNPREDICTABLE if User or System mode
    return;
// SPSRaccessValid()
// =================
// Checks for MRS (Banked register) or MSR (Banked register) accesses to the SPSRs
// that are UNPREDICTABLE

SPSRaccessValid(bits(5) SYSm, bits(5) mode)
    case SYSm of
        when '01110'                          // SPSR_fiq
            if mode == M32_FIQ then UNPREDICTABLE;
        when '10000'                          // SPSR_irq
            if mode == M32_IRQ then UNPREDICTABLE;
        when '10010'                          // SPSR_svc
            if mode == M32_Svc then UNPREDICTABLE;
        when '10100'                          // SPSR_abt
            if mode == M32_Abort then UNPREDICTABLE;
        when '10110'                          // SPSR_und
            if mode == M32_Undef then UNPREDICTABLE;
        when '11100'                          // SPSR_mon
            if !HaveEL(EL3) || mode == M32_Monitor || !IsSecure() then UNPREDICTABLE;
        when '11110'                          // SPSR_hyp
            if !HaveEL(EL2) || mode != M32_Monitor then UNPREDICTABLE;
        otherwise
            UNPREDICTABLE;
    return;
// SelectInstrSet()
// ================

SelectInstrSet(InstrSet iset)
    assert CurrentInstrSet() IN {InstrSet_A32, InstrSet_T32};
    assert iset IN {InstrSet_A32, InstrSet_T32};
    PSTATE.T = if iset == InstrSet_A32 then '0' else '1';
    return;
// Sat()
// =====

bits(N) Sat(integer i, integer N, boolean unsigned)
    result = if unsigned then UnsignedSat(i, N) else SignedSat(i, N);
    return result;
// SignedSat()
// ===========

bits(N) SignedSat(integer i, integer N)
    (result, -) = SignedSatQ(i, N);
    return result;
// UnsignedSat()
// =============

bits(N) UnsignedSat(integer i, integer N)
    (result, -) = UnsignedSatQ(i, N);
    return result;
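For illustration only: the SatQ-based helpers above clamp an integer to the representable N-bit range. A C sketch valid for N up to 62, dropping the saturation (Q) flag that SignedSatQ()/UnsignedSatQ() also return:

#include <stdint.h>

/* Clamp to the signed range [-2^(n-1), 2^(n-1)-1]. */
int64_t signed_sat(int64_t i, unsigned n)
{
    int64_t max = (INT64_C(1) << (n - 1)) - 1;
    int64_t min = -(INT64_C(1) << (n - 1));
    return i > max ? max : (i < min ? min : i);
}

/* Clamp to the unsigned range [0, 2^n-1]. */
uint64_t unsigned_sat(int64_t i, unsigned n)
{
    int64_t max = (INT64_C(1) << n) - 1;
    return i < 0 ? 0 : (i > max ? (uint64_t)max : (uint64_t)i);
}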
// AArch32.DefaultTEXDecode()
// ==========================

MemoryAttributes AArch32.DefaultTEXDecode(bits(3) TEX, bit C, bit B, bit S, AccType acctype)
    MemoryAttributes memattrs;

    // Reserved values map to allocated values
    if (TEX == '001' && C:B == '01') || (TEX == '010' && C:B != '00') || TEX == '011' then
        bits(5) texcb;
        (-, texcb) = ConstrainUnpredictableBits(Unpredictable_RESTEXCB);
        TEX = texcb<4:2>;  C = texcb<1>;  B = texcb<0>;

    case TEX:C:B of
        when '00000'
            // Device-nGnRnE
            memattrs.memtype = MemType_Device;
            memattrs.device = DeviceType_nGnRnE;
        when '00001', '01000'
            // Device-nGnRE
            memattrs.memtype = MemType_Device;
            memattrs.device = DeviceType_nGnRE;
        when '00010', '00011', '00100'
            // Write-back or Write-through Read allocate, or Non-cacheable
            memattrs.memtype = MemType_Normal;
            memattrs.inner = ShortConvertAttrsHints(C:B, acctype, FALSE);
            memattrs.outer = ShortConvertAttrsHints(C:B, acctype, FALSE);
            memattrs.shareable = (S == '1');
        when '00110'
            memattrs = MemoryAttributes IMPLEMENTATION_DEFINED;
        when '00111'
            // Write-back Read and Write allocate
            memattrs.memtype = MemType_Normal;
            memattrs.inner = ShortConvertAttrsHints('01', acctype, FALSE);
            memattrs.outer = ShortConvertAttrsHints('01', acctype, FALSE);
            memattrs.shareable = (S == '1');
        when '1xxxx'
            // Cacheable, TEX<1:0> = Outer attrs, {C,B} = Inner attrs
            memattrs.memtype = MemType_Normal;
            memattrs.inner = ShortConvertAttrsHints(C:B, acctype, FALSE);
            memattrs.outer = ShortConvertAttrsHints(TEX<1:0>, acctype, FALSE);
            memattrs.shareable = (S == '1');
        otherwise
            // Reserved, handled above
            Unreachable();

    // transient bits are not supported in this format
    memattrs.inner.transient = FALSE;
    memattrs.outer.transient = FALSE;

    // distinction between inner and outer shareable is not supported in this format
    memattrs.outershareable = memattrs.shareable;
    memattrs.tagged = FALSE;

    return MemAttrDefaults(memattrs);
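For illustration only: treating TEX:C:B as a single 5-bit value makes the case split above easy to tabulate. A C sketch that reports only the coarse memory type; reserved encodings, which the pseudocode remaps via ConstrainUnpredictableBits(), are reported as reserved here:

/* Coarse decode of the short-descriptor TEX[2:0]:C:B field. */
enum memtype { MT_DEVICE_NGNRNE, MT_DEVICE_NGNRE, MT_NORMAL,
               MT_IMPDEF, MT_RESERVED };

enum memtype decode_texcb(unsigned tex, unsigned c, unsigned b)
{
    unsigned texcb = ((tex & 7) << 2) | ((c & 1) << 1) | (b & 1);
    if (texcb & 0x10)
        return MT_NORMAL;                    /* '1xxxx': cacheable        */
    switch (texcb) {
    case 0x00:            return MT_DEVICE_NGNRNE;  /* '00000'            */
    case 0x01: case 0x08: return MT_DEVICE_NGNRE;   /* '00001', '01000'   */
    case 0x02: case 0x03:
    case 0x04: case 0x07: return MT_NORMAL;         /* WT/WB/NC, WB-RWA   */
    case 0x06:            return MT_IMPDEF;         /* '00110'            */
    default:              return MT_RESERVED;       /* remapped by the
                                                       pseudocode above   */
    }
}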
// AArch32.InstructionDevice()
// ===========================
// Instruction fetches from memory marked as Device but not execute-never might generate a
// Permission Fault but are otherwise treated as if from Normal Non-cacheable memory.

AddressDescriptor AArch32.InstructionDevice(AddressDescriptor addrdesc, bits(32) vaddress,
                                            bits(40) ipaddress, integer level, bits(4) domain,
                                            AccType acctype, boolean iswrite, boolean secondstage,
                                            boolean s2fs1walk)
    c = ConstrainUnpredictable(Unpredictable_INSTRDEVICE);
    assert c IN {Constraint_NONE, Constraint_FAULT};

    if c == Constraint_FAULT then
        addrdesc.fault = AArch32.PermissionFault(ipaddress, domain, level, acctype, iswrite,
                                                 secondstage, s2fs1walk);
    else
        addrdesc.memattrs.memtype = MemType_Normal;
        addrdesc.memattrs.inner.attrs = MemAttr_NC;
        addrdesc.memattrs.inner.hints = MemHint_No;
        addrdesc.memattrs.outer = addrdesc.memattrs.inner;
        addrdesc.memattrs.tagged = FALSE;
        addrdesc.memattrs = MemAttrDefaults(addrdesc.memattrs);

    return addrdesc;
// AArch32.RemappedTEXDecode()
// ===========================

MemoryAttributes AArch32.RemappedTEXDecode(bits(3) TEX, bit C, bit B, bit S, AccType acctype)
    MemoryAttributes memattrs;

    region = UInt(TEX<0>:C:B);               // TEX<2:1> are ignored in this mapping scheme
    if region == 6 then
        memattrs = MemoryAttributes IMPLEMENTATION_DEFINED;
    else
        base = 2 * region;
        attrfield = PRRR<base+1:base>;

        if attrfield == '11' then            // Reserved, maps to allocated value
            (-, attrfield) = ConstrainUnpredictableBits(Unpredictable_RESPRRR);

        case attrfield of
            when '00'                        // Device-nGnRnE
                memattrs.memtype = MemType_Device;
                memattrs.device = DeviceType_nGnRnE;
            when '01'                        // Device-nGnRE
                memattrs.memtype = MemType_Device;
                memattrs.device = DeviceType_nGnRE;
            when '10'
                memattrs.memtype = MemType_Normal;
                memattrs.inner = ShortConvertAttrsHints(NMRR<base+1:base>, acctype, FALSE);
                memattrs.outer = ShortConvertAttrsHints(NMRR<base+17:base+16>, acctype, FALSE);
                s_bit = if S == '0' then PRRR.NS0 else PRRR.NS1;
                memattrs.shareable = (s_bit == '1');
                memattrs.outershareable = (s_bit == '1' && PRRR<region+24> == '0');
            when '11'
                Unreachable();

    // transient bits are not supported in this format
    memattrs.inner.transient = FALSE;
    memattrs.outer.transient = FALSE;
    memattrs.tagged = FALSE;

    return MemAttrDefaults(memattrs);
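For illustration only: the TEX-remap path indexes PRRR and NMRR with the 3-bit region number TEX<0>:C:B. The field extractions above correspond to these C helpers:

#include <stdint.h>

unsigned prrr_attr(uint32_t prrr, unsigned region)   /* PRRR<2r+1:2r>     */
{
    return (prrr >> (2 * region)) & 3;
}

unsigned nmrr_inner(uint32_t nmrr, unsigned region)  /* NMRR<2r+1:2r>     */
{
    return (nmrr >> (2 * region)) & 3;
}

unsigned nmrr_outer(uint32_t nmrr, unsigned region)  /* NMRR<2r+17:2r+16> */
{
    return (nmrr >> (2 * region + 16)) & 3;
}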
// AArch32.S1AttrDecode()
// ======================
// Converts the Stage 1 attribute fields, using the MAIR, to orthogonal
// attributes and hints.

MemoryAttributes AArch32.S1AttrDecode(bits(2) SH, bits(3) attr, AccType acctype)
    MemoryAttributes memattrs;

    if PSTATE.EL == EL2 then
        mair = HMAIR1:HMAIR0;
    else
        mair = MAIR1:MAIR0;
    index = 8 * UInt(attr);
    attrfield = mair<index+7:index>;

    memattrs.tagged = FALSE;
    if ((attrfield<7:4> != '0000' && attrfield<7:4> != '1111' && attrfield<3:0> == '0000') ||
        (attrfield<7:4> == '0000' && attrfield<3:0> != 'xx00')) then
        // Reserved, maps to an allocated value
        (-, attrfield) = ConstrainUnpredictableBits(Unpredictable_RESMAIR);
    if !HaveMTEExt() && attrfield<7:4> == '1111' && attrfield<3:0> == '0000' then
        // Reserved, maps to an allocated value
        (-, attrfield) = ConstrainUnpredictableBits(Unpredictable_RESMAIR);

    if attrfield<7:4> == '0000' then              // Device
        memattrs.memtype = MemType_Device;
        case attrfield<3:0> of
            when '0000'  memattrs.device = DeviceType_nGnRnE;
            when '0100'  memattrs.device = DeviceType_nGnRE;
            when '1000'  memattrs.device = DeviceType_nGRE;
            when '1100'  memattrs.device = DeviceType_GRE;
            otherwise    Unreachable();           // Reserved, handled above

    elsif attrfield<3:0> != '0000' then           // Normal
        memattrs.memtype = MemType_Normal;
        memattrs.outer = LongConvertAttrsHints(attrfield<7:4>, acctype);
        memattrs.inner = LongConvertAttrsHints(attrfield<3:0>, acctype);
        memattrs.shareable = SH<1> == '1';
        memattrs.outershareable = SH == '10';
    elsif HaveMTEExt() && attrfield == '11110000' then  // Normal, Tagged if WB-RWA
        memattrs.memtype = MemType_Normal;
        memattrs.outer = LongConvertAttrsHints('1111', acctype);    // WB_RWA
        memattrs.inner = LongConvertAttrsHints('1111', acctype);    // WB_RWA
        memattrs.shareable = SH<1> == '1';
        memattrs.outershareable = SH == '10';
        memattrs.tagged = (memattrs.inner.attrs == MemAttr_WB &&
                           memattrs.inner.hints == MemHint_RWA &&
                           memattrs.outer.attrs == MemAttr_WB &&
                           memattrs.outer.hints == MemHint_RWA);
    else
        Unreachable();                            // Reserved, handled above

    return MemAttrDefaults(memattrs);
// AArch32.TranslateAddressS1Off()
// ===============================
// Called for stage 1 translations when translation is disabled to supply a default translation.
// Note that there are additional constraints on instruction prefetching that are not described in
// this pseudocode.

TLBRecord AArch32.TranslateAddressS1Off(bits(32) vaddress, AccType acctype, boolean iswrite)
    assert ELUsingAArch32(S1TranslationRegime());
    TLBRecord result;

    default_cacheable = (HasS2Translation() &&
                         ((if ELUsingAArch32(EL2) then HCR.DC else HCR_EL2.DC) == '1'));

    if default_cacheable then
        // Use default cacheable settings
        result.addrdesc.memattrs.memtype = MemType_Normal;
        result.addrdesc.memattrs.inner.attrs = MemAttr_WB;      // Write-back
        result.addrdesc.memattrs.inner.hints = MemHint_RWA;
        result.addrdesc.memattrs.shareable = FALSE;
        result.addrdesc.memattrs.outershareable = FALSE;
        result.addrdesc.memattrs.tagged = HCR_EL2.DCT == '1';
    elsif acctype != AccType_IFETCH then
        // Treat data as Device
        result.addrdesc.memattrs.memtype = MemType_Device;
        result.addrdesc.memattrs.device = DeviceType_nGnRnE;
        result.addrdesc.memattrs.inner = MemAttrHints UNKNOWN;
        result.addrdesc.memattrs.tagged = FALSE;
    else
        // Instruction cacheability controlled by SCTLR/HSCTLR.I
        if PSTATE.EL == EL2 then
            cacheable = HSCTLR.I == '1';
        else
            cacheable = SCTLR.I == '1';
        result.addrdesc.memattrs.memtype = MemType_Normal;
        if cacheable then
            result.addrdesc.memattrs.inner.attrs = MemAttr_WT;
            result.addrdesc.memattrs.inner.hints = MemHint_RA;
        else
            result.addrdesc.memattrs.inner.attrs = MemAttr_NC;
            result.addrdesc.memattrs.inner.hints = MemHint_No;
        result.addrdesc.memattrs.shareable = TRUE;
        result.addrdesc.memattrs.outershareable = TRUE;
        result.addrdesc.memattrs.tagged = FALSE;

    result.addrdesc.memattrs.outer = result.addrdesc.memattrs.inner;
    result.addrdesc.memattrs = MemAttrDefaults(result.addrdesc.memattrs);

    result.perms.ap = bits(3) UNKNOWN;
    result.perms.xn = '0';
    result.perms.pxn = '0';

    result.nG = bit UNKNOWN;
    result.contiguous = boolean UNKNOWN;
    result.domain = bits(4) UNKNOWN;
    result.level = integer UNKNOWN;
    result.blocksize = integer UNKNOWN;
    result.addrdesc.paddress.address = ZeroExtend(vaddress);
    result.addrdesc.paddress.NS = if IsSecure() then '0' else '1';
    result.addrdesc.fault = AArch32.NoFault();
    return result;
// AArch32.AccessIsPrivileged()
// ============================

boolean AArch32.AccessIsPrivileged(AccType acctype)
    el = AArch32.AccessUsesEL(acctype);
    if el == EL0 then
        ispriv = FALSE;
    elsif el != EL1 then
        ispriv = TRUE;
    else
        ispriv = (acctype != AccType_UNPRIV);
    return ispriv;
// AArch32.AccessUsesEL()
// ======================
// Returns the Exception Level of the regime that will manage the translation for a given access type.

bits(2) AArch32.AccessUsesEL(AccType acctype)
    if acctype == AccType_UNPRIV then
        return EL0;
    else
        return PSTATE.EL;
// AArch32.CheckDomain()
// =====================

(boolean, FaultRecord) AArch32.CheckDomain(bits(4) domain, bits(32) vaddress, integer level,
                                           AccType acctype, boolean iswrite)
    index = 2 * UInt(domain);
    attrfield = DACR<index+1:index>;

    if attrfield == '10' then            // Reserved, maps to an allocated value
        (-, attrfield) = ConstrainUnpredictableBits(Unpredictable_RESDACR);

    if attrfield == '00' then
        fault = AArch32.DomainFault(domain, level, acctype, iswrite);
    else
        fault = AArch32.NoFault();

    permissioncheck = (attrfield == '01');
    return (permissioncheck, fault);
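For illustration only: the domain check is a 2-bit field extraction from DACR. In C terms:

#include <stdbool.h>
#include <stdint.h>

/* DACR<2d+1:2d> selects the behaviour for domain 'd': '00' generates a
   Domain fault, '01' (client) requires the permission check, '11'
   (manager) bypasses it, and '10' is reserved. */
unsigned dacr_field(uint32_t dacr, unsigned domain)
{
    return (dacr >> (2 * domain)) & 3;
}

bool domain_faults(uint32_t dacr, unsigned domain)
{
    return dacr_field(dacr, domain) == 0;
}

bool needs_permission_check(uint32_t dacr, unsigned domain)
{
    return dacr_field(dacr, domain) == 1;
}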
// AArch32.CheckPermission()
// =========================
// Function used for permission checking from AArch32 stage 1 translations

FaultRecord AArch32.CheckPermission(Permissions perms, bits(32) vaddress, integer level,
                                    bits(4) domain, bit NS, AccType acctype, boolean iswrite)
    assert ELUsingAArch32(S1TranslationRegime());

    if PSTATE.EL != EL2 then
        wxn = SCTLR.WXN == '1';
        if TTBCR.EAE == '1' || SCTLR.AFE == '1' || perms.ap<0> == '1' then
            priv_r = TRUE;
            priv_w = perms.ap<2> == '0';
            user_r = perms.ap<1> == '1';
            user_w = perms.ap<2:1> == '01';
        else
            priv_r = perms.ap<2:1> != '00';
            priv_w = perms.ap<2:1> == '01';
            user_r = perms.ap<1> == '1';
            user_w = FALSE;
        uwxn = SCTLR.UWXN == '1';

        ispriv = AArch32.AccessIsPrivileged(acctype);

        pan = if HavePANExt() then PSTATE.PAN else '0';
        is_ldst   = !(acctype IN {AccType_DC, AccType_DC_UNPRIV, AccType_AT, AccType_IFETCH});
        is_ats1xp = (acctype == AccType_AT && AArch32.ExecutingATS1xPInstr());
        if pan == '1' && user_r && ispriv && (is_ldst || is_ats1xp) then
            priv_r = FALSE;
            priv_w = FALSE;

        user_xn = !user_r || perms.xn == '1' || (user_w && wxn);
        priv_xn = (!priv_r || perms.xn == '1' || perms.pxn == '1' ||
                   (priv_w && wxn) || (user_w && uwxn));

        if ispriv then
            (r, w, xn) = (priv_r, priv_w, priv_xn);
        else
            (r, w, xn) = (user_r, user_w, user_xn);
    else
        // Access from EL2
        wxn = HSCTLR.WXN == '1';
        r = TRUE;
        w = perms.ap<2> == '0';
        xn = perms.xn == '1' || (w && wxn);

    // Restriction on Secure instruction fetch
    if HaveEL(EL3) && IsSecure() && NS == '1' then
        secure_instr_fetch = if ELUsingAArch32(EL3) then SCR.SIF else SCR_EL3.SIF;
        if secure_instr_fetch == '1' then xn = TRUE;

    if acctype == AccType_IFETCH then
        fail = xn;
        failedread = TRUE;
    elsif acctype IN { AccType_ATOMICRW, AccType_ORDEREDRW, AccType_ORDEREDATOMICRW } then
        fail = !r || !w;
        failedread = !r;
    elsif acctype == AccType_DC then
        // DC maintenance instructions operating by VA cannot fault from stage 1 translation.
        fail = FALSE;
    elsif iswrite then
        fail = !w;
        failedread = FALSE;
    else
        fail = !r;
        failedread = TRUE;

    if fail then
        secondstage = FALSE;
        s2fs1walk = FALSE;
        ipaddress = bits(40) UNKNOWN;
        return AArch32.PermissionFault(ipaddress, domain, level, acctype,
                                       !failedread, secondstage, s2fs1walk);
    else
        return AArch32.NoFault();
// AArch32.CheckS2Permission()
// ===========================
// Function used for permission checking from AArch32 stage 2 translations

FaultRecord AArch32.CheckS2Permission(Permissions perms, bits(32) vaddress, bits(40) ipaddress,
                                      integer level, AccType acctype, boolean iswrite,
                                      boolean s2fs1walk)
    assert HaveEL(EL2) && !IsSecure() && ELUsingAArch32(EL2) && HasS2Translation();

    r = perms.ap<1> == '1';
    w = perms.ap<2> == '1';
    if HaveExtendedExecuteNeverExt() then
        case perms.xn:perms.xxn of
            when '00'  xn = !r;
            when '01'  xn = !r || PSTATE.EL == EL1;
            when '10'  xn = TRUE;
            when '11'  xn = !r || PSTATE.EL == EL0;
    else
        xn = !r || perms.xn == '1';

    // Stage 1 walk is checked as a read, regardless of the original type
    if acctype == AccType_IFETCH && !s2fs1walk then
        fail = xn;
        failedread = TRUE;
    elsif (acctype IN { AccType_ATOMICRW, AccType_ORDEREDRW, AccType_ORDEREDATOMICRW }) && !s2fs1walk then
        fail = !r || !w;
        failedread = !r;
    elsif acctype == AccType_DC && !s2fs1walk then
        // DC maintenance instructions operating by VA do not generate Permission faults
        // from stage 2 translation, other than from the stage 1 translation table walk.
        fail = FALSE;
    elsif iswrite && !s2fs1walk then
        fail = !w;
        failedread = FALSE;
    else
        fail = !r;
        failedread = !iswrite;

    if fail then
        domain = bits(4) UNKNOWN;
        secondstage = TRUE;
        return AArch32.PermissionFault(ipaddress, domain, level, acctype,
                                       !failedread, secondstage, s2fs1walk);
    else
        return AArch32.NoFault();
// AArch32.CheckBreakpoint()
// =========================
// Called before executing the instruction of length "size" bytes at "vaddress" in an AArch32
// translation regime.
// The breakpoint can in fact be evaluated well ahead of execution, for example, at instruction
// fetch. This is the simple sequential execution of the program.

FaultRecord AArch32.CheckBreakpoint(bits(32) vaddress, integer size)
    assert ELUsingAArch32(S1TranslationRegime());
    assert size IN {2,4};

    match = FALSE;
    mismatch = FALSE;

    for i = 0 to UInt(DBGDIDR.BRPs)
        (match_i, mismatch_i) = AArch32.BreakpointMatch(i, vaddress, size);
        match = match || match_i;
        mismatch = mismatch || mismatch_i;

    if match && HaltOnBreakpointOrWatchpoint() then
        reason = DebugHalt_Breakpoint;
        Halt(reason);
    elsif (match || mismatch) && DBGDSCRext.MDBGen == '1' && AArch32.GenerateDebugExceptions() then
        acctype = AccType_IFETCH;
        iswrite = FALSE;
        debugmoe = DebugException_Breakpoint;
        return AArch32.DebugFault(acctype, iswrite, debugmoe);
    else
        return AArch32.NoFault();
// AArch32.CheckDebug()
// ====================
// Called on each access to check for a debug exception or entry to Debug state.

FaultRecord AArch32.CheckDebug(bits(32) vaddress, AccType acctype, boolean iswrite, integer size)
    FaultRecord fault = AArch32.NoFault();
    d_side = (acctype != AccType_IFETCH);
    generate_exception = AArch32.GenerateDebugExceptions() && DBGDSCRext.MDBGen == '1';
    halt = HaltOnBreakpointOrWatchpoint();
    // Relative priority of Vector Catch and Breakpoint exceptions not defined in the architecture
    vector_catch_first = ConstrainUnpredictableBool(Unpredictable_BPVECTORCATCHPRI);

    if !d_side && vector_catch_first && generate_exception then
        fault = AArch32.CheckVectorCatch(vaddress, size);

    if fault.statuscode == Fault_None && (generate_exception || halt) then
        if d_side then
            fault = AArch32.CheckWatchpoint(vaddress, acctype, iswrite, size);
        else
            fault = AArch32.CheckBreakpoint(vaddress, size);

    if fault.statuscode == Fault_None && !d_side && !vector_catch_first && generate_exception then
        return AArch32.CheckVectorCatch(vaddress, size);

    return fault;
// AArch32.CheckVectorCatch()
// ==========================
// Called before executing the instruction of length "size" bytes at "vaddress" in an AArch32
// translation regime.
// Vector Catch can in fact be evaluated well ahead of execution, for example, at instruction
// fetch. This is the simple sequential execution of the program.

FaultRecord AArch32.CheckVectorCatch(bits(32) vaddress, integer size)
    assert ELUsingAArch32(S1TranslationRegime());

    match = AArch32.VCRMatch(vaddress);
    if size == 4 && !match && AArch32.VCRMatch(vaddress + 2) then
        match = ConstrainUnpredictableBool(Unpredictable_VCMATCHHALF);

    if match && DBGDSCRext.MDBGen == '1' && AArch32.GenerateDebugExceptions() then
        acctype = AccType_IFETCH;
        iswrite = FALSE;
        debugmoe = DebugException_VectorCatch;
        return AArch32.DebugFault(acctype, iswrite, debugmoe);
    else
        return AArch32.NoFault();
// AArch32.CheckWatchpoint()
// =========================
// Called before accessing the memory location of "size" bytes at "address".

FaultRecord AArch32.CheckWatchpoint(bits(32) vaddress, AccType acctype,
                                    boolean iswrite, integer size)
    assert ELUsingAArch32(S1TranslationRegime());

    match = FALSE;
    ispriv = AArch32.AccessIsPrivileged(acctype);

    for i = 0 to UInt(DBGDIDR.WRPs)
        match = match || AArch32.WatchpointMatch(i, vaddress, size, ispriv, iswrite);

    if match && HaltOnBreakpointOrWatchpoint() then
        reason = DebugHalt_Watchpoint;
        Halt(reason);
    elsif match && DBGDSCRext.MDBGen == '1' && AArch32.GenerateDebugExceptions() then
        debugmoe = DebugException_Watchpoint;
        return AArch32.DebugFault(acctype, iswrite, debugmoe);
    else
        return AArch32.NoFault();
// AArch32.AccessFlagFault()
// =========================

FaultRecord AArch32.AccessFlagFault(bits(40) ipaddress, bits(4) domain, integer level,
                                    AccType acctype, boolean iswrite, boolean secondstage,
                                    boolean s2fs1walk)
    extflag = bit UNKNOWN;
    debugmoe = bits(4) UNKNOWN;
    errortype = bits(2) UNKNOWN;
    return AArch32.CreateFaultRecord(Fault_AccessFlag, ipaddress, domain, level, acctype, iswrite,
                                     extflag, debugmoe, errortype, secondstage, s2fs1walk);
// AArch32.AddressSizeFault()
// ==========================

FaultRecord AArch32.AddressSizeFault(bits(40) ipaddress, bits(4) domain, integer level,
                                     AccType acctype, boolean iswrite, boolean secondstage,
                                     boolean s2fs1walk)
    extflag = bit UNKNOWN;
    debugmoe = bits(4) UNKNOWN;
    errortype = bits(2) UNKNOWN;
    return AArch32.CreateFaultRecord(Fault_AddressSize, ipaddress, domain, level, acctype, iswrite,
                                     extflag, debugmoe, errortype, secondstage, s2fs1walk);
// AArch32.AlignmentFault()
// ========================

FaultRecord AArch32.AlignmentFault(AccType acctype, boolean iswrite, boolean secondstage)
    ipaddress = bits(40) UNKNOWN;
    domain = bits(4) UNKNOWN;
    level = integer UNKNOWN;
    extflag = bit UNKNOWN;
    debugmoe = bits(4) UNKNOWN;
    errortype = bits(2) UNKNOWN;
    s2fs1walk = boolean UNKNOWN;
    return AArch32.CreateFaultRecord(Fault_Alignment, ipaddress, domain, level, acctype, iswrite,
                                     extflag, debugmoe, errortype, secondstage, s2fs1walk);
// AArch32.AsynchExternalAbort()
// =============================
// Wrapper function for asynchronous external aborts

FaultRecord AArch32.AsynchExternalAbort(boolean parity, bits(2) errortype, bit extflag)
    faulttype = if parity then Fault_AsyncParity else Fault_AsyncExternal;
    ipaddress = bits(40) UNKNOWN;
    domain = bits(4) UNKNOWN;
    level = integer UNKNOWN;
    acctype = AccType_NORMAL;
    iswrite = boolean UNKNOWN;
    debugmoe = bits(4) UNKNOWN;
    secondstage = FALSE;
    s2fs1walk = FALSE;
    return AArch32.CreateFaultRecord(faulttype, ipaddress, domain, level, acctype, iswrite, extflag,
                                     debugmoe, errortype, secondstage, s2fs1walk);
// AArch32.DebugFault()
// ====================

FaultRecord AArch32.DebugFault(AccType acctype, boolean iswrite, bits(4) debugmoe)
    ipaddress = bits(40) UNKNOWN;
    domain = bits(4) UNKNOWN;
    errortype = bits(2) UNKNOWN;
    level = integer UNKNOWN;
    extflag = bit UNKNOWN;
    secondstage = FALSE;
    s2fs1walk = FALSE;
    return AArch32.CreateFaultRecord(Fault_Debug, ipaddress, domain, level, acctype, iswrite,
                                     extflag, debugmoe, errortype, secondstage, s2fs1walk);
// AArch32.DomainFault()
// =====================

FaultRecord AArch32.DomainFault(bits(4) domain, integer level, AccType acctype, boolean iswrite)
    ipaddress = bits(40) UNKNOWN;
    extflag = bit UNKNOWN;
    debugmoe = bits(4) UNKNOWN;
    errortype = bits(2) UNKNOWN;
    secondstage = FALSE;
    s2fs1walk = FALSE;
    return AArch32.CreateFaultRecord(Fault_Domain, ipaddress, domain, level, acctype, iswrite,
                                     extflag, debugmoe, errortype, secondstage, s2fs1walk);
// AArch32.NoFault()
// =================

FaultRecord AArch32.NoFault()
    ipaddress = bits(40) UNKNOWN;
    domain = bits(4) UNKNOWN;
    level = integer UNKNOWN;
    acctype = AccType_NORMAL;
    iswrite = boolean UNKNOWN;
    extflag = bit UNKNOWN;
    debugmoe = bits(4) UNKNOWN;
    errortype = bits(2) UNKNOWN;
    secondstage = FALSE;
    s2fs1walk = FALSE;
    return AArch32.CreateFaultRecord(Fault_None, ipaddress, domain, level, acctype, iswrite,
                                     extflag, debugmoe, errortype, secondstage, s2fs1walk);
// AArch32.PermissionFault()
// =========================

FaultRecord AArch32.PermissionFault(bits(40) ipaddress, bits(4) domain, integer level,
                                    AccType acctype, boolean iswrite, boolean secondstage,
                                    boolean s2fs1walk)
    extflag = bit UNKNOWN;
    debugmoe = bits(4) UNKNOWN;
    errortype = bits(2) UNKNOWN;
    return AArch32.CreateFaultRecord(Fault_Permission, ipaddress, domain, level, acctype, iswrite,
                                     extflag, debugmoe, errortype, secondstage, s2fs1walk);
// AArch32.TranslationFault()
// ==========================

FaultRecord AArch32.TranslationFault(bits(40) ipaddress, bits(4) domain, integer level,
                                     AccType acctype, boolean iswrite, boolean secondstage,
                                     boolean s2fs1walk)
    extflag = bit UNKNOWN;
    debugmoe = bits(4) UNKNOWN;
    errortype = bits(2) UNKNOWN;
    return AArch32.CreateFaultRecord(Fault_Translation, ipaddress, domain, level, acctype, iswrite,
                                     extflag, debugmoe, errortype, secondstage, s2fs1walk);
// AArch32.FirstStageTranslate()
// =============================
// Perform a stage 1 translation walk. The function used by Address Translation operations is
// similar except it uses the translation regime specified for the instruction.

AddressDescriptor AArch32.FirstStageTranslate(bits(32) vaddress, AccType acctype, boolean iswrite,
                                              boolean wasaligned, integer size)
    if PSTATE.EL == EL2 then
        s1_enabled = HSCTLR.M == '1';
    elsif EL2Enabled() then
        tge = (if ELUsingAArch32(EL2) then HCR.TGE else HCR_EL2.TGE);
        dc = (if ELUsingAArch32(EL2) then HCR.DC else HCR_EL2.DC);
        s1_enabled = tge == '0' && dc == '0' && SCTLR.M == '1';
    else
        s1_enabled = SCTLR.M == '1';

    ipaddress = bits(40) UNKNOWN;
    secondstage = FALSE;
    s2fs1walk = FALSE;

    if s1_enabled then                            // First stage enabled
        use_long_descriptor_format = PSTATE.EL == EL2 || TTBCR.EAE == '1';
        if use_long_descriptor_format then
            S1 = AArch32.TranslationTableWalkLD(ipaddress, vaddress, acctype, iswrite, secondstage,
                                                s2fs1walk, size);
            permissioncheck = TRUE;  domaincheck = FALSE;
        else
            S1 = AArch32.TranslationTableWalkSD(vaddress, acctype, iswrite, size);
            permissioncheck = TRUE;  domaincheck = TRUE;
    else
        S1 = AArch32.TranslateAddressS1Off(vaddress, acctype, iswrite);
        permissioncheck = FALSE;  domaincheck = FALSE;

    if UsingAArch32() && HaveTrapLoadStoreMultipleDeviceExt() && AArch32.ExecutingLSMInstr() then
        if S1.addrdesc.memattrs.memtype == MemType_Device && S1.addrdesc.memattrs.device != DeviceType_GRE then
            nTLSMD = if S1TranslationRegime() == EL2 then HSCTLR.nTLSMD else SCTLR.nTLSMD;
            if nTLSMD == '0' then
                S1.addrdesc.fault = AArch32.AlignmentFault(acctype, iswrite, secondstage);

    // Check for unaligned data accesses to Device memory
    if ((!wasaligned && acctype != AccType_IFETCH) || (acctype == AccType_DCZVA))
        && S1.addrdesc.memattrs.memtype == MemType_Device && !IsFault(S1.addrdesc) then
        S1.addrdesc.fault = AArch32.AlignmentFault(acctype, iswrite, secondstage);

    if !IsFault(S1.addrdesc) && domaincheck then
        (permissioncheck, abort) = AArch32.CheckDomain(S1.domain, vaddress, S1.level, acctype,
                                                       iswrite);
        S1.addrdesc.fault = abort;

    if !IsFault(S1.addrdesc) && permissioncheck then
        S1.addrdesc.fault = AArch32.CheckPermission(S1.perms, vaddress, S1.level,
                                                    S1.domain, S1.addrdesc.paddress.NS,
                                                    acctype, iswrite);

    // Check for instruction fetches from Device memory not marked as execute-never. If there has
    // not been a Permission Fault then the memory is not marked execute-never.
    if (!IsFault(S1.addrdesc) && S1.addrdesc.memattrs.memtype == MemType_Device &&
        acctype == AccType_IFETCH) then
        S1.addrdesc = AArch32.InstructionDevice(S1.addrdesc, vaddress, ipaddress, S1.level,
                                                S1.domain, acctype, iswrite,
                                                secondstage, s2fs1walk);

    return S1.addrdesc;
// AArch32.FullTranslate()
// =======================
// Perform both stage 1 and stage 2 translation walks for the current translation regime. The
// function used by Address Translation operations is similar except it uses the translation
// regime specified for the instruction.

AddressDescriptor AArch32.FullTranslate(bits(32) vaddress, AccType acctype, boolean iswrite,
                                        boolean wasaligned, integer size)
    // First Stage Translation
    S1 = AArch32.FirstStageTranslate(vaddress, acctype, iswrite, wasaligned, size);
    if !IsFault(S1) && !(HaveNV2Ext() && acctype == AccType_NV2REGISTER) && HasS2Translation() then
        s2fs1walk = FALSE;
        result = AArch32.SecondStageTranslate(S1, vaddress, acctype, iswrite, wasaligned, s2fs1walk,
                                              size);
    else
        result = S1;

    return result;
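For illustration only, a C-shaped sketch of the control flow above, with hypothetical names (first_stage_translate, second_stage_translate, is_fault, has_s2_translation) standing in for the pseudocode functions; the AccType_NV2REGISTER special case is omitted:

#include <stdbool.h>
#include <stdint.h>

typedef struct { bool fault; uint64_t pa; } addr_desc_t;  /* stand-in for AddressDescriptor */
typedef int acc_type_t;                                   /* stand-in for AccType */

extern addr_desc_t first_stage_translate(uint32_t va, acc_type_t acctype,
                                         bool iswrite, bool wasaligned, int size);
extern addr_desc_t second_stage_translate(addr_desc_t s1, uint32_t va,
                                          acc_type_t acctype, bool iswrite,
                                          bool wasaligned, bool s2fs1walk, int size);
extern bool is_fault(addr_desc_t d);
extern bool has_s2_translation(void);

/* Stage 1 runs first; stage 2 is applied only when stage 1 did not fault
   and a second stage of translation is in force for the current regime. */
addr_desc_t full_translate(uint32_t va, acc_type_t acctype, bool iswrite,
                           bool wasaligned, int size)
{
    addr_desc_t s1 = first_stage_translate(va, acctype, iswrite, wasaligned, size);
    if (!is_fault(s1) && has_s2_translation())
        return second_stage_translate(s1, va, acctype, iswrite, wasaligned,
                                      /* s2fs1walk = */ false, size);
    return s1;
}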
// AArch32.FullTranslate()
// =======================
// Perform both stage 1 and stage 2 translation walks for the current translation regime. The
// function used by Address Translation operations is similar except it uses the translation
// regime specified for the instruction.
AddressDescriptor AArch32.FullTranslate(bits(32) vaddress, AccType acctype, boolean iswrite,
boolean wasaligned, integer size)
// First Stage Translation
S1 = AArch32.FirstStageTranslate(vaddress, acctype, iswrite, wasaligned, size);
if !IsFault(S1) && !(HaveNV2Ext() && acctype == AccType_NV2REGISTER) && HasS2Translation() then
s2fs1walk = FALSE;
result = AArch32.SecondStageTranslate(S1, vaddress, acctype, iswrite, wasaligned, s2fs1walk,
size);
else
result = S1;
return result;
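// Note: the second stage is applied only when HasS2Translation() is TRUE and the access is not
// an NV2-redirected register access. AArch32.SecondStageTranslate() below then tests HCR.VM and
// HCR.DC to decide whether a stage 2 walk is actually performed.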
// AArch32.SecondStageTranslate()
// ==============================
// Perform a stage 2 translation walk. The function used by Address Translation operations is
// similar except it uses the translation regime specified for the instruction.
AddressDescriptor AArch32.SecondStageTranslate(AddressDescriptor S1, bits(32) vaddress,
AccType acctype, boolean iswrite, boolean wasaligned,
boolean s2fs1walk, integer size)
assert HasS2Translation();
assert IsZero(S1.paddress.address<47:40>);
hwupdatewalk = FALSE;
if !ELUsingAArch32(EL2) then
return AArch64.SecondStageTranslate(S1, ZeroExtend(vaddress, 64), acctype, iswrite,
wasaligned, s2fs1walk, size, hwupdatewalk);
s2_enabled = HCR.VM == '1' || HCR.DC == '1';
secondstage = TRUE;
if s2_enabled then // Second stage enabled
ipaddress = S1.paddress.address<39:0>;
S2 = AArch32.TranslationTableWalkLD(ipaddress, vaddress, acctype, iswrite, secondstage,
s2fs1walk, size);
// Check for unaligned data accesses to Device memory
if ((!wasaligned && acctype != AccType_IFETCH) || (acctype == AccType_DCZVA))
&& S2.addrdesc.memattrs.memtype == MemType_Device && !IsFault(S2.addrdesc) then
S2.addrdesc.fault = AArch32.AlignmentFault(acctype, iswrite, secondstage);
// Check for permissions on Stage2 translations
if !IsFault(S2.addrdesc) then
S2.addrdesc.fault = AArch32.CheckS2Permission(S2.perms, vaddress, ipaddress, S2.level,
acctype, iswrite, s2fs1walk);
// Check for instruction fetches from Device memory not marked as execute-never. As there
// has not been a Permission Fault then the memory is not marked execute-never.
if (!s2fs1walk && !IsFault(S2.addrdesc) && S2.addrdesc.memattrs.memtype == MemType_Device &&
acctype == AccType_IFETCH) then
domain = bits(4) UNKNOWN;
S2.addrdesc = AArch32.InstructionDevice(S2.addrdesc, vaddress, ipaddress, S2.level,
domain, acctype, iswrite,
secondstage, s2fs1walk);
// Check for protected table walk
if (s2fs1walk && !IsFault(S2.addrdesc) && HCR.PTW == '1' &&
S2.addrdesc.memattrs.memtype == MemType_Device) then
domain = bits(4) UNKNOWN;
S2.addrdesc.fault = AArch32.PermissionFault(ipaddress, domain, S2.level, acctype,
iswrite, secondstage, s2fs1walk);
result = CombineS1S2Desc(S1, S2.addrdesc);
else
result = S1;
return result;
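// Note: when the second stage is disabled (HCR.VM and HCR.DC both '0') the stage 1 descriptor is
// returned unchanged; otherwise CombineS1S2Desc() merges the stage 1 and stage 2 attributes and
// permissions.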
// AArch32.SecondStageWalk()
// =========================
// Perform a stage 2 translation on a stage 1 translation page table walk access.
AddressDescriptor AArch32.SecondStageWalk(AddressDescriptor S1, bits(32) vaddress, AccType acctype,
boolean iswrite, integer size)
assert HasS2Translation();
s2fs1walk = TRUE;
wasaligned = TRUE;
return AArch32.SecondStageTranslate(S1, vaddress, acctype, iswrite, wasaligned, s2fs1walk,
size);
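// Note: descriptor fetches made during a stage 1 walk are aligned 4-byte or 8-byte accesses, so
// AArch32.SecondStageWalk() calls AArch32.SecondStageTranslate() with wasaligned == TRUE, and with
// s2fs1walk == TRUE so that stage 2 permission checks treat the access as a page table walk.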
// AArch32.TranslateAddress()
// ==========================
// Main entry point for translating an address
AddressDescriptor AArch32.TranslateAddress(bits(32) vaddress, AccType acctype, boolean iswrite,
boolean wasaligned, integer size)
if !ELUsingAArch32(S1TranslationRegime()) then
return AArch64.TranslateAddress(ZeroExtend(vaddress, 64), acctype, iswrite, wasaligned,
size);
result = AArch32.FullTranslate(vaddress, acctype, iswrite, wasaligned, size);
if !(acctype IN {AccType_PTW, AccType_IC, AccType_AT}) && !IsFault(result) then
result.fault = AArch32.CheckDebug(vaddress, acctype, iswrite, size);
// Update virtual address for abort functions
result.vaddress = ZeroExtend(vaddress);
return result;
// AArch32.TranslationTableWalkLD()
// ================================
// Returns a result of a translation table walk using the Long-descriptor format
//
// Implementations might cache information from memory in any number of non-coherent TLB
// caching structures, and so avoid memory accesses that have been expressed in this
// pseudocode. The use of such TLBs is not expressed in this pseudocode.
TLBRecord AArch32.TranslationTableWalkLD(bits(40) ipaddress, bits(32) vaddress,
AccType acctype, boolean iswrite, boolean secondstage,
boolean s2fs1walk, integer size)
if !secondstage then
assert ELUsingAArch32(S1TranslationRegime());
else
assert HaveEL(EL2) && !IsSecure() && ELUsingAArch32(EL2) && HasS2Translation();
TLBRecord result;
AddressDescriptor descaddr;
bits(64) baseregister;
bits(40) inputaddr; // Input Address is 'vaddress' for stage 1, 'ipaddress' for stage 2
bit nswalk; // Stage 2 translation table walks are to Secure or to Non-secure PA space
domain = bits(4) UNKNOWN;
descaddr.memattrs.memtype = MemType_Normal;
// Fixed parameters for the page table walk:
// grainsize = Log2(Size of Table) - Size of Table is 4KB in AArch32
// stride = Log2(Address per Level) - Bits of address consumed at each level
constant integer grainsize = 12; // Log2(4KB page size)
constant integer stride = grainsize - 3; // Log2(page size / 8 bytes)
// Derived parameters for the page table walk:
// inputsize = Log2(Size of Input Address) - Input Address size in bits
// level = Level to start walk from
// This means that the number of levels after start level = 3-level
if !secondstage then
// First stage translation
inputaddr = ZeroExtend(vaddress);
el = AArch32.AccessUsesEL(acctype);
if el == EL2 then
inputsize = 32 - UInt(HTCR.T0SZ);
basefound = inputsize == 32 || IsZero(inputaddr<31:inputsize>);
disabled = FALSE;
baseregister = HTTBR;
descaddr.memattrs = WalkAttrDecode(HTCR.SH0, HTCR.ORGN0, HTCR.IRGN0, secondstage);
reversedescriptors = HSCTLR.EE == '1';
lookupsecure = FALSE;
singlepriv = TRUE;
hierattrsdisabled = AArch32.HaveHPDExt() && HTCR.HPD == '1';
else
basefound = FALSE;
disabled = FALSE;
t0size = UInt(TTBCR.T0SZ);
if t0size == 0 || IsZero(inputaddr<31:(32-t0size)>) then
inputsize = 32 - t0size;
basefound = TRUE;
baseregister = TTBR0;
descaddr.memattrs = WalkAttrDecode(TTBCR.SH0, TTBCR.ORGN0, TTBCR.IRGN0, secondstage);
hierattrsdisabled = AArch32.HaveHPDExt() && TTBCR.T2E == '1' && TTBCR2.HPD0 == '1';
t1size = UInt(TTBCR.T1SZ);
if (t1size == 0 && !basefound) || (t1size > 0 && IsOnes(inputaddr<31:(32-t1size)>)) then
inputsize = 32 - t1size;
basefound = TRUE;
baseregister = TTBR1;
descaddr.memattrs = WalkAttrDecode(TTBCR.SH1, TTBCR.ORGN1, TTBCR.IRGN1, secondstage);
hierattrsdisabled = AArch32.HaveHPDExt() && TTBCR.T2E == '1' && TTBCR2.HPD1 == '1';
reversedescriptors = SCTLR.EE == '1';
lookupsecure = IsSecure();
singlepriv = FALSE;
// The starting level is the number of strides needed to consume the input address
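// (For example, with the 4KB grain, grainsize = 12 and stride = 9, an inputsize of 32 bits gives
// level = 4 - (1 + (32 - 12 - 1) DIV 9) = 1, that is, a walk starting at level 1 with a 4-entry
// starting-level table.)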
level = 4 - (1 + (inputsize - grainsize - 1) DIV stride);
else
// Second stage translation
inputaddr = ipaddress;
inputsize = 32 - SInt(VTCR.T0SZ);
// VTCR.S must match VTCR.T0SZ[3]
if VTCR.S != VTCR.T0SZ<3> then
(-, inputsize) = ConstrainUnpredictableInteger(32-7, 32+8, Unpredictable_RESVTCRS);
basefound = inputsize == 40 || IsZero(inputaddr<39:inputsize>);
disabled = FALSE;
descaddr.memattrs = WalkAttrDecode(VTCR.SH0, VTCR.ORGN0, VTCR.IRGN0, secondstage);
reversedescriptors = HSCTLR.EE == '1';
singlepriv = TRUE;
lookupsecure = FALSE;
baseregister = VTTBR;
startlevel = UInt(VTCR.SL0);
level = 2 - startlevel;
if level <= 0 then basefound = FALSE;
// Number of entries in the starting level table =
// (Size of Input Address)/((Address per level)^(Num levels remaining)*(Size of Table))
startsizecheck = inputsize - ((3 - level)*stride + grainsize); // Log2(Num of entries)
// Check for starting level table with fewer than 2 entries or longer than 16 pages.
// Lower bound check is: startsizecheck < Log2(2 entries)
// That is, VTCR.SL0 == '00' and SInt(VTCR.T0SZ) > 1, Size of Input Address < 2^31 bytes
// Upper bound check is: startsizecheck > Log2(pagesize/8*16)
// That is, VTCR.SL0 == '01' and SInt(VTCR.T0SZ) < -2, Size of Input Address > 2^34 bytes
if startsizecheck < 1 || startsizecheck > stride + 4 then basefound = FALSE;
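// (For example, VTCR.SL0 == '01' starts the walk at level 1; with VTCR.T0SZ == 0, inputsize = 32
// and startsizecheck = 32 - (2*9 + 12) = 2, which is within bounds, giving a 4-entry level 1
// table.)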
if !basefound || disabled then
level = 1; // AArch64 reports this as a level 0 fault
result.addrdesc.fault = AArch32.TranslationFault(ipaddress, domain, level, acctype, iswrite,
secondstage, s2fs1walk);
return result;
if !IsZero(baseregister<47:40>) then
level = 0;
result.addrdesc.fault = AArch32.AddressSizeFault(ipaddress, domain, level, acctype, iswrite,
secondstage, s2fs1walk);
return result;
// Bottom bound of the Base address is:
// Log2(8 bytes per entry)+Log2(Number of entries in starting level table)
// Number of entries in starting level table =
// (Size of Input Address)/((Address per level)^(Num levels remaining)*(Size of Table))
baselowerbound = 3 + inputsize - ((3-level)*stride + grainsize); // Log2(Num of entries*8)
baseaddress = baseregister<39:baselowerbound>:Zeros(baselowerbound);
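// (For example, with inputsize = 32 and a level 1 start, baselowerbound = 3 + 32 - 30 = 5, so the
// table base address read from the base register must be at least 32-byte aligned, which is
// 4 entries of 8 bytes each.)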
ns_table = if lookupsecure then '0' else '1';
ap_table = '00';
xn_table = '0';
pxn_table = '0';
addrselecttop = inputsize - 1;
repeat
addrselectbottom = (3-level)*stride + grainsize;
bits(40) index = ZeroExtend(inputaddr<addrselecttop:addrselectbottom>:'000');
descaddr.paddress.address = ZeroExtend(baseaddress OR index);
descaddr.paddress.NS = ns_table;
// If there are two stages of translation, then the first stage table walk addresses
// are themselves subject to translation
if secondstage || !HasS2Translation() || (HaveNV2Ext() && acctype == AccType_NV2REGISTER) then
descaddr2 = descaddr;
else
descaddr2 = AArch32.SecondStageWalk(descaddr, vaddress, acctype, iswrite, 8);
// Check for a fault on the stage 2 walk
if IsFault(descaddr2) then
result.addrdesc.fault = descaddr2.fault;
return result;
// Update virtual address for abort functions
descaddr2.vaddress = ZeroExtend(vaddress);
accdesc = CreateAccessDescriptorPTW(acctype, secondstage, s2fs1walk, level);
desc = _Mem[descaddr2, 8, accdesc];
if reversedescriptors then desc = BigEndianReverse(desc);
if desc<0> == '0' || (desc<1:0> == '01' && level == 3) then
// Fault (00), Reserved (10), or Block (01) at level 3.
result.addrdesc.fault = AArch32.TranslationFault(ipaddress, domain, level, acctype,
iswrite, secondstage, s2fs1walk);
return result;
// Valid Block, Page, or Table entry
if desc<1:0> == '01' || level == 3 then // Block (01) or Page (11)
blocktranslate = TRUE;
else // Table (11)
if !IsZero(desc<47:40>) then
result.addrdesc.fault = AArch32.AddressSizeFault(ipaddress, domain, level, acctype,
iswrite, secondstage, s2fs1walk);
return result;
baseaddress = desc<39:grainsize>:Zeros(grainsize);
if !secondstage then
// Unpack the upper and lower table attributes
ns_table = ns_table OR desc<63>;
if !secondstage && !hierattrsdisabled then
ap_table<1> = ap_table<1> OR desc<62>; // read-only
xn_table = xn_table OR desc<60>;
// pxn_table and ap_table[0] apply only in EL1&0 translation regimes
if !singlepriv then
pxn_table = pxn_table OR desc<59>;
ap_table<0> = ap_table<0> OR desc<61>; // privileged
level = level + 1;
addrselecttop = addrselectbottom - 1;
blocktranslate = FALSE;
until blocktranslate;
// Unpack the descriptor into address and upper and lower block attributes
outputaddress = desc<39:addrselectbottom>:inputaddr<addrselectbottom-1:0>;
// Check the output address is inside the supported range
if !IsZero(desc<47:40>) then
result.addrdesc.fault = AArch32.AddressSizeFault(ipaddress, domain, level, acctype,
iswrite, secondstage, s2fs1walk);
return result;
// Check the access flag
if desc<10> == '0' then
result.addrdesc.fault = AArch32.AccessFlagFault(ipaddress, domain, level, acctype,
iswrite, secondstage, s2fs1walk);
return result;
xn = desc<54>; // Bit[54] of the block/page descriptor holds UXN
pxn = desc<53>; // Bit[53] of the block/page descriptor holds PXN
ap = desc<7:6>:'1'; // Bits[7:6] of the block/page descriptor hold AP[2:1]
contiguousbit = desc<52>;
nG = desc<11>;
sh = desc<9:8>;
memattr = desc<5:2>; // AttrIndx and NS bit in stage 1
result.domain = bits(4) UNKNOWN; // Domains not used
result.level = level;
result.blocksize = 2^((3-level)*stride + grainsize);
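// (For example, a level 3 entry maps a 2^12 byte page of 4KB, a level 2 Block maps 2^21 bytes,
// which is 2MB, and a level 1 Block maps 2^30 bytes, which is 1GB.)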
// Stage 1 translation regimes also inherit attributes from the tables
if !secondstage then
result.perms.xn = xn OR xn_table;
result.perms.ap<2> = ap<2> OR ap_table<1>; // Force read-only
// PXN, nG and AP[1] apply only in EL1&0 stage 1 translation regimes
if !singlepriv then
result.perms.ap<1> = ap<1> AND NOT(ap_table<0>); // Force privileged only
result.perms.pxn = pxn OR pxn_table;
// Pages from Non-secure tables are marked non-global in Secure EL1&0
if IsSecure() then
result.nG = nG OR ns_table;
else
result.nG = nG;
else
result.perms.ap<1> = '1';
result.perms.pxn = '0';
result.nG = '0';
result.GP = desc<50>; // Stage 1 block or pages might be guarded
result.perms.ap<0> = '1';
result.addrdesc.memattrs = AArch32.S1AttrDecode(sh, memattr<2:0>, acctype);
result.addrdesc.paddress.NS = memattr<3> OR ns_table;
else
result.perms.ap<2:1> = ap<2:1>;
result.perms.ap<0> = '1';
result.perms.xn = xn;
if HaveExtendedExecuteNeverExt() then result.perms.xxn = desc<53>;
result.perms.pxn = '0';
result.nG = '0';
if s2fs1walk then
result.addrdesc.memattrs = S2AttrDecode(sh, memattr, AccType_PTW);
else
result.addrdesc.memattrs = S2AttrDecode(sh, memattr, acctype);
result.addrdesc.paddress.NS = '1';
result.addrdesc.paddress.address = ZeroExtend(outputaddress);
result.addrdesc.fault = AArch32.NoFault();
result.contiguous = contiguousbit == '1';
if HaveCommonNotPrivateTransExt() then result.CnP = baseregister<0>;
return result;
// AArch32.TranslationTableWalkSD()
// ================================
// Returns a result of a translation table walk using the Short-descriptor format
//
// Implementations might cache information from memory in any number of non-coherent TLB
// caching structures, and so avoid memory accesses that have been expressed in this
// pseudocode. The use of such TLBs is not expressed in this pseudocode.
TLBRecord AArch32.TranslationTableWalkSD(bits(32) vaddress, AccType acctype, boolean iswrite,
integer size)
assert ELUsingAArch32(S1TranslationRegime());
// This is only called when address translation is enabled
TLBRecord result;
AddressDescriptor l1descaddr;
AddressDescriptor l2descaddr;
bits(40) outputaddress;
// Variables for Abort functions
ipaddress = bits(40) UNKNOWN;
secondstage = FALSE;
s2fs1walk = FALSE;
NS = bit UNKNOWN;
// Default setting of the domain
domain = bits(4) UNKNOWN;
// Determine correct Translation Table Base Register to use.
bits(64) ttbr;
n = UInt(TTBCR.N);
if n == 0 || IsZero(vaddress<31:(32-n)>) then
ttbr = TTBR0;
disabled = (TTBCR.PD0 == '1');
else
ttbr = TTBR1;
disabled = (TTBCR.PD1 == '1');
n = 0; // TTBR1 translation always works like N=0 TTBR0 translation
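// (For example, TTBCR.N == '010' sends virtual addresses with vaddress<31:30> == '00', that is,
// addresses below 0x40000000, through TTBR0, and all other addresses through TTBR1.)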
// Check this Translation Table Base Register is not disabled.
if disabled then
level = 1;
result.addrdesc.fault = AArch32.TranslationFault(ipaddress, domain, level, acctype, iswrite,
secondstage, s2fs1walk);
return result;
// Obtain descriptor from initial lookup.
l1descaddr.paddress.address = ZeroExtend(ttbr<31:14-n>:vaddress<31-n:20>:'00');
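// (With n == 0 this is a 16KB-aligned level 1 table, TTBR0<31:14>, indexed by vaddress<31:20>,
// giving 4096 word-sized entries.)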
l1descaddr.paddress.NS = if IsSecure() then '0' else '1';
IRGN = ttbr<0>:ttbr<6>; // TTBR.IRGN
RGN = ttbr<4:3>; // TTBR.RGN
SH = ttbr<1>:ttbr<5>; // TTBR.S:TTBR.NOS
l1descaddr.memattrs = WalkAttrDecode(SH, RGN, IRGN, secondstage);
if !HaveEL(EL2) || (IsSecure() && !IsSecureEL2Enabled()) then
// if only 1 stage of translation
l1descaddr2 = l1descaddr;
else
l1descaddr2 = AArch32.SecondStageWalk(l1descaddr, vaddress, acctype, iswrite, 4);
// Check for a fault on the stage 2 walk
if IsFault(l1descaddr2) then
result.addrdesc.fault = l1descaddr2.fault;
return result;
// Update virtual address for abort functions
l1descaddr2.vaddress = ZeroExtend(vaddress);
accdesc = CreateAccessDescriptorPTW(acctype, secondstage, s2fs1walk, level);
l1desc = _Mem[l1descaddr2, 4, accdesc];
if SCTLR.EE == '1' then l1desc = BigEndianReverse(l1desc);
// Process descriptor from initial lookup.
case l1desc<1:0> of
when '00' // Fault, Reserved
level = 1;
result.addrdesc.fault = AArch32.TranslationFault(ipaddress, domain, level, acctype,
iswrite, secondstage, s2fs1walk);
return result;
when '01' // Large page or Small page
domain = l1desc<8:5>;
level = 2;
pxn = l1desc<2>;
NS = l1desc<3>;
// Obtain descriptor from level 2 lookup.
l2descaddr.paddress.address = ZeroExtend(l1desc<31:10>:vaddress<19:12>:'00');
l2descaddr.paddress.NS = if IsSecure() then '0' else '1';
l2descaddr.memattrs = l1descaddr.memattrs;
if !HaveEL(EL2) || (IsSecure() && !IsSecureEL2Enabled()) then
// if only 1 stage of translation
l2descaddr2 = l2descaddr;
else
l2descaddr2 = AArch32.SecondStageWalk(l2descaddr, vaddress, acctype, iswrite, 4);
// Check for a fault on the stage 2 walk
if IsFault(l2descaddr2) then
result.addrdesc.fault = l2descaddr2.fault;
return result;
// Update virtual address for abort functions
l2descaddr2.vaddress = ZeroExtend(vaddress);
accdesc = CreateAccessDescriptorPTW(acctype, secondstage, s2fs1walk, level);
l2desc = _Mem[l2descaddr2, 4, accdesc];
if SCTLR.EE == '1' then l2desc = BigEndianReverse(l2desc);
// Process descriptor from level 2 lookup.
if l2desc<1:0> == '00' then
result.addrdesc.fault = AArch32.TranslationFault(ipaddress, domain, level, acctype,
iswrite, secondstage, s2fs1walk);
return result;
nG = l2desc<11>;
S = l2desc<10>;
ap = l2desc<9,5:4>;
if SCTLR.AFE == '1' && l2desc<4> == '0' then
// Armv8 VMSAv8-32 does not support hardware management of the Access flag.
result.addrdesc.fault = AArch32.AccessFlagFault(ipaddress, domain, level, acctype,
iswrite, secondstage, s2fs1walk);
return result;
if l2desc<1> == '0' then // Large page
xn = l2desc<15>;
tex = l2desc<14:12>;
c = l2desc<3>;
b = l2desc<2>;
blocksize = 64;
outputaddress = ZeroExtend(l2desc<31:16>:vaddress<15:0>);
else // Small page
tex = l2desc<8:6>;
c = l2desc<3>;
b = l2desc<2>;
xn = l2desc<0>;
blocksize = 4;
outputaddress = ZeroExtend(l2desc<31:12>:vaddress<11:0>);
when '1x' // Section or Supersection
NS = l1desc<19>;
nG = l1desc<17>;
S = l1desc<16>;
ap = l1desc<15,11:10>;
tex = l1desc<14:12>;
xn = l1desc<4>;
c = l1desc<3>;
b = l1desc<2>;
pxn = l1desc<0>;
level = 1;
if SCTLR.AFE == '1' && l1desc<10> == '0' then
// Armv8 VMSAv8-32 does not support hardware management of the Access flag.
result.addrdesc.fault = AArch32.AccessFlagFault(ipaddress, domain, level, acctype,
iswrite, secondstage, s2fs1walk);
return result;
if l1desc<18> == '0' then // Section
domain = l1desc<8:5>;
blocksize = 1024;
outputaddress = ZeroExtend(l1desc<31:20>:vaddress<19:0>);
else // Supersection
domain = '0000';
blocksize = 16384;
outputaddress = l1desc<8:5>:l1desc<23:20>:l1desc<31:24>:vaddress<23:0>;
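// (A Supersection descriptor supplies PA<39:36> in l1desc<8:5> and PA<35:32> in l1desc<23:20>,
// extending the output address to 40 bits.)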
// Decode the TEX, C, B and S bits to produce the TLBRecord's memory attributes
if SCTLR.TRE == '0' then
if RemapRegsHaveResetValues() then
result.addrdesc.memattrs = AArch32.DefaultTEXDecode(tex, c, b, S, acctype);
else
result.addrdesc.memattrs = MemoryAttributes IMPLEMENTATION_DEFINED;
else
result.addrdesc.memattrs = AArch32.RemappedTEXDecode(tex, c, b, S, acctype);
// Set the rest of the TLBRecord, try to add it to the TLB, and return it.
result.perms.ap = ap;
result.perms.xn = xn;
result.perms.pxn = pxn;
result.nG = nG;
result.domain = domain;
result.level = level;
result.blocksize = blocksize;
result.addrdesc.paddress.address = ZeroExtend(outputaddress);
result.addrdesc.paddress.NS = if IsSecure() then NS else '1';
result.addrdesc.fault = AArch32.NoFault();
return result;
boolean RemapRegsHaveResetValues();
// AArch64.BreakpointMatch()
// =========================
// Breakpoint matching in an AArch64 translation regime.
boolean AArch64.BreakpointMatch(integer n, bits(64) vaddress, AccType acctype, integer size)
assert !ELUsingAArch32(S1TranslationRegime());
assert n <= UInt(ID_AA64DFR0_EL1.BRPs);
enabled = DBGBCR_EL1[n].E == '1';
ispriv = PSTATE.EL != EL0;
linked = DBGBCR_EL1[n].BT == '0x01';
isbreakpnt = TRUE;
linked_to = FALSE;
state_match = AArch64.StateMatch(DBGBCR_EL1[n].SSC, DBGBCR_EL1[n].HMC, DBGBCR_EL1[n].PMC,
linked, DBGBCR_EL1[n].LBN, isbreakpnt, acctype, ispriv);
value_match = AArch64.BreakpointValueMatch(n, vaddress, linked_to);
if HaveAnyAArch32() && size == 4 then // Check second halfword
// If the breakpoint address and BAS of an Address breakpoint match the address of the
// second halfword of an instruction, but not the address of the first halfword, it is
// CONSTRAINED UNPREDICTABLE whether or not this breakpoint generates a Breakpoint debug
// event.
match_i = AArch64.BreakpointValueMatch(n, vaddress + 2, linked_to);
if !value_match && match_i then
value_match = ConstrainUnpredictableBool(Unpredictable_BPMATCHHALF);
if vaddress<1> == '1' && DBGBCR_EL1[n].BAS == '1111' then
// The above notwithstanding, if DBGBCR_EL1[n].BAS == '1111', then it is CONSTRAINED
// UNPREDICTABLE whether or not a Breakpoint debug event is generated for an instruction
// at the address DBGBVR_EL1[n]+2.
if value_match then value_match = ConstrainUnpredictableBool(Unpredictable_BPMATCHHALF);
match = value_match && state_match && enabled;
return match;
// AArch64.BreakpointValueMatch()
// ==============================
boolean AArch64.BreakpointValueMatch(integer n, bits(64) vaddress, boolean linked_to)
// "n" is the identity of the breakpoint unit to match against.
// "vaddress" is the current instruction address, ignored if linked_to is TRUE and for Context
// matching breakpoints.
// "linked_to" is TRUE if this is a call from StateMatch for linking.
// If a non-existent breakpoint then it is CONSTRAINED UNPREDICTABLE whether this gives
// no match or the breakpoint is mapped to another UNKNOWN implemented breakpoint.
if n > UInt(ID_AA64DFR0_EL1.BRPs) then
(c, n) = ConstrainUnpredictableInteger(0, UInt(ID_AA64DFR0_EL1.BRPs), Unpredictable_BPNOTIMPL);
assert c IN {Constraint_DISABLED, Constraint_UNKNOWN};
if c == Constraint_DISABLED then return FALSE;
// If this breakpoint is not enabled, it cannot generate a match. (This could also happen on a
// call from StateMatch for linking).
if DBGBCR_EL1[n].E == '0' then return FALSE;
context_aware = (n >= UInt(ID_AA64DFR0_EL1.BRPs) - UInt(ID_AA64DFR0_EL1.CTX_CMPs));
// If BT is set to a reserved type, behaves either as disabled or as a not-reserved type.
dbgtype = DBGBCR_EL1[n].BT;
if ((dbgtype IN {'011x','11xx'} && !HaveVirtHostExt()) || // Context matching
dbgtype == '010x' || // Reserved
(dbgtype != '0x0x' && !context_aware) || // Context matching
(dbgtype == '1xxx' && !HaveEL(EL2))) then // EL2 extension
(c, dbgtype) = ConstrainUnpredictableBits(Unpredictable_RESBPTYPE);
assert c IN {Constraint_DISABLED, Constraint_UNKNOWN};
if c == Constraint_DISABLED then return FALSE;
// Otherwise the value returned by ConstrainUnpredictableBits must be a not-reserved value
// Determine what to compare against.
match_addr = (dbgtype == '0x0x');
match_vmid = (dbgtype == '10xx');
match_cid = (dbgtype == '001x');
match_cid1 = (dbgtype IN { '101x', 'x11x'});
match_cid2 = (dbgtype == '11xx');
linked = (dbgtype == 'xxx1');
// If this is a call from StateMatch, return FALSE if the breakpoint is not programmed for a
// VMID and/or context ID match, or if not context-aware. The above assertions mean that the
// code can just test for match_addr == TRUE to confirm all these things.
if linked_to && (!linked || match_addr) then return FALSE;
// If called from BreakpointMatch return FALSE for Linked context ID and/or VMID matches.
if !linked_to && linked && !match_addr then return FALSE;
// Do the comparison.
if match_addr then
byte = UInt(vaddress<1:0>);
if HaveAnyAArch32() then
// T32 instructions can be executed at EL0 in an AArch64 translation regime.
assert byte IN {0,2}; // "vaddress" is halfword aligned
byte_select_match = (DBGBCR_EL1[n].BAS<byte> == '1');
else
assert byte == 0; // "vaddress" is word aligned
byte_select_match = TRUE; // DBGBCR_EL1[n].BAS<byte> is RES1
top = AddrTop(vaddress, TRUE, PSTATE.EL);
BVR_match = vaddress<top:2> == DBGBVR_EL1[n]<top:2> && byte_select_match;
elsif match_cid then
if IsInHost() then
BVR_match = (CONTEXTIDR_EL2 == DBGBVR_EL1[n]<31:0>);
else
BVR_match = (PSTATE.EL IN {EL0, EL1} && CONTEXTIDR_EL1 == DBGBVR_EL1[n]<31:0>);
elsif match_cid1 then
BVR_match = (PSTATE.EL IN {EL0, EL1} && !IsInHost() && CONTEXTIDR_EL1 == DBGBVR_EL1[n]<31:0>);
if match_vmid then
if !Have16bitVMID() || VTCR_EL2.VS == '0' then
vmid = ZeroExtend(VTTBR_EL2.VMID<7:0>, 16);
bvr_vmid = ZeroExtend(DBGBVR_EL1[n]<39:32>, 16);
else
vmid = VTTBR_EL2.VMID;
bvr_vmid = DBGBVR_EL1[n]<47:32>;
BXVR_match = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() &&
!IsInHost() &&
vmid == bvr_vmid);
elsif match_cid2 then
BXVR_match = (!IsSecure() && HaveVirtHostExt() &&
DBGBVR_EL1[n]<63:32> == CONTEXTIDR_EL2);
bvr_match_valid = (match_addr || match_cid || match_cid1);
bxvr_match_valid = (match_vmid || match_cid2);
match = (!bxvr_match_valid || BXVR_match) && (!bvr_match_valid || BVR_match);
return match;
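// Note: only the highest-numbered breakpoints are context-aware. For example, with six
// breakpoints (ID_AA64DFR0_EL1.BRPs == '0101') and two context-aware comparators
// (ID_AA64DFR0_EL1.CTX_CMPs == '0001'), breakpoints 4 and 5 are the context-aware ones.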
// AArch64.StateMatch()
// ====================
// Determine whether a breakpoint or watchpoint is enabled in the current mode and state.
boolean AArch64.StateMatch(bits(2) SSC, bit HMC, bits(2) PxC, boolean linked, bits(4) LBN,
boolean isbreakpnt, AccType acctype, boolean ispriv)
// "SSC", "HMC", "PxC" are the control fields from the DBGBCR[n] or DBGWCR[n] register.
// "linked" is TRUE if this is a linked breakpoint/watchpoint type.
// "LBN" is the linked breakpoint number from the DBGBCR[n] or DBGWCR[n] register.
// "isbreakpnt" is TRUE for breakpoints, FALSE for watchpoints.
// "ispriv" is valid for watchpoints, and selects between privileged and unprivileged accesses.
// If parameters are set to a reserved type, behaves as either disabled or a defined type
(c, SSC, HMC, PxC) =(PSTATE.EL, CheckValidStateMatch(SSC, HMC, PxC, isbreakpnt);
if c == Constraint_DISABLED then return FALSE;
// Otherwise the HMC,SSC,PxC values are either valid or the values returned by
// CheckValidStateMatch are valid.
EL3_match = HaveEL(EL3) && HMC == '1' && SSC<0> == '0';
EL2_match = HaveEL(EL2) && ((HMC == '1' && (SSC:PxC != '1000')) || SSC == '11');
EL1_match = PxC<0> == '1';
EL0_match = PxC<1> == '1';
if HaveNV2Ext() && acctype == AccType_NV2REGISTER && !isbreakpnt then
priv_match = EL2_match;
elsif !ispriv && !isbreakpnt then
priv_match = EL0_match;
else
case PSTATE.EL of
when EL3 priv_match = EL3_match;
when EL2 priv_match = EL2_match;
when EL1 priv_match = EL1_match;
when EL0 priv_match = EL0_match;
case SSC of
when '00' security_state_match = TRUE; // Both
when '01' security_state_match = !IsSecure(); // Non-secure only
when '10' security_state_match = IsSecure(); // Secure only
when '11' security_state_match = (HMC == '1' || IsSecure()); // HMC=1 -> Both, 0 -> Secure only
if linked then
// "LBN" must be an enabled context-aware breakpoint unit. If it is not context-aware then
// it is CONSTRAINED UNPREDICTABLE whether this gives no match, or LBN is mapped to some
// UNKNOWN breakpoint that is context-aware.
lbn = UInt(LBN);
first_ctx_cmp = (UInt(ID_AA64DFR0_EL1.BRPs) - UInt(ID_AA64DFR0_EL1.CTX_CMPs));
last_ctx_cmp = UInt(ID_AA64DFR0_EL1.BRPs);
if (lbn < first_ctx_cmp || lbn > last_ctx_cmp) then
(c, lbn) = ConstrainUnpredictableInteger(first_ctx_cmp, last_ctx_cmp, Unpredictable_BPNOTCTXCMP);
assert c IN {Constraint_DISABLED, Constraint_NONE, Constraint_UNKNOWN};
case c of
when Constraint_DISABLED return FALSE; // Disabled
when Constraint_NONE linked = FALSE; // No linking
// Otherwise ConstrainUnpredictableInteger returned a context-aware breakpoint
if linked then
vaddress = bits(64) UNKNOWN;
linked_to = TRUE;
linked_match = AArch64.BreakpointValueMatch(lbn, vaddress, linked_to);
return priv_match && security_state_match && (!linked || linked_match);
// CheckValidStateMatch()
// ======================
// Checks for an invalid state match that will generate Constrained Unpredictable behaviour, otherwise
// returns Constraint_NONE.
(Constraint, bits(2), bit, bits(2)) CheckValidStateMatch(bits(2) SSC, bit HMC, bits(2) PxC, boolean isbreakpnt)
boolean reserved = FALSE;
// Match 'Usr/Sys/Svc' only valid for AArch32 breakpoints
if (!isbreakpnt || !HaveAArch32EL(EL1)) && HMC:PxC == '000' && SSC != '11' then
reserved = TRUE;
// Both EL3 and EL2 are not implemented
if !HaveEL(EL3) && !HaveEL(EL2) && (HMC != '0' || SSC != '00') then
reserved = TRUE;
// EL3 is not implemented
if !HaveEL(EL3) && SSC IN {'01','10'} && HMC:SSC:PxC != '10100' then
reserved = TRUE;
// EL3 using AArch64 only
if (!HaveEL(EL3) || HighestELUsingAArch32()) && HMC:SSC:PxC == '11000' then
reserved = TRUE;
// EL2 is not implemented
if !HaveEL(EL2) && HMC:SSC:PxC == '11100' then
reserved = TRUE;
// Secure EL2 is not implemented
if !HaveSecureEL2Ext() && (HMC:SSC:PxC) IN {'01100','10100','x11x1'} then
reserved = TRUE;
// Values that are not allocated in any architecture version
if (HMC:SSC:PxC) IN {'01110','100x0','10110','11x10'} then
reserved = TRUE;
if reserved then
// If parameters are set to a reserved type, behaves as either disabled or a defined type
(c, <HMC,SSC,PxC>) = ConstrainUnpredictableBits(Unpredictable_RESBPWPCTRL);
assert c IN {Constraint_DISABLED, Constraint_UNKNOWN};
if c == Constraint_DISABLED then
return (c, bits(2) UNKNOWN, bit UNKNOWN, bits(2) UNKNOWN);
// Otherwise the value returned by ConstrainUnpredictableBits must be a not-reserved value
return (Constraint_NONE, SSC, HMC, PxC);
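// (For example, HMC:SSC:PxC == '00000' programmed for a watchpoint on an AArch64-only
// implementation falls into the 'Usr/Sys/Svc' case above, so the watchpoint behaves either as
// disabled or as a defined type.)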
// AArch64.GenerateDebugExceptions()
// =================================
boolean AArch64.GenerateDebugExceptions()
return AArch64.GenerateDebugExceptionsFrom(PSTATE.EL, IsSecure(), PSTATE.D);
// AArch64.GenerateDebugExceptionsFrom()
// =====================================
boolean AArch64.GenerateDebugExceptionsFrom(bits(2) from, boolean secure, bit mask)

    if OSLSR_EL1.OSLK == '1' || DoubleLockStatus() || Halted() then
        return FALSE;

    route_to_el2 = HaveEL(EL2) && (!secure || IsSecureEL2Enabled()) &&
                   (HCR_EL2.TGE == '1' || MDCR_EL2.TDE == '1');
    target = (if route_to_el2 then EL2 else EL1);

    enabled = !HaveEL(EL3) || !secure || MDCR_EL3.SDD == '0';

    if from == target then
        enabled = enabled && MDSCR_EL1.KDE == '1' && mask == '0';
    else
        enabled = enabled && UInt(target) > UInt(from);

    return enabled;

// AArch64.CheckForPMUOverflow()
// =============================
// Signal Performance Monitors overflow IRQ and CTI overflow events

boolean AArch64.CheckForPMUOverflow()

    pmuirq = PMCR_EL0.E == '1' && PMINTENSET_EL1<31> == '1' && PMOVSSET_EL0<31> == '1';
    for n = 0 to UInt(PMCR_EL0.N) - 1
        if HaveEL(EL2) then
            E = (if n < UInt(MDCR_EL2.HPMN) then PMCR_EL0.E else MDCR_EL2.HPME);
        else
            E = PMCR_EL0.E;
        if E == '1' && PMINTENSET_EL1<n> == '1' && PMOVSSET_EL0<n> == '1' then pmuirq = TRUE;

    SetInterruptRequestLevel(InterruptID_PMUIRQ, if pmuirq then HIGH else LOW);

    CTI_SetEventLevel(CrossTriggerIn_PMUOverflow, if pmuirq then HIGH else LOW);

    // The request remains set until the condition is cleared. (For example, an interrupt handler
    // or cross-triggered event handler clears the overflow status flag by writing to PMOVSCLR_EL0.)

    return pmuirq;
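The enable decision in AArch64.GenerateDebugExceptionsFrom() above can be restated in plain C for illustration. Every argument is a stand-in for the pseudocode state it is named after (OS Lock, OS Double Lock, Debug state and the relevant control bits); this is a non-normative sketch, not part of the specification.

#include <stdbool.h>

typedef enum { EL0, EL1, EL2, EL3 } ExcLevel;

/* Sketch of AArch64.GenerateDebugExceptionsFrom(). */
static bool generate_debug_exceptions_from(ExcLevel from, bool secure, bool mask,
                                           bool oslk, bool double_lock, bool halted,
                                           bool have_el2, bool have_el3,
                                           bool secure_el2_enabled, bool tge,
                                           bool tde, bool sdd, bool kde) {
    if (oslk || double_lock || halted)
        return false;                                /* locks or Debug state */

    bool route_to_el2 = have_el2 && (!secure || secure_el2_enabled) && (tge || tde);
    ExcLevel target = route_to_el2 ? EL2 : EL1;

    bool enabled = !have_el3 || !secure || !sdd;     /* MDCR_EL3.SDD == '0' */
    if (from == target)
        enabled = enabled && kde && !mask;           /* KDE set, PSTATE.D clear */
    else
        enabled = enabled && (target > from);
    return enabled;
}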
// AArch64.CountEvents()
// =====================
// Return TRUE if counter "n" should count its event. For the cycle counter, n == 31.

boolean AArch64.CountEvents(integer n)
    assert n == 31 || n < UInt(PMCR_EL0.N);

    // Event counting is disabled in Debug state
    debug = Halted();

    // In Non-secure state, some counters are reserved for EL2
    if HaveEL(EL2) then
        E = if n < UInt(MDCR_EL2.HPMN) || n == 31 then PMCR_EL0.E else MDCR_EL2.HPME;
    else
        E = PMCR_EL0.E;
    enabled = E == '1' && PMCNTENSET_EL0<n> == '1';

    // Event counting in Secure state is prohibited unless any one of:
    // * EL3 is not implemented
    // * EL3 is using AArch64 and MDCR_EL3.SPME == 1
    prohibited = HaveEL(EL3) && IsSecure() && MDCR_EL3.SPME == '0';

    // Event counting at EL2 is prohibited if all of:
    // * The HPMD Extension is implemented
    // * Executing at EL2
    // * PMNx is not reserved for EL2
    // * MDCR_EL2.HPMD == 1
    if !prohibited && HaveEL(EL2) && HaveHPMDExt() && PSTATE.EL == EL2 && (n < UInt(MDCR_EL2.HPMN) || n == 31) then
        prohibited = (MDCR_EL2.HPMD == '1');

    // The IMPLEMENTATION DEFINED authentication interface might override software controls
    if prohibited && !HaveNoSecurePMUDisableOverride() then
        prohibited = !ExternalSecureNoninvasiveDebugEnabled();

    // For the cycle counter, PMCR_EL0.DP enables counting when otherwise prohibited
    if prohibited && n == 31 then prohibited = (PMCR_EL0.DP == '1');

    // Event counting can be filtered by the {P, U, NSK, NSU, NSH, M, SH} bits
    filter = if n == 31 then PMCCFILTR else PMEVTYPER[n];

    P = filter<31>;
    U = filter<30>;
    NSK = if HaveEL(EL3) then filter<29> else '0';
    NSU = if HaveEL(EL3) then filter<28> else '0';
    NSH = if HaveEL(EL2) then filter<27> else '0';
    M = if HaveEL(EL3) then filter<26> else '0';
    SH = if HaveSecureEL2Ext() then filter<24> else '0';

    case PSTATE.EL of
        when EL0 filtered = if IsSecure() then U == '1' else U != NSU;
        when EL1 filtered = if IsSecure() then P == '1' else P != NSK;
        when EL2 filtered = (if IsSecure() then NSH == SH else NSH == '0');
        when EL3 filtered = (M != P);

    return !debug && enabled && !prohibited && !filtered;

// CheckProfilingBufferAccess()
// ============================

SysRegAccess CheckProfilingBufferAccess()
    if !HaveStatisticalProfiling() || PSTATE.EL == EL0 || UsingAArch32() then
        return SysRegAccess_UNDEFINED;

    if PSTATE.EL == EL1 && EL2Enabled() && MDCR_EL2.E2PB<0> != '1' then
        return SysRegAccess_TrapToEL2;

    if HaveEL(EL3) && PSTATE.EL != EL3 && MDCR_EL3.NSPB != SCR_EL3.NS:'1' then
        return SysRegAccess_TrapToEL3;

    return SysRegAccess_OK;

// CheckStatisticalProfilingAccess()
// =================================

SysRegAccess CheckStatisticalProfilingAccess()
    if !HaveStatisticalProfiling() || PSTATE.EL == EL0 || UsingAArch32() then
        return SysRegAccess_UNDEFINED;

    if PSTATE.EL == EL1 && EL2Enabled() && MDCR_EL2.TPMS == '1' then
        return SysRegAccess_TrapToEL2;

    if HaveEL(EL3) && PSTATE.EL != EL3 && MDCR_EL3.NSPB != SCR_EL3.NS:'1' then
        return SysRegAccess_TrapToEL3;

    return SysRegAccess_OK;
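For illustration, the {P, U, NSK, NSU, NSH, M, SH} filter evaluation at the end of AArch64.CountEvents() can be written as a small C helper; "filter" models PMCCFILTR or PMEVTYPER<n>, and the remaining arguments model the pseudocode queries. Non-normative sketch.

#include <stdbool.h>
#include <stdint.h>

/* Returns true when the event is filtered out (must not be counted). */
static bool event_filtered(uint32_t filter, int current_el, bool is_secure,
                           bool have_el2, bool have_el3, bool have_secure_el2) {
    bool P   = (filter >> 31) & 1;
    bool U   = (filter >> 30) & 1;
    bool NSK = have_el3 && ((filter >> 29) & 1);
    bool NSU = have_el3 && ((filter >> 28) & 1);
    bool NSH = have_el2 && ((filter >> 27) & 1);
    bool M   = have_el3 && ((filter >> 26) & 1);
    bool SH  = have_secure_el2 && ((filter >> 24) & 1);

    switch (current_el) {
    case 0:  return is_secure ? U : (U != NSU);
    case 1:  return is_secure ? P : (P != NSK);
    case 2:  return is_secure ? (NSH == SH) : !NSH;
    default: return M != P;                          /* EL3 */
    }
}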
// CollectContextIDR1()
// ====================

boolean CollectContextIDR1()
    if !StatisticalProfilingEnabled() then return FALSE;
    if PSTATE.EL == EL2 then return FALSE;
    if EL2Enabled() && HCR_EL2.TGE == '1' then return FALSE;
    return PMSCR_EL1.CX == '1';
// CollectContextIDR2()
// ====================

boolean CollectContextIDR2()
    if !StatisticalProfilingEnabled() then return FALSE;
    if !EL2Enabled() then return FALSE;
    return PMSCR_EL2.CX == '1';
// CollectPhysicalAddress()
// ========================

boolean CollectPhysicalAddress()
    if !StatisticalProfilingEnabled() then return FALSE;
    (secure, el) = ProfilingBufferOwner();
    if !secure && HaveEL(EL2) then
        return PMSCR_EL2.PA == '1' && (el == EL2 || PMSCR_EL1.PA == '1');
    else
        return PMSCR_EL1.PA == '1';
// CollectRecord()
// ===============

boolean CollectRecord(bits(64) events, integer total_latency, OpType optype)
    assert StatisticalProfilingEnabled();

    // Filtering by event
    if PMSFCR_EL1.FE == '1' then
        // Check for UNPREDICTABLE case
        if IsZero(PMSEVFR_EL1) && ConstrainUnpredictableBool(Unpredictable_ZEROPMSEVFR) then
            return FALSE;
        bits(64) mask = 0xFFFF0000FF00F0AA<63:0>; // Bits [63:48,31:24,15:12,7,5,3,1]
        if HaveStatisticalProfiling() then
            mask<11> = '1'; // Alignment flag
        if HaveSVE() then mask<18:17> = Ones(); // Predicate flags
        e = events AND mask;
        m = PMSEVFR_EL1 AND mask;
        if !IsZero(NOT(e) AND m) then return FALSE;

    // Filtering by type
    if PMSFCR_EL1.FT == '1' then
        // Check for UNPREDICTABLE case
        if IsZero(PMSFCR_EL1.<B,LD,ST>) && ConstrainUnpredictableBool(Unpredictable_NOOPTYPES) then
            return FALSE;
        case optype of
            when OpType_Branch
                if PMSFCR_EL1.B == '0' then return FALSE;
            when OpType_Load
                if PMSFCR_EL1.LD == '0' then return FALSE;
            when OpType_Store
                if PMSFCR_EL1.ST == '0' then return FALSE;
            when OpType_LoadAtomic
                if PMSFCR_EL1.<LD,ST> == '00' then return FALSE;
            otherwise
                return FALSE;

    // Filtering by latency
    if PMSFCR_EL1.FL == '1' then
        // Check for UNPREDICTABLE case
        if IsZero(PMSLATFR_EL1.MINLAT) && ConstrainUnpredictableBool(Unpredictable_ZEROMINLATENCY) then
            return FALSE;
        if total_latency < UInt(PMSLATFR_EL1.MINLAT) then
            return FALSE;

    return TRUE;
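The event-filter test in CollectRecord() reduces to two masked AND operations: a record passes only if every event selected in PMSEVFR_EL1 actually occurred. A non-normative C restatement, ignoring the conditional mask<11> and predicate-flag adjustments above:

#include <stdbool.h>
#include <stdint.h>

static bool passes_event_filter(uint64_t events, uint64_t pmsevfr) {
    uint64_t mask = 0xFFFF0000FF00F0AAull;   /* bits [63:48,31:24,15:12,7,5,3,1] */
    uint64_t e = events & mask;
    uint64_t m = pmsevfr & mask;
    return ((~e) & m) == 0;                  /* !IsZero(NOT(e) AND m) drops it */
}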
// CollectTimeStamp()
// ==================

TimeStamp CollectTimeStamp()
    if !StatisticalProfilingEnabled() then return TimeStamp_None;
    (secure, el) = ProfilingBufferOwner();
    if el == EL2 then
        if PMSCR_EL2.TS == '0' then return TimeStamp_None;
    else
        if PMSCR_EL1.TS == '0' then return TimeStamp_None;
    if EL2Enabled() then
        pct = PMSCR_EL2.PCT == '1' && (el == EL2 || PMSCR_EL1.PCT == '1');
    else
        pct = PMSCR_EL1.PCT == '1';
    return (if pct then TimeStamp_Physical else TimeStamp_Virtual);
enumeration OpType {
    OpType_Load,       // Any memory-read operation other than atomics, compare-and-swap, and swap
    OpType_Store,      // Any memory-write operation, including atomics without return
    OpType_LoadAtomic, // Atomics with return, compare-and-swap and swap
    OpType_Branch,     // Software write to the PC
    OpType_Other       // Any other class of operation
};
// ProfilingBufferEnabled()
// ========================

boolean ProfilingBufferEnabled()
    if !HaveStatisticalProfiling() then return FALSE;
    (secure, el) = ProfilingBufferOwner();
    non_secure_bit = if secure then '0' else '1';
    return (!ELUsingAArch32(el) && non_secure_bit == SCR_EL3.NS &&
            PMBLIMITR_EL1.E == '1' && PMBSR_EL1.S == '0');
// ProfilingBufferOwner()
// ======================

(boolean, bits(2)) ProfilingBufferOwner()
    secure = if HaveEL(EL3) then (MDCR_EL3.NSPB<1> == '0') else IsSecure();
    el = if !secure && HaveEL(EL2) && MDCR_EL2.E2PB == '00' then EL2 else EL1;
    return (secure, el);
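As a non-normative illustration of ProfilingBufferOwner(): when EL3 is implemented the owning Security state comes from MDCR_EL3.NSPB<1>, and the owning Exception level is EL2 only for a Non-secure buffer with MDCR_EL2.E2PB == '00'.

#include <stdbool.h>

/* Sketch; nspb and e2pb model the 2-bit register fields. */
static void profiling_buffer_owner(bool have_el2, bool have_el3, bool is_secure,
                                   unsigned nspb, unsigned e2pb,
                                   bool *secure, int *el) {
    *secure = have_el3 ? ((nspb & 2u) == 0) : is_secure;  /* NSPB<1> == '0' */
    *el = (!*secure && have_el2 && e2pb == 0) ? 2 : 1;
}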
// Barrier to ensure that all existing profiling data has been formatted, and profiling buffer
// addresses have been translated such that writes to the profiling buffer have been initiated.
// A following DSB completes when writes to the profiling buffer have completed.
ProfilingSynchronizationBarrier();
// StatisticalProfilingEnabled()
// =============================

boolean StatisticalProfilingEnabled()
    if !HaveStatisticalProfiling() || UsingAArch32() || !ProfilingBufferEnabled() then
        return FALSE;

    in_host = EL2Enabled() && HCR_EL2.TGE == '1';
    (secure, el) = ProfilingBufferOwner();
    if UInt(el) < UInt(PSTATE.EL) || secure != IsSecure() || (in_host && el == EL1) then
        return FALSE;

    case PSTATE.EL of
        when EL3 Unreachable();
        when EL2 spe_bit = PMSCR_EL2.E2SPE;
        when EL1 spe_bit = PMSCR_EL1.E1SPE;
        when EL0 spe_bit = (if in_host then PMSCR_EL2.E0HSPE else PMSCR_EL1.E0SPE);

    return spe_bit == '1';
enumeration SysRegAccess { SysRegAccess_OK,
                           SysRegAccess_UNDEFINED,
                           SysRegAccess_TrapToEL1,
                           SysRegAccess_TrapToEL2,
                           SysRegAccess_TrapToEL3 };
enumeration TimeStamp {
    TimeStamp_None,      // No timestamp
    TimeStamp_CoreSight, // CoreSight time (IMPLEMENTATION DEFINED)
    TimeStamp_Virtual,   // Physical counter value minus CNTVOFF_EL2
    TimeStamp_Physical   // Physical counter value with no offset
};
// AArch64.TakeExceptionInDebugState()
// ===================================
// Take an exception in Debug state to an Exception Level using AArch64.

AArch64.TakeExceptionInDebugState(bits(2) target_el, ExceptionRecord exception)
    assert HaveEL(target_el) && !ELUsingAArch32(target_el) && UInt(target_el) >= UInt(PSTATE.EL);
sync_errors = HaveIESB() && SCTLR[].IESB == '1';
if HaveDoubleFaultExt() then
sync_errors = sync_errors || (SCR_EL3.EA == '1' && SCR_EL3.NMEA == '1' && PSTATE.EL == EL3);
// SCTLR[].IESB might be ignored in Debug state.
if !ConstrainUnpredictableBool(Unpredictable_IESBinDebug) then
sync_errors = FALSE;
SynchronizeContext();
// If coming from AArch32 state, the top parts of the X[] registers might be set to zero
from_32 = UsingAArch32();
if from_32 then AArch64.MaybeZeroRegisterUppers();
MaybeZeroSVEUppers(target_el);
AArch64.ReportException(exception, target_el);
PSTATE.EL = target_el;
PSTATE.nRW = '0';
PSTATE.SP = '1';
SPSR[] = bits(32) UNKNOWN;
ELR[] = bits(64) UNKNOWN;
// PSTATE.{SS,D,A,I,F} are not observable and ignored in Debug state, so behave as if UNKNOWN.
PSTATE.<SS,D,A,I,F> = bits(5) UNKNOWN;
PSTATE.IL = '0';
if from_32 then // Coming from AArch32
PSTATE.IT = '00000000';
PSTATE.T = '0'; // PSTATE.J is RES0
if (HavePANExt() && (PSTATE.EL == EL1 || (PSTATE.EL == EL2 && ELIsInHost(EL0))) &&
SCTLR[].SPAN == '0') then
PSTATE.PAN = '1';
if HaveUAOExt() then PSTATE.UAO = '0';
if HaveBTIExt() then PSTATE.BTYPE = '00';
if HaveSSBSExt() then PSTATE.SSBS = bit UNKNOWN;
if HaveMTEExt() then PSTATE.TCO = '1';
DLR_EL0 = bits(64) UNKNOWN;
DSPSR_EL0 = bits(32) UNKNOWN;
EDSCR.ERR = '1';
UpdateEDSCRFields(); // Update EDSCR processor state flags.
if sync_errors then
SynchronizeErrors();
    EndOfInstruction();
// AArch64.WatchpointByteMatch()
// =============================

boolean AArch64.WatchpointByteMatch(integer n, AccType acctype, bits(64) vaddress)

    el = if HaveNV2Ext() && acctype == AccType_NV2REGISTER then EL2 else PSTATE.EL;
top = AddrTop(vaddress, FALSE, el);
bottom = if DBGWVR_EL1[n]<2> == '1' then 2 else 3; // Word or doubleword
byte_select_match = (DBGWCR_EL1[n].BAS<UInt(vaddress<bottom-1:0>)> != '0');
mask = UInt(DBGWCR_EL1[n].MASK);
// If DBGWCR_EL1[n].MASK is non-zero value and DBGWCR_EL1[n].BAS is not set to '11111111', or
// DBGWCR_EL1[n].BAS specifies a non-contiguous set of bytes behavior is CONSTRAINED
// UNPREDICTABLE.
if mask > 0 && !IsOnes(DBGWCR_EL1[n].BAS) then
byte_select_match = ConstrainUnpredictableBool(Unpredictable_WPMASKANDBAS);
else
LSB = (DBGWCR_EL1[n].BAS AND NOT(DBGWCR_EL1[n].BAS - 1)); MSB = (DBGWCR_EL1[n].BAS + LSB);
if !IsZero(MSB AND (MSB - 1)) then // Not contiguous
byte_select_match = ConstrainUnpredictableBool(Unpredictable_WPBASCONTIGUOUS);
bottom = 3; // For the whole doubleword
// If the address mask is set to a reserved value, the behavior is CONSTRAINED UNPREDICTABLE.
if mask > 0 && mask <= 2 then
(c, mask) = ConstrainUnpredictableInteger(3, 31, Unpredictable_RESWPMASK);
assert c IN {Constraint_DISABLED, Constraint_NONE, Constraint_UNKNOWN};
case c of
when Constraint_DISABLED return FALSE; // Disabled
when Constraint_NONE mask = 0; // No masking
// Otherwise the value returned by ConstrainUnpredictableInteger is a not-reserved value
if mask > bottom then
WVR_match = (vaddress<top:mask> == DBGWVR_EL1[n]<top:mask>);
// If masked bits of DBGWVR_EL1[n] are not zero, the behavior is CONSTRAINED UNPREDICTABLE.
if WVR_match && !IsZero(DBGWVR_EL1[n]<mask-1:bottom>) then
            WVR_match = ConstrainUnpredictableBool(Unpredictable_WPMASKEDBITS);
else
WVR_match = vaddress<top:bottom> == DBGWVR_EL1[n]<top:bottom>;
return WVR_match && byte_select_match;
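The BAS contiguity test above uses a standard bit trick: isolating the lowest set bit and adding it to BAS yields a power of two exactly when the programmed byte lanes are contiguous. A non-normative C restatement:

#include <stdbool.h>
#include <stdint.h>

static bool bas_is_contiguous(uint8_t bas) {
    uint8_t lsb = (uint8_t)(bas & -bas);         /* BAS AND NOT(BAS - 1) */
    uint8_t msb = (uint8_t)(bas + lsb);
    return (uint8_t)(msb & (msb - 1)) == 0;      /* IsZero(MSB AND (MSB - 1)) */
}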
// AArch64.WatchpointMatch()
// =========================
// Watchpoint matching in an AArch64 translation regime.

boolean AArch64.WatchpointMatch(integer n, bits(64) vaddress, integer size, boolean ispriv,
                                AccType acctype, boolean iswrite)
    assert !ELUsingAArch32(S1TranslationRegime());
    assert n <= UInt(ID_AA64DFR0_EL1.WRPs);

    // "ispriv" is FALSE for LDTR/STTR instructions executed at EL1 and all
    // load/stores at EL0, TRUE for all other load/stores. "iswrite" is TRUE for stores, FALSE for
    // loads.
    enabled = DBGWCR_EL1[n].E == '1';
    linked = DBGWCR_EL1[n].WT == '1';
    isbreakpnt = FALSE;

    state_match = AArch64.StateMatch(DBGWCR_EL1[n].SSC, DBGWCR_EL1[n].HMC, DBGWCR_EL1[n].PAC,
                                     linked, DBGWCR_EL1[n].LBN, isbreakpnt, acctype, ispriv);

    ls_match = (DBGWCR_EL1[n].LSC<(if iswrite then 1 else 0)> == '1');

    value_match = FALSE;
    for byte = 0 to size - 1
        value_match = value_match || AArch64.WatchpointByteMatch(n, acctype, vaddress + byte);

    return value_match && state_match && ls_match && enabled;
// AArch64.Abort()
// ===============
// Abort and Debug exception handling in an AArch64 translation regime.

AArch64.Abort(bits(64) vaddress, FaultRecord fault)

    if IsDebugException(fault) then
        if fault.acctype == AccType_IFETCH then
            if UsingAArch32() && fault.debugmoe == DebugException_VectorCatch then
                AArch64.VectorCatchException(fault);
            else
                AArch64.BreakpointException(fault);
        else
            AArch64.WatchpointException(vaddress, fault);
    elsif fault.acctype == AccType_IFETCH then
        AArch64.InstructionAbort(vaddress, fault);
    else
        AArch64.DataAbort(vaddress, fault);
// AArch64.AbortSyndrome()
// =======================
// Creates an exception syndrome record for Abort and Watchpoint exceptions
// from an AArch64 translation regime.

ExceptionRecord AArch64.AbortSyndrome(Exception exceptype, FaultRecord fault, bits(64) vaddress)
    exception = ExceptionSyndrome(exceptype);

    d_side = exceptype IN {Exception_DataAbort, Exception_NV2DataAbort,
                           Exception_Watchpoint, Exception_NV2Watchpoint};

    exception.syndrome = AArch64.FaultSyndrome(d_side, fault);
    exception.vaddress = ZeroExtend(vaddress);
    if IPAValid(fault) then
        exception.ipavalid = TRUE;
        exception.NS = fault.ipaddress.NS;
        exception.ipaddress = fault.ipaddress.address;
    else
        exception.ipavalid = FALSE;

    return exception;
// AArch64.CheckPCAlignment()
// ==========================

AArch64.CheckPCAlignment()

    bits(64) pc = ThisInstrAddr();
    if pc<1:0> != '00' then
        AArch64.PCAlignmentFault();
// AArch64.DataAbort()
// ===================

AArch64.DataAbort(bits(64) vaddress, FaultRecord fault)

    route_to_el3 = HaveEL(EL3) && SCR_EL3.EA == '1' && IsExternalAbort(fault);
    route_to_el2 = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() && (HCR_EL2.TGE == '1' ||
                    (HaveRASExt() && HCR_EL2.TEA == '1' && IsExternalAbort(fault)) ||
                    (HaveNV2Ext() && fault.acctype == AccType_NV2REGISTER) ||
                    IsSecondStage(fault)));

    bits(64) preferred_exception_return = ThisInstrAddr();
    if (HaveDoubleFaultExt() && (PSTATE.EL == EL3 || route_to_el3) &&
        IsExternalAbort(fault) && SCR_EL3.EASE == '1') then
        vect_offset = 0x180;
    else
        vect_offset = 0x0;

    if HaveNV2Ext() && fault.acctype == AccType_NV2REGISTER then
        exception = AArch64.AbortSyndrome(Exception_NV2DataAbort, fault, vaddress);
    else
        exception = AArch64.AbortSyndrome(Exception_DataAbort, fault, vaddress);

    if PSTATE.EL == EL3 || route_to_el3 then
        AArch64.TakeException(EL3, exception, preferred_exception_return, vect_offset);
    elsif PSTATE.EL == EL2 || route_to_el2 then
        AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
    else
        AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);
// AArch64.EffectiveTCF()
// ======================
// Returns the TCF field applied to Tag Check Fails in the given Exception Level.

bits(2) AArch64.EffectiveTCF(bits(2) el)
    bits(2) tcf;
    if el == EL3 then
        tcf = SCTLR_EL3.TCF;
    elsif el == EL2 then
        tcf = SCTLR_EL2.TCF;
    elsif el == EL1 then
        tcf = SCTLR_EL1.TCF;
    elsif el == EL0 && HCR_EL2.<E2H,TGE> == '11' then
        tcf = SCTLR_EL2.TCF0;
    elsif el == EL0 && HCR_EL2.<E2H,TGE> != '11' then
        tcf = SCTLR_EL1.TCF0;
    if tcf == '11' then
        (-,tcf) = ConstrainUnpredictableBits(Unpredictable_RESTCF);
    return tcf;
// AArch64.InstructionAbort()
// ==========================

AArch64.InstructionAbort(bits(64) vaddress, FaultRecord fault)

    // External aborts on instruction fetch must be taken synchronously
    if HaveDoubleFaultExt() then assert fault.statuscode != Fault_AsyncExternal;

    route_to_el3 = HaveEL(EL3) && SCR_EL3.EA == '1' && IsExternalAbort(fault);
    route_to_el2 = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() &&
                    (HCR_EL2.TGE == '1' || IsSecondStage(fault) ||
                     (HaveRASExt() && HCR_EL2.TEA == '1' && IsExternalAbort(fault))));

    bits(64) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x0;

    exception = AArch64.AbortSyndrome(Exception_InstructionAbort, fault, vaddress);

    if PSTATE.EL == EL3 || route_to_el3 then
        AArch64.TakeException(EL3, exception, preferred_exception_return, vect_offset);
    elsif PSTATE.EL == EL2 || route_to_el2 then
        AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
    else
        AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);
// AArch64.PCAlignmentFault()
// ==========================
// Called on unaligned program counter in AArch64 state.

AArch64.PCAlignmentFault()

    bits(64) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x0;

    exception = ExceptionSyndrome(Exception_PCAlignment);
    exception.vaddress = ThisInstrAddr();

    if UInt(PSTATE.EL) > UInt(EL1) then
        AArch64.TakeException(PSTATE.EL, exception, preferred_exception_return, vect_offset);
    elsif EL2Enabled() && HCR_EL2.TGE == '1' then
        AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
    else
        AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);
// AArch64.ReportTagCheckFail()
// ============================
// Records a tag fail exception into the appropriate TCFR_ELx.

AArch64.ReportTagCheckFail(bits(2) el, bit ttbr)
    if el == EL3 then
        assert ttbr == '0';
        TFSR_EL3.TF0 = '1';
    elsif el == EL2 then
        if ttbr == '0' then
            TFSR_EL2.TF0 = '1';
        else
            TFSR_EL2.TF1 = '1';
    elsif el == EL1 then
        if ttbr == '0' then
            TFSR_EL1.TF0 = '1';
        else
            TFSR_EL1.TF1 = '1';
    elsif el == EL0 then
        if ttbr == '0' then
            TFSRE0_EL1.TF0 = '1';
        else
            TFSRE0_EL1.TF1 = '1';
// AArch64.SPAlignmentFault()
// ==========================
// Called on an unaligned stack pointer in AArch64 state.

AArch64.SPAlignmentFault()

    bits(64) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x0;

    exception = ExceptionSyndrome(Exception_SPAlignment);

    if UInt(PSTATE.EL) > UInt(EL1) then
        AArch64.TakeException(PSTATE.EL, exception, preferred_exception_return, vect_offset);
    elsif EL2Enabled() && HCR_EL2.TGE == '1' then
        AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
    else
        AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);
// AArch64.TagCheckFail()
// ======================
// Handle a tag check fail condition.

AArch64.TagCheckFail(bits(64) vaddress, boolean iswrite)
    bits(2) tcf = AArch64.EffectiveTCF(PSTATE.EL);
    if tcf == '01' then
        AArch64.TagCheckFault(vaddress, iswrite);
    elsif tcf == '10' then
        AArch64.ReportTagCheckFail(PSTATE.EL, vaddress<55>);
// AArch64.TagCheckFault()
// =======================
// Raise a tag check fail exception.

AArch64.TagCheckFault(bits(64) va, boolean write)
    bits(2) target_el;
    bits(64) preferred_exception_return = ThisInstrAddr();
    integer vect_offset = 0x0;

    if PSTATE.EL == EL0 then
        target_el = if HCR_EL2.TGE == '0' then EL1 else EL2;
    else
        target_el = PSTATE.EL;

    exception = ExceptionSyndrome(Exception_DataAbort);
    exception.syndrome<5:0> = '010001';
    if write then
        exception.syndrome<6> = '1';
    exception.vaddress = va;

    AArch64.TakeException(target_el, exception, preferred_exception_return, vect_offset);
// BranchTargetException
// =====================
// Raise branch target exception.

AArch64.BranchTargetException(bits(52) vaddress)

    route_to_el2 = PSTATE.EL == EL0 && EL2Enabled() && HCR_EL2.TGE == '1';
    bits(64) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x0;

    exception = ExceptionSyndrome(Exception_BranchTarget);
    exception.syndrome<1:0> = PSTATE.BTYPE;
    exception.syndrome<24:2> = Zeros(); // RES0

    if UInt(PSTATE.EL) > UInt(EL1) then
        AArch64.TakeException(PSTATE.EL, exception, preferred_exception_return, vect_offset);
    elsif route_to_el2 then
        AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
    else
        AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);
// AArch64.TakePhysicalFIQException()
// ==================================

AArch64.TakePhysicalFIQException()

    route_to_el3 = HaveEL(EL3) && SCR_EL3.FIQ == '1';
    route_to_el2 = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() &&
                    (HCR_EL2.TGE == '1' || HCR_EL2.FMO == '1'));
    bits(64) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x100;
    exception = ExceptionSyndrome(Exception_FIQ);

    if route_to_el3 then
        AArch64.TakeException(EL3, exception, preferred_exception_return, vect_offset);
    elsif PSTATE.EL == EL2 || route_to_el2 then
        assert PSTATE.EL != EL3;
        AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
    else
        assert PSTATE.EL IN {EL0, EL1};
        AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);
// AArch64.TakePhysicalIRQException()
// ==================================
// Take an enabled physical IRQ exception.

AArch64.TakePhysicalIRQException()

    route_to_el3 = HaveEL(EL3) && SCR_EL3.IRQ == '1';
    route_to_el2 = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() &&
                    (HCR_EL2.TGE == '1' || HCR_EL2.IMO == '1'));
    bits(64) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x80;

    exception = ExceptionSyndrome(Exception_IRQ);

    if route_to_el3 then
        AArch64.TakeException(EL3, exception, preferred_exception_return, vect_offset);
    elsif PSTATE.EL == EL2 || route_to_el2 then
        assert PSTATE.EL != EL3;
        AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
    else
        assert PSTATE.EL IN {EL0, EL1};
        AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);
// AArch64.TakePhysicalSErrorException()
// =====================================

AArch64.TakePhysicalSErrorException(boolean impdef_syndrome, bits(24) syndrome)

    route_to_el3 = HaveEL(EL3) && SCR_EL3.EA == '1';
    route_to_el2 = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() &&
                    (HCR_EL2.TGE == '1' || (!IsInHost() && HCR_EL2.AMO == '1')));
    bits(64) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x180;

    exception = ExceptionSyndrome(Exception_SError);
    exception.syndrome<24> = if impdef_syndrome then '1' else '0';
    exception.syndrome<23:0> = syndrome;

    ClearPendingPhysicalSError();

    if PSTATE.EL == EL3 || route_to_el3 then
        AArch64.TakeException(EL3, exception, preferred_exception_return, vect_offset);
    elsif PSTATE.EL == EL2 || route_to_el2 then
        AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
    else
        AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);
// AArch64.TakeVirtualFIQException()
// =================================

AArch64.TakeVirtualFIQException()
    assert PSTATE.EL IN {EL0, EL1} && EL2Enabled();
    assert HCR_EL2.TGE == '0' && HCR_EL2.FMO == '1'; // Virtual IRQ enabled if TGE==0 and FMO==1

    bits(64) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x100;

    exception = ExceptionSyndrome(Exception_FIQ);

    AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);
// AArch64.TakeVirtualIRQException()
// =================================

AArch64.TakeVirtualIRQException()
    assert PSTATE.EL IN {EL0, EL1} && EL2Enabled();
    assert HCR_EL2.TGE == '0' && HCR_EL2.IMO == '1'; // Virtual IRQ enabled if TGE==0 and IMO==1

    bits(64) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x80;

    exception = ExceptionSyndrome(Exception_IRQ);

    AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);
// AArch64.TakeVirtualSErrorException()
// ====================================

AArch64.TakeVirtualSErrorException(boolean impdef_syndrome, bits(24) syndrome)

    assert PSTATE.EL IN {EL0, EL1} && EL2Enabled();
    assert HCR_EL2.TGE == '0' && HCR_EL2.AMO == '1'; // Virtual SError enabled if TGE==0 and AMO==1

    bits(64) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x180;

    exception = ExceptionSyndrome(Exception_SError);

    if HaveRASExt() then
        exception.syndrome<24> = VSESR_EL2.IDS;
        exception.syndrome<23:0> = VSESR_EL2.ISS;
    else
        exception.syndrome<24> = if impdef_syndrome then '1' else '0';
        if impdef_syndrome then exception.syndrome<23:0> = syndrome;

    ClearPendingVirtualSError();

    AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);
// AArch64.BreakpointException()
// =============================

AArch64.BreakpointException(FaultRecord fault)
    assert PSTATE.EL != EL3;

    route_to_el2 = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() &&
                    (HCR_EL2.TGE == '1' || MDCR_EL2.TDE == '1'));

    bits(64) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x0;
    vaddress = bits(64) UNKNOWN;

    exception = AArch64.AbortSyndrome(Exception_Breakpoint, fault, vaddress);

    if PSTATE.EL == EL2 || route_to_el2 then
        AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
    else
        AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);
// AArch64.SoftwareBreakpoint()
// ============================

AArch64.SoftwareBreakpoint(bits(16) immediate)
    route_to_el2 = (PSTATE.EL IN {EL0, EL1} &&
                    EL2Enabled() && (HCR_EL2.TGE == '1' || MDCR_EL2.TDE == '1'));

    bits(64) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x0;

    exception = ExceptionSyndrome(Exception_SoftwareBreakpoint);
    exception.syndrome<15:0> = immediate;

    if UInt(PSTATE.EL) > UInt(EL1) then
        AArch64.TakeException(PSTATE.EL, exception, preferred_exception_return, vect_offset);
    elsif route_to_el2 then
        AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
    else
        AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);
// AArch64.SoftwareStepException()
// ===============================

AArch64.SoftwareStepException()
    assert PSTATE.EL != EL3;

    route_to_el2 = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() &&
                    (HCR_EL2.TGE == '1' || MDCR_EL2.TDE == '1'));

    bits(64) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x0;

    exception = ExceptionSyndrome(Exception_SoftwareStep);
    if SoftwareStep_DidNotStep() then
        exception.syndrome<24> = '0';
    else
        exception.syndrome<24> = '1';
        exception.syndrome<6> = if SoftwareStep_SteppedEX() then '1' else '0';

    if PSTATE.EL == EL2 || route_to_el2 then
        AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
    else
        AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);
// AArch64.VectorCatchException()
// ==============================
// Vector Catch taken from EL0 or EL1 to EL2. This can only be called when debug exceptions are
// being routed to EL2, as Vector Catch is a legacy debug event.

AArch64.VectorCatchException(FaultRecord fault)
    assert PSTATE.EL != EL2;
    assert EL2Enabled() && (HCR_EL2.TGE == '1' || MDCR_EL2.TDE == '1');

    bits(64) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x0;
    vaddress = bits(64) UNKNOWN;

    exception = AArch64.AbortSyndrome(Exception_VectorCatch, fault, vaddress);

    AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
// AArch64.WatchpointException()
// =============================

AArch64.WatchpointException(bits(64) vaddress, FaultRecord fault)
    assert PSTATE.EL != EL3;

    route_to_el2 = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() &&
                    (HCR_EL2.TGE == '1' || MDCR_EL2.TDE == '1'));

    bits(64) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x0;

    if HaveNV2Ext() && fault.acctype == AccType_NV2REGISTER then
        exception = AArch64.AbortSyndrome(Exception_NV2Watchpoint, fault, vaddress);
    else
        exception = AArch64.AbortSyndrome(Exception_Watchpoint, fault, vaddress);

    if PSTATE.EL == EL2 || route_to_el2 then
        AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
    else
        AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);
// AArch64.ExceptionClass()
// ========================
// Returns the Exception Class and Instruction Length fields to be reported in ESR

(integer,bit) AArch64.ExceptionClass(Exception exceptype, bits(2) target_el)

    il = if ThisInstrLength() == 32 then '1' else '0';
    from_32 = UsingAArch32();
    assert from_32 || il == '1'; // AArch64 instructions always 32-bit

    case exceptype of
        when Exception_Uncategorized       ec = 0x00; il = '1';
        when Exception_WFxTrap             ec = 0x01;
        when Exception_CP15RTTrap          ec = 0x03; assert from_32;
        when Exception_CP15RRTTrap         ec = 0x04; assert from_32;
        when Exception_CP14RTTrap          ec = 0x05; assert from_32;
        when Exception_CP14DTTrap          ec = 0x06; assert from_32;
        when Exception_AdvSIMDFPAccessTrap ec = 0x07;
        when Exception_FPIDTrap            ec = 0x08;
        when Exception_PACTrap             ec = 0x09;
        when Exception_CP14RRTTrap         ec = 0x0C; assert from_32;
        when Exception_BranchTarget        ec = 0x0D;
        when Exception_IllegalState        ec = 0x0E; il = '1';
        when Exception_SupervisorCall      ec = 0x11;
        when Exception_HypervisorCall      ec = 0x12;
        when Exception_MonitorCall         ec = 0x13;
        when Exception_SystemRegisterTrap  ec = 0x18; assert !from_32;
        when Exception_SVEAccessTrap       ec = 0x19; assert !from_32;
        when Exception_ERetTrap            ec = 0x1A;
        when Exception_InstructionAbort    ec = 0x20; il = '1';
        when Exception_PCAlignment         ec = 0x22; il = '1';
        when Exception_DataAbort           ec = 0x24;
        when Exception_NV2DataAbort        ec = 0x25;
        when Exception_SPAlignment         ec = 0x26; il = '1'; assert !from_32;
        when Exception_FPTrappedException  ec = 0x28;
        when Exception_SError              ec = 0x2F; il = '1';
        when Exception_Breakpoint          ec = 0x30; il = '1';
        when Exception_SoftwareStep        ec = 0x32; il = '1';
        when Exception_Watchpoint          ec = 0x34; il = '1';
        when Exception_NV2Watchpoint       ec = 0x35; il = '1';
        when Exception_SoftwareBreakpoint  ec = 0x38;
        when Exception_VectorCatch         ec = 0x3A; il = '1'; assert from_32;
        otherwise                          Unreachable();

    if ec IN {0x20,0x24,0x30,0x32,0x34} && target_el == PSTATE.EL then
        ec = ec + 1;

    if ec IN {0x11,0x12,0x13,0x28,0x38} && !from_32 then
        ec = ec + 4;

    return (ec,il);
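Two final adjustments in AArch64.ExceptionClass() are easy to miss: the abort, breakpoint, step and watchpoint classes gain 1 when the exception is taken to the Exception level it came from, and the call, FP-trap and software-breakpoint classes gain 4 when the exception came from AArch64. A non-normative C restatement:

#include <stdint.h>

static uint32_t adjust_ec(uint32_t ec, int target_el, int current_el, int from_32) {
    switch (ec) {
    case 0x20: case 0x24: case 0x30: case 0x32: case 0x34:
        if (target_el == current_el) ec += 1;     /* "same EL" variants */
        break;
    case 0x11: case 0x12: case 0x13: case 0x28: case 0x38:
        if (!from_32) ec += 4;                    /* AArch64 variants */
        break;
    }
    return ec;
}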
// AArch64.ReportException()
// =========================
// Report syndrome information for exception taken to AArch64 state.

AArch64.ReportException(ExceptionRecord exception, bits(2) target_el)

    Exception exceptype = exception.exceptype;
    (ec,il) = AArch64.ExceptionClass(exceptype, target_el);
    iss = exception.syndrome;

    // IL is not valid for Data Abort exceptions without valid instruction syndrome information
    if ec IN {0x24,0x25} && iss<24> == '0' then
        il = '1';

    ESR[target_el] = ec<5:0>:il:iss;

    if exceptype IN {Exception_InstructionAbort, Exception_PCAlignment, Exception_DataAbort,
                     Exception_NV2DataAbort, Exception_NV2Watchpoint, Exception_Watchpoint} then
        FAR[target_el] = exception.vaddress;
    else
        FAR[target_el] = bits(64) UNKNOWN;

    if target_el == EL2 then
        if exception.ipavalid then
            HPFAR_EL2<43:4> = exception.ipaddress<51:12>;
            if HaveSecureEL2Ext() then
                if IsSecureEL2Enabled() then
                    HPFAR_EL2.NS = exception.NS;
                else
                    HPFAR_EL2.NS = '0';
        else
            HPFAR_EL2<43:4> = bits(40) UNKNOWN;

    return;
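The ESR_ELx value written by AArch64.ReportException() is the concatenation EC<5:0> : IL : ISS<24:0>, that is, EC in bits [31:26], IL in bit [25] and the syndrome in bits [24:0]. Non-normative C sketch:

#include <stdint.h>

static uint32_t pack_esr(uint32_t ec, uint32_t il, uint32_t iss) {
    return ((ec & 0x3Fu) << 26) | ((il & 1u) << 25) | (iss & 0x1FFFFFFu);
}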
// Resets System registers and memory-mapped control registers that have architecturally-defined
// reset values to those values.
AArch64.ResetControlRegisters(boolean cold_reset);
// AArch64.TakeReset()
// ===================
// Reset into AArch64 state

AArch64.TakeReset(boolean cold_reset)
    assert !HighestELUsingAArch32();

    // Enter the highest implemented Exception level in AArch64 state
    PSTATE.nRW = '0';
    if HaveEL(EL3) then
        PSTATE.EL = EL3;
    elsif HaveEL(EL2) then
        PSTATE.EL = EL2;
    else
        PSTATE.EL = EL1;

    // Reset the system registers and other system components
    AArch64.ResetControlRegisters(cold_reset);

    // Reset all other PSTATE fields
    PSTATE.SP = '1'; // Select stack pointer
    PSTATE.<D,A,I,F> = '1111'; // All asynchronous exceptions masked
    PSTATE.SS = '0'; // Clear software step bit
    PSTATE.DIT = '0'; // PSTATE.DIT is reset to 0 when resetting into AArch64
    PSTATE.IL = '0'; // Clear Illegal Execution state bit

    // All registers, bits and fields not reset by the above pseudocode or by the BranchTo() call
    // below are UNKNOWN bitstrings after reset. In particular, the return information registers
    // ELR_ELx and SPSR_ELx have UNKNOWN values, so that it is impossible to return from a reset
    // in an architecturally defined way.
    AArch64.ResetGeneralRegisters();
    AArch64.ResetSIMDFPRegisters();
    AArch64.ResetSpecialRegisters();
    ResetExternalDebugRegisters(cold_reset);

    bits(64) rv; // IMPLEMENTATION DEFINED reset vector

    if HaveEL(EL3) then
        rv = RVBAR_EL3;
    elsif HaveEL(EL2) then
        rv = RVBAR_EL2;
    else
        rv = RVBAR_EL1;

    // The reset vector must be correctly aligned
    assert IsZero(rv<63:PAMax()>) && IsZero(rv<1:0>);

    BranchTo(rv, BranchType_RESET);
// Resets System registers and memory-mapped control registers that have architecturally-defined
// reset values to those values.// AArch64.FPTrappedException()
// ============================
AArch64.ResetControlRegisters(boolean cold_reset);AArch64.FPTrappedException(boolean is_ase, integer element, bits(8) accumulated_exceptions)
    exception = ExceptionSyndrome(Exception_FPTrappedException);
    if is_ase then
        if boolean IMPLEMENTATION_DEFINED "vector instructions set TFV to 1" then
            exception.syndrome<23> = '1'; // TFV
        else
            exception.syndrome<23> = '0'; // TFV
    else
        exception.syndrome<23> = '1'; // TFV
    exception.syndrome<10:8> = bits(3) UNKNOWN; // VECITR
    if exception.syndrome<23> == '1' then
        exception.syndrome<7,4:0> = accumulated_exceptions<7,4:0>; // IDF,IXF,UFF,OFF,DZF,IOF
    else
        exception.syndrome<7,4:0> = bits(6) UNKNOWN;

    route_to_el2 = EL2Enabled() && HCR_EL2.TGE == '1';

    bits(64) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x0;

    if UInt(PSTATE.EL) > UInt(EL1) then
        AArch64.TakeException(PSTATE.EL, exception, preferred_exception_return, vect_offset);
    elsif route_to_el2 then
        AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
    else
        AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);
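
When TFV is 1, the syndrome carries the accumulated IEEE exception flags in the bit positions named in the comments above (IDF at bit 7, then IXF, UFF, OFF, DZF, IOF at bits 4:0). A small illustrative Python sketch of that packing (flag names only; everything else here is an assumption for illustration):

    # Illustrative sketch: packing accumulated FP flags into the syndrome.
    FLAG_BITS = {'IOF': 0, 'DZF': 1, 'OFF': 2, 'UFF': 3, 'IXF': 4, 'IDF': 7}

    def fp_trap_syndrome(accumulated, tfv=True):
        syndrome = (1 << 23) if tfv else 0      # TFV bit
        if tfv:
            for name in accumulated:
                syndrome |= 1 << FLAG_BITS[name]
        return syndrome

    # A trapped operation that was both Invalid Operation and Inexact:
    print(hex(fp_trap_syndrome({'IOF', 'IXF'})))   # 0x800011
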
// AArch64.CallHypervisor()
// ========================
// Performs a HVC call

AArch64.CallHypervisor(bits(16) immediate)
    assert HaveEL(EL2);

    if UsingAArch32() then AArch32.ITAdvance();
    SSAdvance();
    bits(64) preferred_exception_return = NextInstrAddr();
    vect_offset = 0x0;

    exception = ExceptionSyndrome(Exception_HypervisorCall);
    exception.syndrome<15:0> = immediate;

    if PSTATE.EL == EL3 then
        AArch64.TakeException(EL3, exception, preferred_exception_return, vect_offset);
    else
        AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
// AArch64.CallSecureMonitor()
// ===========================

AArch64.CallSecureMonitor(bits(16) immediate)
    assert HaveEL(EL3) && !ELUsingAArch32(EL3);

    if UsingAArch32() then AArch32.ITAdvance();
    SSAdvance();
    bits(64) preferred_exception_return = NextInstrAddr();
    vect_offset = 0x0;

    exception = ExceptionSyndrome(Exception_MonitorCall);
    exception.syndrome<15:0> = immediate;

    AArch64.TakeException(EL3, exception, preferred_exception_return, vect_offset);
// AArch64.CallSupervisor()
// ========================
// Calls the Supervisor

AArch64.CallSupervisor(bits(16) immediate)

    if UsingAArch32() then AArch32.ITAdvance();
    SSAdvance();
    route_to_el2 = PSTATE.EL == EL0 && EL2Enabled() && HCR_EL2.TGE == '1';

    bits(64) preferred_exception_return = NextInstrAddr();
    vect_offset = 0x0;

    exception = ExceptionSyndrome(Exception_SupervisorCall);
    exception.syndrome<15:0> = immediate;

    if UInt(PSTATE.EL) > UInt(EL1) then
        AArch64.TakeException(PSTATE.EL, exception, preferred_exception_return, vect_offset);
    elsif route_to_el2 then
        AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
    else
        AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);
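
The routing pattern at the end of AArch64.CallSupervisor() recurs throughout this section: exceptions taken from EL2 or EL3 stay at the current Exception level, EL0 exceptions go to EL2 when HCR_EL2.TGE is set, and everything else goes to EL1. An illustrative Python condensation of that rule (a sketch, not the architected definition):

    # Illustrative sketch of the SVC routing decision above.
    def svc_target_el(pstate_el, el2_enabled, hcr_tge):
        if pstate_el > 1:                      # EL2 or EL3: taken to the current EL
            return pstate_el
        if pstate_el == 0 and el2_enabled and hcr_tge:
            return 2                           # Host OS at EL2 (TGE) takes the SVC
        return 1

    assert svc_target_el(0, el2_enabled=True,  hcr_tge=True)  == 2
    assert svc_target_el(0, el2_enabled=True,  hcr_tge=False) == 1
    assert svc_target_el(3, el2_enabled=False, hcr_tge=False) == 3
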
// AArch64.TakeException()
// =======================
// Take an exception to an Exception Level using AArch64.

AArch64.TakeException(bits(2) target_el, ExceptionRecord exception,
                      bits(64) preferred_exception_return, integer vect_offset)
    assert HaveEL(target_el) && !ELUsingAArch32(target_el) && UInt(target_el) >= UInt(PSTATE.EL);

    sync_errors = HaveIESB() && SCTLR[].IESB == '1';
    if HaveDoubleFaultExt() then
        sync_errors = sync_errors || (SCR_EL3.EA == '1' && SCR_EL3.NMEA == '1' && PSTATE.EL == EL3);
    if sync_errors && InsertIESBBeforeException(target_el) then
        SynchronizeErrors();
        iesb_req = FALSE;
        sync_errors = FALSE;
        TakeUnmaskedPhysicalSErrorInterrupts(iesb_req);

    SynchronizeContext();

    // If coming from AArch32 state, the top parts of the X[] registers might be set to zero
    from_32 = UsingAArch32();
    if from_32 then AArch64.MaybeZeroRegisterUppers();
    MaybeZeroSVEUppers(target_el);

    if UInt(target_el) > UInt(PSTATE.EL) then
        boolean lower_32;
        if target_el == EL3 then
            if EL2Enabled() then
                lower_32 = ELUsingAArch32(EL2);
            else
                lower_32 = ELUsingAArch32(EL1);
        elsif IsInHost() && PSTATE.EL == EL0 && target_el == EL2 then
            lower_32 = ELUsingAArch32(EL0);
        else
            lower_32 = ELUsingAArch32(target_el - 1);
        vect_offset = vect_offset + (if lower_32 then 0x600 else 0x400);
    elsif PSTATE.SP == '1' then
        vect_offset = vect_offset + 0x200;

    spsr = GetPSRFromPSTATE();

    if HaveNVExt() && PSTATE.EL == EL1 && target_el == EL1 && EL2Enabled() && HCR_EL2.<NV,NV1> == '10' then
        spsr<3:2> = '10';

    if HaveBTIExt() then
        // SPSR[].BTYPE is only guaranteed valid for these exception types
        if exception.exceptype IN {Exception_SError, Exception_IRQ, Exception_FIQ,
                                   Exception_SoftwareStep, Exception_PCAlignment,
                                   Exception_InstructionAbort, Exception_Breakpoint,
                                   Exception_VectorCatch, Exception_SoftwareBreakpoint,
                                   Exception_IllegalState, Exception_BranchTarget} then
            zero_btype = FALSE;
        else
            zero_btype = ConstrainUnpredictableBool(Unpredictable_ZEROBTYPE);
        if zero_btype then spsr<11:10> = '00';

    if HaveNV2Ext() && exception.exceptype == Exception_NV2DataAbort && target_el == EL3 then
        // external aborts are configured to be taken to EL3
        exception.exceptype = Exception_DataAbort;

    if !(exception.exceptype IN {Exception_IRQ, Exception_FIQ}) then
        AArch64.ReportException(exception, target_el);

    PSTATE.EL = target_el;
    PSTATE.nRW = '0';
    PSTATE.SP = '1';

    SPSR[] = spsr;
    ELR[] = preferred_exception_return;

    PSTATE.SS = '0';
    PSTATE.<D,A,I,F> = '1111';
    PSTATE.IL = '0';
    if from_32 then              // Coming from AArch32
        PSTATE.IT = '00000000';
        PSTATE.T = '0';          // PSTATE.J is RES0
    if (HavePANExt() && (PSTATE.EL == EL1 || (PSTATE.EL == EL2 && ELIsInHost(EL0))) &&
        SCTLR[].SPAN == '0') then
        PSTATE.PAN = '1';
    if HaveUAOExt() then PSTATE.UAO = '0';
    if HaveBTIExt() then PSTATE.BTYPE = '00';
    if HaveSSBSExt() then PSTATE.SSBS = SCTLR[].DSSBS;
    if HaveMTEExt() then PSTATE.TCO = '1';

    BranchTo(VBAR[]<63:11>:vect_offset<10:0>, BranchType_EXCEPTION);

    if sync_errors then
        SynchronizeErrors();
        iesb_req = TRUE;
        TakeUnmaskedPhysicalSErrorInterrupts(iesb_req);

    EndOfInstruction();
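
The vect_offset arithmetic above selects one of the four vector groups in the AArch64 vector table, and the final branch splices the offset into VBAR_ELx. An illustrative Python sketch (assuming a 2KB-aligned VBAR, as the <63:11> slice implies):

    # Illustrative sketch: AArch64 exception vector address selection.
    def vector_address(vbar, vect_offset, higher_el, lower_is_aarch32, sp_elx):
        if higher_el:                    # exception taken to a higher EL
            vect_offset += 0x600 if lower_is_aarch32 else 0x400
        elif sp_elx:                     # same EL, currently using SP_ELx
            vect_offset += 0x200
        # else: same EL with SP_EL0, base offset 0x0
        return (vbar & ~0x7FF) | (vect_offset & 0x7FF)

    # Synchronous exception from AArch64 EL0 to EL1, VBAR_EL1 = 0x80001000:
    print(hex(vector_address(0x80001000, 0x0, True, False, False)))  # 0x80001400
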
// AArch64.AArch32SystemAccessTrap()
// =================================
// Trapped AArch32 system register access.

AArch64.AArch32SystemAccessTrap(bits(2) target_el, integer ec)
    assert HaveEL(target_el) && target_el != EL0 && UInt(target_el) >= UInt(PSTATE.EL);

    bits(64) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x0;

    exception = AArch64.AArch32SystemAccessTrapSyndrome(ThisInstr(), ec);
    AArch64.TakeException(target_el, exception, preferred_exception_return, vect_offset);
// AArch64.AArch32SystemAccessTrapSyndrome()
// =========================================
// Returns the syndrome information for traps on AArch32 MCR, MCRR, MRC, MRRC, and VMRS, VMSR
// instructions, other than traps that are due to HCPTR or CPACR.

ExceptionRecord AArch64.AArch32SystemAccessTrapSyndrome(bits(32) instr, integer ec)

    ExceptionRecord exception;

    case ec of
        when 0x0  exception = ExceptionSyndrome(Exception_Uncategorized);
        when 0x3  exception = ExceptionSyndrome(Exception_CP15RTTrap);
        when 0x4  exception = ExceptionSyndrome(Exception_CP15RRTTrap);
        when 0x5  exception = ExceptionSyndrome(Exception_CP14RTTrap);
        when 0x6  exception = ExceptionSyndrome(Exception_CP14DTTrap);
        when 0x7  exception = ExceptionSyndrome(Exception_AdvSIMDFPAccessTrap);
        when 0x8  exception = ExceptionSyndrome(Exception_FPIDTrap);
        when 0xC  exception = ExceptionSyndrome(Exception_CP14RRTTrap);
        otherwise Unreachable();

    bits(20) iss = Zeros();

    if exception.exceptype IN {Exception_FPIDTrap, Exception_CP14RTTrap, Exception_CP15RTTrap} then
        // Trapped MRC/MCR, VMRS on FPSID
        if exception.exceptype != Exception_FPIDTrap then // When trap is not for VMRS
            iss<19:17> = instr<7:5>;   // opc2
            iss<16:14> = instr<23:21>; // opc1
            iss<13:10> = instr<19:16>; // CRn
            iss<4:1>   = instr<3:0>;   // CRm
        else
            iss<19:17> = '000';
            iss<16:14> = '111';
            iss<13:10> = instr<19:16>; // reg
            iss<4:1>   = '0000';
        if instr<20> == '1' && instr<15:12> == '1111' then    // MRC, Rt==15
            iss<9:5> = '11111';
        elsif instr<20> == '0' && instr<15:12> == '1111' then // MCR, Rt==15
            iss<9:5> = bits(5) UNKNOWN;
        else
            iss<9:5> = LookUpRIndex(UInt(instr<15:12>), PSTATE.M)<4:0>;
    elsif exception.exceptype IN {Exception_CP14RRTTrap, Exception_AdvSIMDFPAccessTrap,
                                  Exception_CP15RRTTrap} then
        // Trapped MRRC/MCRR, VMRS/VMSR
        iss<19:16> = instr<7:4>; // opc1
        if instr<19:16> == '1111' then // Rt2==15
            iss<14:10> = bits(5) UNKNOWN;
        else
            iss<14:10> = LookUpRIndex(UInt(instr<19:16>), PSTATE.M)<4:0>;
        if instr<15:12> == '1111' then // Rt==15
            iss<9:5> = bits(5) UNKNOWN;
        else
            iss<9:5> = LookUpRIndex(UInt(instr<15:12>), PSTATE.M)<4:0>;
        iss<4:1> = instr<3:0>; // CRm
    elsif exception.exceptype == Exception_CP14DTTrap then
        // Trapped LDC/STC
        iss<19:12> = instr<7:0>; // imm8
        iss<4>     = instr<23>;  // U
        iss<2:1>   = instr<24,21>; // P,W
        if instr<19:16> == '1111' then // Rn==15, LDC(Literal addressing)/STC
            iss<9:5> = bits(5) UNKNOWN;
            iss<3> = '1';
    elsif exception.exceptype == Exception_Uncategorized then
        // Trapped for unknown reason
        iss<9:5> = LookUpRIndex(UInt(instr<19:16>), PSTATE.M)<4:0>; // Rn
        iss<3> = '0';

    iss<0> = instr<20>; // Direction

    exception.syndrome<24:20> = ConditionSyndrome();
    exception.syndrome<19:0> = iss;

    return exception;
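
A short illustrative Python sketch of the MCR/MRC ISS packing above (opc2, opc1, CRn, Rt, CRm and the Direction bit), using a trapped read of the AArch32 SCTLR (MRC p15, 0, r0, c1, c0, 0) as the example:

    # Illustrative sketch: ISS layout for a trapped AArch32 MRC/MCR.
    def mcr_mrc_iss(opc2, opc1, crn, rt, crm, is_read):
        iss  = (opc2 & 0x7) << 17       # iss<19:17> opc2
        iss |= (opc1 & 0x7) << 14       # iss<16:14> opc1
        iss |= (crn  & 0xF) << 10       # iss<13:10> CRn
        iss |= (rt  & 0x1F) << 5        # iss<9:5>   Rt
        iss |= (crm  & 0xF) << 1        # iss<4:1>   CRm
        iss |= 1 if is_read else 0      # iss<0> Direction: '1' = MRC (read)
        return iss

    print(hex(mcr_mrc_iss(opc2=0, opc1=0, crn=1, rt=0, crm=0, is_read=True)))  # 0x401
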
// AArch64.AdvSIMDFPAccessTrap()
// =============================
// Trapped access to Advanced SIMD or FP registers due to CPACR[].

AArch64.AdvSIMDFPAccessTrap(bits(2) target_el)
    bits(64) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x0;

    route_to_el2 = (target_el == EL1 && EL2Enabled() && HCR_EL2.TGE == '1');

    if route_to_el2 then
        exception = ExceptionSyndrome(Exception_Uncategorized);
        AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
    else
        exception = ExceptionSyndrome(Exception_AdvSIMDFPAccessTrap);
        exception.syndrome<24:20> = ConditionSyndrome();
        AArch64.TakeException(target_el, exception, preferred_exception_return, vect_offset);

    return;
// AArch64.CheckCP15InstrCoarseTraps()
// ===================================
// Check for coarse-grained AArch32 CP15 traps in HSTR_EL2 and HCR_EL2.

boolean AArch64.CheckCP15InstrCoarseTraps(integer CRn, integer nreg, integer CRm)

    // Check for coarse-grained Hyp traps
    if PSTATE.EL IN {EL0, EL1} && EL2Enabled() then
        // Check for MCR, MRC, MCRR and MRRC disabled by HSTR_EL2<CRn/CRm>
        major = if nreg == 1 then CRn else CRm;
        if !IsInHost() && !(major IN {4,14}) && HSTR_EL2<major> == '1' then
            return TRUE;

        // Check for MRC and MCR disabled by HCR_EL2.TIDCP
        if (HCR_EL2.TIDCP == '1' && nreg == 1 &&
            ((CRn == 9  && CRm IN {0,1,2, 5,6,7,8 }) ||
             (CRn == 10 && CRm IN {0,1, 4, 8 }) ||
             (CRn == 11 && CRm IN {0,1,2,3,4,5,6,7,8,15}))) then
            return TRUE;

    return FALSE;
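
HSTR_EL2 provides one trap bit per primary CP15 register number ("major"), with c4 and c14 excluded from this trapping, as the test above shows. An illustrative Python sketch of just that part of the check:

    # Illustrative sketch: HSTR_EL2 coarse-grained CP15 trap selection.
    def hstr_traps(hstr_el2, crn, crm, nreg):
        major = crn if nreg == 1 else crm
        return major not in (4, 14) and ((hstr_el2 >> major) & 1) == 1

    # With HSTR_EL2.T1 set, an MCR/MRC to c1 (e.g. SCTLR) is trapped:
    assert hstr_traps(hstr_el2=1 << 1, crn=1, crm=0, nreg=1)
    # ...but c14 (the generic timer registers) can never be trapped this way:
    assert not hstr_traps(hstr_el2=1 << 14, crn=14, crm=2, nreg=1)
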
// AArch64.CheckFPAdvSIMDEnabled()
// ===============================
// Check against CPACR[]

AArch64.CheckFPAdvSIMDEnabled()
    if PSTATE.EL IN {EL0, EL1} && !IsInHost() then
        // Check if access disabled in CPACR_EL1
        case CPACR[].FPEN of
            when 'x0' disabled = TRUE;
            when '01' disabled = PSTATE.EL == EL0;
            when '11' disabled = FALSE;
        if disabled then AArch64.AdvSIMDFPAccessTrap(EL1);

    AArch64.CheckFPAdvSIMDTrap(); // Also check against CPTR_EL2 and CPTR_EL3
// AArch64.CheckFPAdvSIMDTrap()
// ============================
// Check against CPTR_EL2 and CPTR_EL3.

AArch64.CheckFPAdvSIMDTrap()
    if PSTATE.EL IN {EL0, EL1, EL2} && EL2Enabled() then
        // Check if access disabled in CPTR_EL2
        if HaveVirtHostExt() && HCR_EL2.E2H == '1' then
            case CPTR_EL2.FPEN of
                when 'x0' disabled = !(PSTATE.EL == EL1 && HCR_EL2.TGE == '1');
                when '01' disabled = (PSTATE.EL == EL0 && HCR_EL2.TGE == '1');
                when '11' disabled = FALSE;
            if disabled then AArch64.AdvSIMDFPAccessTrap(EL2);
        else
            if CPTR_EL2.TFP == '1' then AArch64.AdvSIMDFPAccessTrap(EL2);

    if HaveEL(EL3) then
        // Check if access disabled in CPTR_EL3
        if CPTR_EL3.TFP == '1' then AArch64.AdvSIMDFPAccessTrap(EL3);

    return;
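
For the CPACR_EL1 form above, the two-bit FPEN field decodes the same way everywhere: 'x0' always disables, '01' disables only EL0 accesses, and '11' never disables (the CPTR_EL2.FPEN case when HCR_EL2.E2H is set additionally factors in HCR_EL2.TGE, as shown). A minimal Python sketch of that decode:

    # Illustrative sketch: CPACR_EL1.FPEN decode.
    def fpen_disabled(fpen, at_el0):
        if fpen & 1 == 0:        # 'x0': always disabled
            return True
        if fpen == 0b01:         # '01': disabled for EL0 only
            return at_el0
        return False             # '11': never disabled

    assert fpen_disabled(0b00, at_el0=False)
    assert fpen_disabled(0b01, at_el0=True) and not fpen_disabled(0b01, at_el0=False)
    assert not fpen_disabled(0b11, at_el0=True)
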
// AArch64.CheckForERetTrap()
// ==========================
// Check for trap on ERET, ERETAA, ERETAB instruction

AArch64.CheckForERetTrap(boolean eret_with_pac, boolean pac_uses_key_a)

    // Non-secure EL1 execution of ERET, ERETAA, ERETAB when HCR_EL2.NV bit is set, is trapped to EL2
    route_to_el2 = HaveNVExt() && PSTATE.EL == EL1 && EL2Enabled() && HCR_EL2.NV == '1';

    if route_to_el2 then
        ExceptionRecord exception;
        bits(64) preferred_exception_return = ThisInstrAddr();
        vect_offset = 0x0;
        exception = ExceptionSyndrome(Exception_ERetTrap);
        if !eret_with_pac then         // ERET
            exception.syndrome<1> = '0';
            exception.syndrome<0> = '0'; // RES0
        else
            exception.syndrome<1> = '1';
            if pac_uses_key_a then     // ERETAA
                exception.syndrome<0> = '0';
            else                       // ERETAB
                exception.syndrome<0> = '1';
        AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
// AArch64.CheckForSMCUndefOrTrap()
// ================================
// Check for UNDEFINED or trap on SMC instruction

AArch64.CheckForSMCUndefOrTrap(bits(16) imm)
    route_to_el2 = PSTATE.EL == EL1 && EL2Enabled() && HCR_EL2.TSC == '1';
    if PSTATE.EL == EL0 then UNDEFINED;
    if !HaveEL(EL3) then
        if PSTATE.EL == EL1 && EL2Enabled() then
            if HaveNVExt() && HCR_EL2.NV == '1' && HCR_EL2.TSC == '1' then
                route_to_el2 = TRUE;
            else
                UNDEFINED;
        else
            UNDEFINED;
    else
        route_to_el2 = PSTATE.EL == EL1 && EL2Enabled() && HCR_EL2.TSC == '1';
    if route_to_el2 then
        bits(64) preferred_exception_return = ThisInstrAddr();
        vect_offset = 0x0;
        exception = ExceptionSyndrome(Exception_MonitorCall);
        exception.syndrome<15:0> = imm;
        AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
// AArch64.CheckForWFxTrap()
// =========================
// Check for trap on WFE or WFI instruction

AArch64.CheckForWFxTrap(bits(2) target_el, boolean is_wfe)
    assert HaveEL(target_el);

    case target_el of
        when EL1 trap = (if is_wfe then SCTLR[].nTWE else SCTLR[].nTWI) == '0';
        when EL2 trap = (if is_wfe then HCR_EL2.TWE else HCR_EL2.TWI) == '1';
        when EL3 trap = (if is_wfe then SCR_EL3.TWE else SCR_EL3.TWI) == '1';

    if trap then
        AArch64.WFxTrap(target_el, is_wfe);
// AArch64.CheckIllegalState()
// ===========================
// Check PSTATE.IL bit and generate Illegal Execution state exception if set.

AArch64.CheckIllegalState()
    if PSTATE.IL == '1' then
        route_to_el2 = PSTATE.EL == EL0 && EL2Enabled() && HCR_EL2.TGE == '1';

        bits(64) preferred_exception_return = ThisInstrAddr();
        vect_offset = 0x0;

        exception = ExceptionSyndrome(Exception_IllegalState);

        if UInt(PSTATE.EL) > UInt(EL1) then
            AArch64.TakeException(PSTATE.EL, exception, preferred_exception_return, vect_offset);
        elsif route_to_el2 then
            AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
        else
            AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);
// AArch64.MonitorModeTrap()
// =========================
// Trapped use of Monitor mode features in a Secure EL1 AArch32 mode

AArch64.MonitorModeTrap()
    bits(64) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x0;

    exception = ExceptionSyndrome(Exception_Uncategorized);

    if IsSecureEL2Enabled() then
        AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);

    AArch64.TakeException(EL3, exception, preferred_exception_return, vect_offset);
// AArch64.SystemAccessTrap()
// ==========================
// Trapped access to AArch64 system register or system instruction.

AArch64.SystemAccessTrap(bits(2) target_el, integer ec)
    assert HaveEL(target_el) && target_el != EL0 && UInt(target_el) >= UInt(PSTATE.EL);

    bits(64) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x0;

    exception = AArch64.SystemAccessTrapSyndrome(ThisInstr(), ec);
    AArch64.TakeException(target_el, exception, preferred_exception_return, vect_offset);
// AArch64.SystemAccessTrapSyndrome()
// ==================================
// Returns the syndrome information for traps on AArch64 MSR/MRS instructions.

ExceptionRecord AArch64.SystemAccessTrapSyndrome(bits(32) instr, integer ec)
    ExceptionRecord exception;
    case ec of
        when 0x0  // Trapped access due to unknown reason.
            exception = ExceptionSyndrome(Exception_Uncategorized);
        when 0x7  // Trapped access to SVE, Advanced SIMD&FP system register.
            exception = ExceptionSyndrome(Exception_AdvSIMDFPAccessTrap);
            exception.syndrome<24:20> = ConditionSyndrome();
        when 0x18 // Trapped access to system register or system instruction.
            exception = ExceptionSyndrome(Exception_SystemRegisterTrap);
            instr = ThisInstr();
            exception.syndrome<21:20> = instr<20:19>; // Op0
            exception.syndrome<19:17> = instr<7:5>;   // Op2
            exception.syndrome<16:14> = instr<18:16>; // Op1
            exception.syndrome<13:10> = instr<15:12>; // CRn
            exception.syndrome<9:5>   = instr<4:0>;   // Rt
            exception.syndrome<4:1>   = instr<11:8>;  // CRm
            exception.syndrome<0>     = instr<21>;    // Direction
        otherwise
            Unreachable();
    return exception;
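
A trapped MSR/MRS reported with EC 0x18 can be decoded back into its System register operands from the syndrome fields packed above. An illustrative Python sketch, using the encoding of CNTVCT_EL0 (Op0=3, Op1=3, CRn=14, CRm=0, Op2=2) as the example:

    # Illustrative sketch: decoding the EC 0x18 syndrome fields.
    def decode_sysreg_trap(syndrome):
        return {
            'Op0':  (syndrome >> 20) & 0x3,
            'Op2':  (syndrome >> 17) & 0x7,
            'Op1':  (syndrome >> 14) & 0x7,
            'CRn':  (syndrome >> 10) & 0xF,
            'Rt':   (syndrome >> 5)  & 0x1F,
            'CRm':  (syndrome >> 1)  & 0xF,
            'read': (syndrome & 1) == 1,   # Direction: '1' = MRS (read)
        }

    # A trapped 'MRS x0, CNTVCT_EL0':
    iss = (3 << 20) | (2 << 17) | (3 << 14) | (14 << 10) | (0 << 5) | (0 << 1) | 1
    print(decode_sysreg_trap(iss))
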
// AArch64.UndefinedFault()
// ========================

AArch64.UndefinedFault()

    route_to_el2 = PSTATE.EL == EL0 && EL2Enabled() && HCR_EL2.TGE == '1';
    bits(64) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x0;

    exception = ExceptionSyndrome(Exception_Uncategorized);

    if UInt(PSTATE.EL) > UInt(EL1) then
        AArch64.TakeException(PSTATE.EL, exception, preferred_exception_return, vect_offset);
    elsif route_to_el2 then
        AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
    else
        AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);
// AArch64.WFxTrap()
// =================

AArch64.WFxTrap(bits(2) target_el, boolean is_wfe)
    assert UInt(target_el) > UInt(PSTATE.EL);

    bits(64) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x0;

    exception = ExceptionSyndrome(Exception_WFxTrap);
    exception.syndrome<24:20> = ConditionSyndrome();
    exception.syndrome<0> = if is_wfe then '1' else '0';

    if target_el == EL1 && EL2Enabled() && HCR_EL2.TGE == '1' then
        AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
    else
        AArch64.TakeException(target_el, exception, preferred_exception_return, vect_offset);
// CheckFPAdvSIMDEnabled64()
// =========================
// AArch64 instruction wrapper

CheckFPAdvSIMDEnabled64()
    AArch64.CheckFPAdvSIMDEnabled();
// AArch64.CreateFaultRecord()
// ===========================

FaultRecord AArch64.CreateFaultRecord(Fault statuscode, bits(52) ipaddress, boolean NS,
                                      integer level, AccType acctype, boolean write, bit extflag,
                                      bits(2) errortype, boolean secondstage, boolean s2fs1walk)

    FaultRecord fault;
    fault.statuscode = statuscode;
    fault.domain = bits(4) UNKNOWN;   // Not used from AArch64
    fault.debugmoe = bits(4) UNKNOWN; // Not used from AArch64
    fault.errortype = errortype;
    fault.ipaddress.NS = if NS then '1' else '0';
    fault.ipaddress.address = ipaddress;
    fault.level = level;
    fault.acctype = acctype;
    fault.write = write;
    fault.extflag = extflag;
    fault.secondstage = secondstage;
    fault.s2fs1walk = s2fs1walk;

    return fault;
// AArch64.FaultSyndrome()
// =======================
// Creates an exception syndrome value for Abort and Watchpoint exceptions taken to
// an Exception Level using AArch64.

bits(25) AArch64.FaultSyndrome(boolean d_side, FaultRecord fault)
    assert fault.statuscode != Fault_None;

    bits(25) iss = Zeros();
    if HaveRASExt() && IsExternalSyncAbort(fault) then iss<12:11> = fault.errortype; // SET
    if d_side then
        if IsSecondStage(fault) && !fault.s2fs1walk then iss<24:14> = LSInstructionSyndrome();
        if HaveNV2Ext() && fault.acctype == AccType_NV2REGISTER then
            iss<13> = '1'; // Value of '1' indicates fault is generated by use of VNCR_EL2
        if fault.acctype IN {AccType_DC, AccType_DC_UNPRIV, AccType_IC, AccType_AT} then
            iss<8> = '1'; iss<6> = '1';
        else
            iss<6> = if fault.write then '1' else '0';
    if IsExternalAbort(fault) then iss<9> = fault.extflag;
    iss<7> = if fault.s2fs1walk then '1' else '0';
    iss<5:0> = EncodeLDFSC(fault.statuscode, fault.level);

    return iss;
// AArch64.ExclusiveMonitorsPass()
// ===============================
// Return TRUE if the Exclusives monitors for the current PE include all of the addresses
// associated with the virtual address region of size bytes starting at address.
// The immediately following memory write must be to the same addresses.

boolean AArch64.ExclusiveMonitorsPass(bits(64) address, integer size)

    // It is IMPLEMENTATION DEFINED whether the detection of memory aborts happens
    // before or after the check on the local Exclusives monitor. As a result a failure
    // of the local monitor can occur on some implementations even if the memory
    // access would give a memory abort.

    acctype = AccType_ATOMIC;
    iswrite = TRUE;
    aligned = (address == Align(address, size));

    if !aligned then
        secondstage = FALSE;
        AArch64.Abort(address, AArch64.AlignmentFault(acctype, iswrite, secondstage));

    passed = AArch64.IsExclusiveVA(address, ProcessorID(), size);
    if !passed then
        return FALSE;

    memaddrdesc = AArch64.TranslateAddress(address, acctype, iswrite, aligned, size);

    // Check for aborts or debug exceptions
    if IsFault(memaddrdesc) then
        AArch64.Abort(address, memaddrdesc.fault);

    passed = IsExclusiveLocal(memaddrdesc.paddress, ProcessorID(), size);

    if passed then
        ClearExclusiveLocal(ProcessorID());
        if memaddrdesc.memattrs.shareable then
            passed = IsExclusiveGlobal(memaddrdesc.paddress, ProcessorID(), size);

    return passed;
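
AArch64.ExclusiveMonitorsPass() above and AArch64.SetExclusiveMonitors() later in this section are the two halves of a load-exclusive/store-exclusive pair. A deliberately simplified Python sketch of the local-monitor behaviour they describe (illustrative only: the architected model also involves translation, shareability, and the global monitor):

    # Illustrative sketch: a minimal local exclusives monitor.
    class LocalMonitor:
        def __init__(self):
            self.region = None                 # (address, size) or None

        def set(self, addr, size):             # load-exclusive side
            self.region = (addr, size)

        def passes(self, addr, size):          # store-exclusive side
            ok = self.region == (addr, size)
            if ok:
                self.region = None             # cleared once the store is allowed
            return ok

    m = LocalMonitor()
    m.set(0x1000, 8)
    assert m.passes(0x1000, 8)                 # matching store-exclusive succeeds
    assert not m.passes(0x1000, 8)             # monitor was cleared by the first
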
// An optional IMPLEMENTATION DEFINED test for an exclusive access to a virtual
// address region of size bytes starting at address.
//
// It is permitted (but not required) for this function to return FALSE and
// cause a store exclusive to fail if the virtual address region is not
// totally included within the region recorded by MarkExclusiveVA().
//
// It is always safe to return TRUE which will check the physical address only.
boolean AArch64.IsExclusiveVA(bits(64) address, integer processorid, integer size);
// Optionally record an exclusive access to the virtual address region of size bytes
// starting at address for processorid.
AArch64.MarkExclusiveVA(bits(64) address, integer processorid, integer size);
// AArch64.SetExclusiveMonitors()
// ==============================
// Sets the Exclusives monitors for the current PE to record the addresses associated
// with the virtual address region of size bytes starting at address.

AArch64.SetExclusiveMonitors(bits(64) address, integer size)

    acctype = AccType_ATOMIC;
    iswrite = FALSE;
    aligned = (address == Align(address, size));

    memaddrdesc = AArch64.TranslateAddress(address, acctype, iswrite, aligned, size);

    // Check for aborts or debug exceptions
    if IsFault(memaddrdesc) then
        return;

    if memaddrdesc.memattrs.shareable then
        MarkExclusiveGlobal(memaddrdesc.paddress, ProcessorID(), size);

    MarkExclusiveLocal(memaddrdesc.paddress, ProcessorID(), size);

    AArch64.MarkExclusiveVA(address, ProcessorID(), size);
// FPRSqrtStepFused()
// ==================

bits(N) FPRSqrtStepFused(bits(N) op1, bits(N) op2)
    assert N IN {16, 32, 64};
    bits(N) result;
    op1 = FPNeg(op1);
    (type1,sign1,value1) = FPUnpack(op1, FPCR);
    (type2,sign2,value2) = FPUnpack(op2, FPCR);
    (done,result) = FPProcessNaNs(type1, type2, op1, op2, FPCR);
    if !done then
        inf1  = (type1 == FPType_Infinity);
        inf2  = (type2 == FPType_Infinity);
        zero1 = (type1 == FPType_Zero);
        zero2 = (type2 == FPType_Zero);
        if (inf1 && zero2) || (zero1 && inf2) then
            result = FPOnePointFive('0');
        elsif inf1 || inf2 then
            result = FPInfinity(sign1 EOR sign2);
        else
            // Fully fused multiply-add and halve
            result_value = (3.0 + (value1 * value2)) / 2.0;
            if result_value == 0.0 then
                // Sign of exact zero result depends on rounding mode
                sign = if FPRoundingMode(FPCR) == FPRounding_NEGINF then '1' else '0';
                result = FPZero(sign);
            else
                result = FPRound(result_value, FPCR);
    return result;
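
With op1 negated on entry, the fused computation above is (3 - op1*op2) / 2, the step factor of the Newton-Raphson iteration for a reciprocal square root: y' = y * (3 - x*y*y) / 2. A small Python demonstration of the convergence (illustrative, ignoring the special-value and rounding handling above):

    # Illustrative sketch: Newton-Raphson refinement using the FRSQRTS step.
    import math

    def frsqrts(a, b):
        return (3.0 - a * b) / 2.0

    x, y = 2.0, 0.5                   # crude seed for 1/sqrt(2)
    for _ in range(4):
        y = y * frsqrts(x * y, y)     # each step roughly doubles correct digits
    print(y, 1.0 / math.sqrt(2.0))    # both ~0.7071067811865475
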
// FPRecipStepFused()
// ==================

bits(N) FPRecipStepFused(bits(N) op1, bits(N) op2)
    assert N IN {16, 32, 64};
    bits(N) result;
    op1 = FPNeg(op1);
    (type1,sign1,value1) = FPUnpack(op1, FPCR);
    (type2,sign2,value2) = FPUnpack(op2, FPCR);
    (done,result) = FPProcessNaNs(type1, type2, op1, op2, FPCR);
    if !done then
        inf1  = (type1 == FPType_Infinity);
        inf2  = (type2 == FPType_Infinity);
        zero1 = (type1 == FPType_Zero);
        zero2 = (type2 == FPType_Zero);
        if (inf1 && zero2) || (zero1 && inf2) then
            result = FPTwo('0');
        elsif inf1 || inf2 then
            result = FPInfinity(sign1 EOR sign2);
        else
            // Fully fused multiply-add
            result_value = 2.0 + (value1 * value2);
            if result_value == 0.0 then
                // Sign of exact zero result depends on rounding mode
                sign = if FPRoundingMode(FPCR) == FPRounding_NEGINF then '1' else '0';
                result = FPZero(sign);
            else
                result = FPRound(result_value, FPCR);
    return result;
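
Similarly, FPRecipStepFused() computes 2 - op1*op2 after negating op1, the step factor of the Newton-Raphson iteration for a reciprocal: y' = y * (2 - x*y). A small Python demonstration (again ignoring the special-value handling):

    # Illustrative sketch: Newton-Raphson refinement using the FRECPS step.
    def frecps(a, b):
        return 2.0 - a * b

    x, y = 3.0, 0.3                  # crude seed for 1/3
    for _ in range(4):
        y = y * frecps(x, y)         # quadratic convergence toward 1/x
    print(y)                         # ~0.3333333333333333
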
// AArch64.AccessIsTagChecked()
// ============================
// TRUE if a given access is tag-checked, FALSE otherwise.

boolean AArch64.AccessIsTagChecked(bits(64) vaddr, AccType acctype)
    if PSTATE.M<4> == '1' then return FALSE;

    if EffectiveTBI(vaddr, FALSE, PSTATE.EL) == '0' then
        return FALSE;

    if EffectiveTCMA(vaddr, PSTATE.EL) == '1' && (vaddr<59:55> == '00000' || vaddr<59:55> == '11111') then
        return FALSE;

    if !AArch64.AllocationTagAccessIsEnabled() then
        return FALSE;

    if acctype IN {AccType_IFETCH, AccType_PTW} then
        return FALSE;

    if acctype == AccType_NV2REGISTER then
        return FALSE;

    if PSTATE.TCO == '1' then
        return FALSE;

    if IsNonTagCheckedInstruction() then
        return FALSE;

    return TRUE;
// AArch64.AddressWithAllocationTag()
// ==================================
// Generate a 64-bit value containing a Logical Address Tag from a 64-bit
// virtual address and an Allocation Tag.

bits(64) AArch64.AddressWithAllocationTag(bits(64) address, bits(4) allocation_tag)
    bits(64) result = address;
    bits(4) tag = allocation_tag - ('000':address<55>);
    result<59:56> = tag;
    return result;
// AArch64.AllocationTagFromAddress()
// ==================================
// Generate an Allocation Tag from a 64-bit value containing a Logical Address Tag.

bits(4) AArch64.AllocationTagFromAddress(bits(64) tagged_address)
    bits(4) logical_tag = tagged_address<59:56>;
    bits(4) tag = logical_tag + ('000':tagged_address<55>);
    return tag;
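
The two functions above are inverses modulo 16: the Logical Address Tag in bits <59:56> is the Allocation Tag minus address bit <55>, and adding bit <55> back recovers the Allocation Tag. A small illustrative Python round trip:

    # Illustrative sketch: MTE logical/allocation tag arithmetic (mod 16).
    def address_with_allocation_tag(address, allocation_tag):
        logical = (allocation_tag - ((address >> 55) & 1)) % 16
        return (address & ~(0xF << 56)) | (logical << 56)

    def allocation_tag_from_address(tagged_address):
        logical = (tagged_address >> 56) & 0xF
        return (logical + ((tagged_address >> 55) & 1)) % 16

    # Round trip for a kernel-style address where bit 55 is 1:
    addr = 0xFFFF_8000_0000_1000
    tagged = address_with_allocation_tag(addr, 0x5)
    assert allocation_tag_from_address(tagged) == 0x5
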
// AArch64.CheckAlignment()
// ========================

boolean AArch64.CheckAlignment(bits(64) address, integer alignment, AccType acctype,
                               boolean iswrite)

    aligned = (address == Align(address, alignment));
    atomic  = acctype IN { AccType_ATOMIC, AccType_ATOMICRW, AccType_ORDEREDATOMIC, AccType_ORDEREDATOMICRW };
    ordered = acctype IN { AccType_ORDERED, AccType_ORDEREDRW, AccType_LIMITEDORDERED, AccType_ORDEREDATOMIC, AccType_ORDEREDATOMICRW };
    vector  = acctype == AccType_VEC;

    if SCTLR[].A == '1' then check = TRUE;
    elsif HaveUA16Ext() then
        check = (UInt(address<0+:4>) + alignment > 16) && ((ordered && SCTLR[].nAA == '0') || atomic);
    else check = atomic || ordered;

    if check && !aligned then
        secondstage = FALSE;
        AArch64.Abort(address, AArch64.AlignmentFault(acctype, iswrite, secondstage));

    return aligned;
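
When HaveUA16Ext() is implemented and SCTLR[].A is clear, the check above tolerates an unaligned atomic or ordered access as long as it stays within a single 16-byte granule: UInt(address<3:0>) plus the alignment parameter (the access size here) must not exceed 16. An illustrative Python sketch of just the boundary test:

    # Illustrative sketch: the 16-byte boundary-crossing test.
    def crosses_16_byte_boundary(address, size):
        return (address & 0xF) + size > 16

    assert not crosses_16_byte_boundary(0x1004, 8)   # bytes 4..11, one 16B granule
    assert crosses_16_byte_boundary(0x100C, 8)       # bytes 12..19, crosses
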
// AArch64.CheckTag()
// ==================
// Performs a Tag Check operation for a memory access and returns
// whether the check passed

boolean AArch64.CheckTag(AddressDescriptor memaddrdesc, bits(4) ptag, boolean write)
    if memaddrdesc.memattrs.tagged then
        return ptag == _MemTag[memaddrdesc];
    else
        return TRUE;
// AArch64.MemSingle[] - non-assignment (read) form
// ================================================
// Perform an atomic, little-endian read of 'size' bytes.

bits(size*8) AArch64.MemSingle[bits(64) address, integer size, AccType acctype, boolean wasaligned]
    assert size IN {1, 2, 4, 8, 16};
    assert address == Align(address, size);

    AddressDescriptor memaddrdesc;
    bits(size*8) value;
    iswrite = FALSE;

    // MMU or MPU
    memaddrdesc = AArch64.TranslateAddress(address, acctype, iswrite, wasaligned, size);

    // Check for aborts or debug exceptions
    if IsFault(memaddrdesc) then
        AArch64.Abort(address, memaddrdesc.fault);

    // Memory array access
    accdesc = CreateAccessDescriptor(acctype);
    if HaveMTEExt() then
        if AArch64.AccessIsTagChecked(ZeroExtend(address, 64), acctype) then
            bits(4) ptag = AArch64.TransformTag(ZeroExtend(address, 64));
            if !AArch64.CheckTag(memaddrdesc, ptag, iswrite) then
                AArch64.TagCheckFail(ZeroExtend(address, 64), iswrite);
    value = _Mem[memaddrdesc, size, accdesc];

    return value;

// AArch64.MemSingle[] - assignment (write) form
// =============================================
// Perform an atomic, little-endian write of 'size' bytes.

AArch64.MemSingle[bits(64) address, integer size, AccType acctype, boolean wasaligned] = bits(size*8) value
    assert size IN {1, 2, 4, 8, 16};
    assert address == Align(address, size);

    AddressDescriptor memaddrdesc;
    iswrite = TRUE;

    // MMU or MPU
    memaddrdesc = AArch64.TranslateAddress(address, acctype, iswrite, wasaligned, size);

    // Check for aborts or debug exceptions
    if IsFault(memaddrdesc) then
        AArch64.Abort(address, memaddrdesc.fault);

    // Effect on exclusives
    if memaddrdesc.memattrs.shareable then
        ClearExclusiveByAddress(memaddrdesc.paddress, ProcessorID(), size);

    // Memory array access
    accdesc = CreateAccessDescriptor(acctype);
    if HaveMTEExt() then
        if AArch64.AccessIsTagChecked(ZeroExtend(address, 64), acctype) then
            bits(4) ptag = AArch64.TransformTag(ZeroExtend(address, 64));
            if !AArch64.CheckTag(memaddrdesc, ptag, iswrite) then
                AArch64.TagCheckFail(ZeroExtend(address, 64), iswrite);
    _Mem[memaddrdesc, size, accdesc] = value;
    return;
// AArch64.CheckTag()
// ==================
// Performs a Tag Check operation for a memory access and returns
// whether the check passed
// AArch64.MemTag[] - non-assignment (read) form
// =============================================
// Load an Allocation Tag from memory.
booleanbits(4) AArch64.CheckTag(AArch64.MemTag[bits(64) address]AddressDescriptor memaddrdesc, bits(4) ptag, boolean write)
if memaddrdesc.memattrs.tagged then
return ptag == _MemTag[memaddrdesc];
else
return TRUE;memaddrdesc;
bits(4) value;
iswrite = FALSE;
memaddrdesc =AArch64.TranslateAddress(address, AccType_NORMAL, iswrite, TRUE, TAG_GRANULE);
// Check for aborts or debug exceptions
if IsFault(memaddrdesc) then
AArch64.Abort(address, memaddrdesc.fault);
// Return the granule tag if tagging is enabled...
if AArch64.AllocationTagAccessIsEnabled() && memaddrdesc.memattrs.tagged then
return _MemTag[memaddrdesc];
else
// ...otherwise read tag as zero.
return '0000';
// AArch64.MemTag[] - assignment (write) form
// ==========================================
// Store an Allocation Tag to memory.
AArch64.MemTag[bits(64) address] = bits(4) value
AddressDescriptor memaddrdesc;
iswrite = TRUE;
// Stores of allocation tags must be aligned
if address != Align(address, TAG_GRANULE) then
boolean secondstage = FALSE;
AArch64.Abort(address, AArch64.AlignmentFault(AccType_NORMAL, iswrite, secondstage));
wasaligned = TRUE;
memaddrdesc = AArch64.TranslateAddress(address, AccType_NORMAL, iswrite, wasaligned, TAG_GRANULE);
// Check for aborts or debug exceptions
if IsFault(memaddrdesc) then
AArch64.Abort(address, memaddrdesc.fault);
// Memory array access
if AArch64.AllocationTagAccessIsEnabled() && memaddrdesc.memattrs.tagged then
_MemTag[memaddrdesc] = value;
// AArch64.MemSingle[] - non-assignment (read) form
// ================================================
// Perform an atomic, little-endian read of 'size' bytes.
// AArch64.TransformTag()
// ======================
// Apply tag transformation rules.
bits(size*8)bits(4) AArch64.MemSingle[bits(64) address, integer size,AArch64.TransformTag(bits(64) vaddr)
bits(4) vtag = vaddr<59:56>;
bits(4) tagdelta = AccType acctype, boolean wasaligned]
assert size IN {1, 2, 4, 8, 16};
assert address == Align(address, size);
AddressDescriptor memaddrdesc;
bits(size*8) value;
iswrite = FALSE;
// MMU or MPU
memaddrdesc = AArch64.TranslateAddress(address, acctype, iswrite, wasaligned, size);
// Check for aborts or debug exceptions
if IsFault(memaddrdesc) then
AArch64.Abort(address, memaddrdesc.fault);
// Memory array access
accdesc = CreateAccessDescriptor(acctype);
if HaveMTEExt() then
if AArch64.AccessIsTagChecked(ZeroExtend(address, 64), acctype) then
bits(4) ptag = AArch64.PhysicalTag(ZeroExtend(address, 64));
if !AArch64.CheckTag(memaddrdesc, ptag, iswrite) then
AArch64.TagCheckFail(ZeroExtend(address, 64), iswrite);
value = _Mem[memaddrdesc, size, accdesc];
return value;
// AArch64.MemSingle[] - assignment (write) form
// =============================================
// Perform an atomic, little-endian write of 'size' bytes.
AArch64.MemSingle[bits(64) address, integer size, AccType acctype, boolean wasaligned] = bits(size*8) value
assert size IN {1, 2, 4, 8, 16};
assert address == Align(address, size);
AddressDescriptor memaddrdesc;
iswrite = TRUE;
// MMU or MPU
memaddrdesc = AArch64.TranslateAddress(address, acctype, iswrite, wasaligned, size);
// Check for aborts or debug exceptions
if IsFault(memaddrdesc) then
AArch64.Abort(address, memaddrdesc.fault);
// Effect on exclusives
if memaddrdesc.memattrs.shareable then
ClearExclusiveByAddress(memaddrdesc.paddress, ProcessorID(), size);
// Memory array access
accdesc = CreateAccessDescriptor(acctype);
if HaveMTEExt() then
if AArch64.AccessIsTagChecked(ZeroExtend(address, 64), acctype) then
bits(4) ptag = AArch64.PhysicalTag(ZeroExtend(address, 64));
if !AArch64.CheckTag(memaddrdesc, ptag, iswrite) then
AArch64.TagCheckFail(ZeroExtend(address, 64), iswrite);
_Mem[memaddrdesc, size, accdesc] = value;
return;(vaddr<55>);
bits(4) ptag = vtag + tagdelta;
return ptag;
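The tag transformation above is a simple modular add of bit<55> into the Logical Address Tag. A small Python sketch (illustrative only, not specification text) of the same arithmetic:

    def transform_tag(vaddr):
        # Mirrors AArch64.TransformTag: add bit<55> of the address to the
        # Logical Address Tag in bits<59:56> to form the tag used for checking (mod 16).
        vtag = (vaddr >> 56) & 0xF
        tagdelta = (vaddr >> 55) & 0x1
        return (vtag + tagdelta) & 0xF

    assert transform_tag(0x0F00_0000_0000_0000) == 0xF   # bit<55> clear: tag unchanged
    assert transform_tag(0x0F80_0000_0000_0000) == 0x0   # bit<55> set: tag wraps from 15 to 0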
// AArch64.TranslateAddressForAtomicAccess()
// =========================================
// Performs an alignment check for atomic memory operations.
// Also translates a 64-bit Virtual Address into a Physical Address.

AddressDescriptor AArch64.TranslateAddressForAtomicAccess(bits(64) address, integer sizeinbits)
    boolean iswrite = FALSE;
    size = sizeinbits DIV 8;
    assert size IN {1, 2, 4, 8, 16};

    aligned = AArch64.CheckAlignment(address, size, AccType_ATOMICRW, iswrite);

    // MMU or MPU lookup
    memaddrdesc = AArch64.TranslateAddress(address, AccType_ATOMICRW, iswrite, aligned, size);

    // Check for aborts or debug exceptions
    if IsFault(memaddrdesc) then
        AArch64.Abort(address, memaddrdesc.fault);

    // Effect on exclusives
    if memaddrdesc.memattrs.shareable then
        ClearExclusiveByAddress(memaddrdesc.paddress, ProcessorID(), size);

    if HaveMTEExt() && AArch64.AccessIsTagChecked(address, AccType_ATOMICRW) then
        bits(4) ptag = AArch64.TransformTag(address);
        if !AArch64.CheckTag(memaddrdesc, ptag, iswrite) then
            AArch64.TagCheckFail(address, iswrite);

    return memaddrdesc;

// CheckSPAlignment()
// ==================
// Check correct stack pointer alignment for AArch64 state.

CheckSPAlignment()
    bits(64) sp = SP[];
    if PSTATE.EL == EL0 then
        stack_align_check = (SCTLR[].SA0 != '0');
    else
        stack_align_check = (SCTLR[].SA != '0');

    if stack_align_check && sp != Align(sp, 16) then
        AArch64.SPAlignmentFault();

    return;
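A minimal Python sketch of the stack-pointer check (hypothetical helper names, not specification text):

    def check_sp_alignment(sp, el0, sa0, sa):
        # Mirrors CheckSPAlignment: the check is controlled by SCTLR.SA0 at EL0
        # and by SCTLR.SA at higher Exception levels; SP must be 16-byte aligned.
        stack_align_check = sa0 if el0 else sa
        if stack_align_check and sp % 16 != 0:
            raise Exception("SP alignment fault")

    check_sp_alignment(0x7FFF_FFF0, el0=True, sa0=True, sa=True)   # aligned: no fault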
// If the implementation supports changing the block size without a break-before-make
// approach, then for implementations that have level 1 or 2 support, the nT bit in
// the block descriptor is valid.
boolean IsBlockDescriptorNTBitValid();

// Mem[] - non-assignment (read) form
// ==================================
// Perform a read of 'size' bytes. The access byte order is reversed for a big-endian access.
// Instruction fetches would call AArch64.MemSingle directly.

bits(size*8) Mem[bits(64) address, integer size, AccType acctype]
    assert size IN {1, 2, 4, 8, 16};
    bits(size*8) value;
    boolean iswrite = FALSE;

    aligned = AArch64.CheckAlignment(address, size, acctype, iswrite);
    if size != 16 || !(acctype IN {AccType_VEC, AccType_VECSTREAM}) then
        atomic = aligned;
    else
        // 128-bit SIMD&FP loads are treated as a pair of 64-bit single-copy atomic accesses
        // 64-bit aligned.
        atomic = address == Align(address, 8);

    if !atomic then
        assert size > 1;
        value<7:0> = AArch64.MemSingle[address, 1, acctype, aligned];
// For subsequent bytes it is CONSTRAINED UNPREDICTABLE whether an unaligned Device memory
// access will generate an Alignment Fault, as to get this far means the first byte did
// not, so we must be changing to a new translation page.
if !aligned then
c = ConstrainUnpredictable(Unpredictable_DEVPAGE2);
assert c IN {Constraint_FAULT, Constraint_NONE};
if c == Constraint_NONE then aligned = TRUE;
for i = 1 to size-1
value<8*i+7:8*i> = AArch64.MemSingle[address+i, 1, acctype, aligned];
elsif size == 16 && acctype IN {AccType_VEC, AccType_VECSTREAM} then
value<63:0> = AArch64.MemSingle[address, 8, acctype, aligned];
value<127:64> = AArch64.MemSingle[address+8, 8, acctype, aligned];
else
value = AArch64.MemSingle[address, size, acctype, aligned];
if (HaveNV2Ext() && acctype == AccType_NV2REGISTER && SCTLR_EL2.EE == '1') || BigEndian() then
value = BigEndianReverse(value);
return value;
// Mem[] - assignment (write) form
// ===============================
// Perform a write of 'size' bytes. The byte order is reversed for a big-endian access.
Mem[bits(64) address, integer size, AccType acctype] = bits(size*8) value
boolean iswrite = TRUE;
if (HaveNV2Ext() && acctype == AccType_NV2REGISTER && SCTLR_EL2.EE == '1') || BigEndian() then
value = BigEndianReverse(value);
aligned = AArch64.CheckAlignment(address, size, acctype, iswrite);
if size != 16 || !(acctype IN {AccType_VEC, AccType_VECSTREAM}) then
atomic = aligned;
else
// 128-bit SIMD&FP stores are treated as a pair of 64-bit single-copy atomic accesses
// 64-bit aligned.
atomic = address == Align(address, 8);
if !atomic then
assert size > 1;
AArch64.MemSingle[address, 1, acctype, aligned] = value<7:0>;
// For subsequent bytes it is CONSTRAINED UNPREDICTABLE whether an unaligned Device memory
// access will generate an Alignment Fault, as to get this far means the first byte did
// not, so we must be changing to a new translation page.
if !aligned then
c = ConstrainUnpredictable(Unpredictable_DEVPAGE2);
assert c IN {Constraint_FAULT, Constraint_NONE};
if c == Constraint_NONE then aligned = TRUE;
for i = 1 to size-1
AArch64.MemSingle[address+i, 1, acctype, aligned] = value<8*i+7:8*i>;
elsif size == 16 && acctype IN {AccType_VEC, AccType_VECSTREAM} then
AArch64.MemSingle[address, 8, acctype, aligned] = value<63:0>;
AArch64.MemSingle[address+8, 8, acctype, aligned] = value<127:64>;
else
        AArch64.MemSingle[address, size, acctype, aligned] = value;
    return;
// MemAtomic()
// ===========
// Performs a load and store memory operation for a given virtual address.

bits(size) MemAtomic(bits(64) address, MemAtomicOp op, bits(size) value, AccType ldacctype, AccType stacctype)
bits(size) newvalue;
memaddrdesc = AArch64.TranslateAddressForAtomicAccess(address, size);
ldaccdesc = CreateAccessDescriptor(ldacctype);
staccdesc = CreateAccessDescriptor(stacctype);
// All observers in the shareability domain observe the
// following load and store atomically.
oldvalue = _Mem[memaddrdesc, size DIV 8, ldaccdesc];
if BigEndian() then
oldvalue = BigEndianReverse(oldvalue);
case op of
when MemAtomicOp_ADD newvalue = oldvalue + value;
when MemAtomicOp_BIC newvalue = oldvalue AND NOT(value);
when MemAtomicOp_EOR newvalue = oldvalue EOR value;
when MemAtomicOp_ORR newvalue = oldvalue OR value;
when MemAtomicOp_SMAX newvalue = if SInt(oldvalue) > SInt(value) then oldvalue else value;
when MemAtomicOp_SMIN newvalue = if SInt(oldvalue) > SInt(value) then value else oldvalue;
when MemAtomicOp_UMAX newvalue = if UInt(oldvalue) > UInt(value) then oldvalue else value;
when MemAtomicOp_UMIN newvalue = if UInt(oldvalue) > UInt(value) then value else oldvalue;
when MemAtomicOp_SWP newvalue = value;
if BigEndian() then
newvalue = BigEndianReverse(newvalue);
_Mem[memaddrdesc, size DIV 8, staccdesc] = newvalue;
// Load operations return the old (pre-operation) value
return oldvalue;
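As a rough model of the read-modify-write above, the following Python sketch applies a few of the MemAtomicOp operations over a little-endian memory image. It illustrates only the value computation; the atomicity and ordering guarantees of the architectural version are not modelled.

    def mem_atomic(memory, addr, op, value, size):
        # Illustrative only: the architectural version performs the load and
        # store as a single atomic step observed by all agents in the domain.
        mask = (1 << (8 * size)) - 1
        old = int.from_bytes(memory[addr:addr + size], "little")
        ops = {
            "ADD": lambda a, b: (a + b) & mask,
            "BIC": lambda a, b: a & ~b & mask,
            "EOR": lambda a, b: a ^ b,
            "ORR": lambda a, b: a | b,
            "SWP": lambda a, b: b,
        }
        new = ops[op](old, value)
        memory[addr:addr + size] = new.to_bytes(size, "little")
        return old   # the load half returns the pre-operation value

    mem = bytearray(8)
    assert mem_atomic(mem, 0, "ADD", 5, 4) == 0
    assert mem_atomic(mem, 0, "ADD", 7, 4) == 5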
// MemAtomicCompareAndSwap()
// =========================
// Compares the value stored at the passed-in memory address against the passed-in expected
// value. If the comparison is successful, the value at the passed-in memory address is swapped
// with the passed-in new_value.

bits(size) MemAtomicCompareAndSwap(bits(64) address, bits(size) expectedvalue,
                                   bits(size) newvalue, AccType ldacctype, AccType stacctype)
    memaddrdesc = AArch64.TranslateAddressForAtomicAccess(address, size);

    ldaccdesc = CreateAccessDescriptor(ldacctype);
    staccdesc = CreateAccessDescriptor(stacctype);

    // All observers in the shareability domain observe the
    // following load and store atomically.
    oldvalue = _Mem[memaddrdesc, size DIV 8, ldaccdesc];
    if BigEndian() then
        oldvalue = BigEndianReverse(oldvalue);

    if oldvalue == expectedvalue then
        if BigEndian() then
            newvalue = BigEndianReverse(newvalue);
        _Mem[memaddrdesc, size DIV 8, staccdesc] = newvalue;

    return oldvalue;
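The same style of sketch for the compare-and-swap: the store happens only when the loaded value equals the expected value, and the load result is returned either way (illustrative Python, not specification text):

    def mem_cas(memory, addr, expected, new, size):
        # Compare-and-swap over a little-endian memory image; returns the old value.
        old = int.from_bytes(memory[addr:addr + size], "little")
        if old == expected:
            memory[addr:addr + size] = new.to_bytes(size, "little")
        return old

    mem = bytearray(4)
    assert mem_cas(mem, 0, expected=0, new=0xAA, size=4) == 0      # swap succeeds
    assert mem_cas(mem, 0, expected=0, new=0xBB, size=4) == 0xAA   # swap fails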
// AddPAC()
// ========
// Calculates the pointer authentication code for a 64-bit quantity and then
// inserts that into pointer authentication code field of that 64-bit quantity.

bits(64) AddPAC(bits(64) ptr, bits(64) modifier, bits(128) K, boolean data)
    bits(64) PAC;
    bits(64) result;
    bits(64) ext_ptr;
    bits(64) extfield;
    bit selbit;
    boolean tbi = CalculateTBI(ptr, data);
    integer top_bit = if tbi then 55 else 63;

    // If tagged pointers are in use for a regime with two TTBRs, use bit<55> of
    // the pointer to select between upper and lower ranges, and preserve this.
    // This handles the awkward case where there is apparently no correct choice between
    // the upper and lower address range - ie an addr of 1xxxxxxx0... with TBI0=0 and TBI1=1
    // and 0xxxxxxx1 with TBI1=0 and TBI0=1:
    if PtrHasUpperAndLowerAddRanges() then
        assert S1TranslationRegime() IN {EL1, EL2};
        if S1TranslationRegime() == EL1 then
            // EL1 translation regime registers
            if data then
                selbit = if TCR_EL1.TBI1 == '1' || TCR_EL1.TBI0 == '1' then ptr<55> else ptr<63>;
            else
                if ((TCR_EL1.TBI1 == '1' && TCR_EL1.TBID1 == '0') ||
                    (TCR_EL1.TBI0 == '1' && TCR_EL1.TBID0 == '0')) then
                    selbit = ptr<55>;
                else
                    selbit = ptr<63>;
        else
            // EL2 translation regime registers
            if data then
                selbit = if ((HaveEL(EL2) && TCR_EL2.TBI1 == '1') ||
                             (HaveEL(EL2) && TCR_EL2.TBI0 == '1')) then ptr<55> else ptr<63>;
            else
                selbit = if ((HaveEL(EL2) && TCR_EL2.TBI1 == '1' && TCR_EL1.TBID1 == '0') ||
                             (HaveEL(EL2) && TCR_EL2.TBI0 == '1' && TCR_EL1.TBID0 == '0')) then ptr<55> else ptr<63>;
    else selbit = if tbi then ptr<55> else ptr<63>;

    integer bottom_PAC_bit = CalculateBottomPACBit(selbit);

    // The pointer authentication code field takes all the available bits in between
    extfield = Replicate(selbit, 64);

    // Compute the pointer authentication code for a ptr with good extension bits
    if tbi then
        ext_ptr = ptr<63:56>:extfield<(56-bottom_PAC_bit)-1:0>:ptr<bottom_PAC_bit-1:0>;
    else
        ext_ptr = extfield<(64-bottom_PAC_bit)-1:0>:ptr<bottom_PAC_bit-1:0>;

    PAC = ComputePAC(ext_ptr, modifier, K<127:64>, K<63:0>);

    // Check if the ptr has good extension bits and corrupt the pointer authentication code if not
    if !IsZero(ptr<top_bit:bottom_PAC_bit>) && !IsOnes(ptr<top_bit:bottom_PAC_bit>) then
        if HaveEnhancedPAC() then
            PAC = Zeros();
        else
            PAC<top_bit-1> = NOT(PAC<top_bit-1>);

    // Preserve the determination between upper and lower address at bit<55> and insert PAC
    if tbi then
        result = ptr<63:56>:selbit:PAC<54:bottom_PAC_bit>:ptr<bottom_PAC_bit-1:0>;
    else
        result = PAC<63:56>:selbit:PAC<54:bottom_PAC_bit>:ptr<bottom_PAC_bit-1:0>;
    return result;
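The tail of AddPAC() packs the computed PAC around the preserved select bit<55> and the untouched low address bits. A hedged Python sketch of just that packing step, assuming a precomputed PAC value and bottom_PAC_bit (both inputs here are placeholders chosen for the example):

    def insert_pac(ptr, pac, bottom_pac_bit, tbi):
        # Keep bit<55> (the range select) and the low address bits; replace the
        # rest of the upper field with PAC bits, as in the tail of AddPAC().
        low_mask = (1 << bottom_pac_bit) - 1
        selbit = (ptr >> 55) & 1
        body = pac & (((1 << (55 - bottom_pac_bit)) - 1) << bottom_pac_bit)
        top = (ptr if tbi else pac) & (0xFF << 56)   # TBI keeps the top byte of ptr
        return top | (selbit << 55) | body | (ptr & low_mask)

    # With a 48-bit VA space (bottom_pac_bit == 48) the PAC body occupies bits 54:48.
    p = insert_pac(0x0000_7FFF_DEAD_BEE0, pac=0x007F_0000_0000_0000,
                   bottom_pac_bit=48, tbi=False)
    print(hex(p))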
// AddPACDA()
// ==========
// Returns a 64-bit value containing X, but replacing the pointer authentication code
// field bits with a pointer authentication code, where the pointer authentication
// code is derived using a cryptographic algorithm as a combination of X, Y and the
// APDAKey_EL1.

bits(64) AddPACDA(bits(64) X, bits(64) Y)
    boolean TrapEL2;
    boolean TrapEL3;
    bits(1) Enable;
    bits(128) APDAKey_EL1;

    APDAKey_EL1 = APDAKeyHi_EL1<63:0> : APDAKeyLo_EL1<63:0>;
    case PSTATE.EL of
        when EL0
            boolean IsEL1Regime = S1TranslationRegime() == EL1;
            Enable = if IsEL1Regime then SCTLR_EL1.EnDA else SCTLR_EL2.EnDA;
            TrapEL2 = (EL2Enabled() && HCR_EL2.API == '0' &&
                       (HCR_EL2.TGE == '0' || HCR_EL2.E2H == '0'));
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL1
            Enable = SCTLR_EL1.EnDA;
            TrapEL2 = EL2Enabled() && HCR_EL2.API == '0';
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL2
            Enable = SCTLR_EL2.EnDA;
            TrapEL2 = FALSE;
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL3
            Enable = SCTLR_EL3.EnDA;
            TrapEL2 = FALSE;
            TrapEL3 = FALSE;

    if Enable == '0' then return X;
    elsif TrapEL2 then TrapPACUse(EL2);
    elsif TrapEL3 then TrapPACUse(EL3);
    else return AddPAC(X, Y, APDAKey_EL1, TRUE);
// AddPACDB()
// ==========
// Returns a 64-bit value containing X, but replacing the pointer authentication code
// field bits with a pointer authentication code, where the pointer authentication
// code is derived using a cryptographic algorithm as a combination of X, Y and the
// APDBKey_EL1.

bits(64) AddPACDB(bits(64) X, bits(64) Y)
    boolean TrapEL2;
    boolean TrapEL3;
    bits(1) Enable;
    bits(128) APDBKey_EL1;

    APDBKey_EL1 = APDBKeyHi_EL1<63:0> : APDBKeyLo_EL1<63:0>;
    case PSTATE.EL of
        when EL0
            boolean IsEL1Regime = S1TranslationRegime() == EL1;
            Enable = if IsEL1Regime then SCTLR_EL1.EnDB else SCTLR_EL2.EnDB;
            TrapEL2 = (EL2Enabled() && HCR_EL2.API == '0' &&
                       (HCR_EL2.TGE == '0' || HCR_EL2.E2H == '0'));
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL1
            Enable = SCTLR_EL1.EnDB;
            TrapEL2 = EL2Enabled() && HCR_EL2.API == '0';
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL2
            Enable = SCTLR_EL2.EnDB;
            TrapEL2 = FALSE;
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL3
            Enable = SCTLR_EL3.EnDB;
            TrapEL2 = FALSE;
            TrapEL3 = FALSE;

    if Enable == '0' then return X;
    elsif TrapEL2 then TrapPACUse(EL2);
    elsif TrapEL3 then TrapPACUse(EL3);
    else return AddPAC(X, Y, APDBKey_EL1, TRUE);
// AddPACGA()
// ==========
// Returns a 64-bit value where the lower 32 bits are 0, and the upper 32 bits contain
// a 32-bit pointer authentication code which is derived using a cryptographic
// algorithm as a combination of X, Y and the APGAKey_EL1.

bits(64) AddPACGA(bits(64) X, bits(64) Y)
    boolean TrapEL2;
    boolean TrapEL3;
    bits(128) APGAKey_EL1;

    APGAKey_EL1 = APGAKeyHi_EL1<63:0> : APGAKeyLo_EL1<63:0>;
    case PSTATE.EL of
        when EL0
            TrapEL2 = (EL2Enabled() && HCR_EL2.API == '0' &&
                       (HCR_EL2.TGE == '0' || HCR_EL2.E2H == '0'));
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL1
            TrapEL2 = EL2Enabled() && HCR_EL2.API == '0';
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL2
            TrapEL2 = FALSE;
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL3
            TrapEL2 = FALSE;
            TrapEL3 = FALSE;

    if TrapEL2 then TrapPACUse(EL2);
    elsif TrapEL3 then TrapPACUse(EL3);
    else return ComputePAC(X, Y, APGAKey_EL1<127:64>, APGAKey_EL1<63:0>)<63:32>:Zeros(32);
// AddPACIA()
// ==========
// Returns a 64-bit value containing X, but replacing the pointer authentication code
// field bits with a pointer authentication code, where the pointer authentication
// code is derived using a cryptographic algorithm as a combination of X, Y, and the
// APIAKey_EL1.

bits(64) AddPACIA(bits(64) X, bits(64) Y)
    boolean TrapEL2;
    boolean TrapEL3;
    bits(1) Enable;
    bits(128) APIAKey_EL1;

    APIAKey_EL1 = APIAKeyHi_EL1<63:0>:APIAKeyLo_EL1<63:0>;
    case PSTATE.EL of
        when EL0
            boolean IsEL1Regime = S1TranslationRegime() == EL1;
            Enable = if IsEL1Regime then SCTLR_EL1.EnIA else SCTLR_EL2.EnIA;
            TrapEL2 = (EL2Enabled() && HCR_EL2.API == '0' &&
                       (HCR_EL2.TGE == '0' || HCR_EL2.E2H == '0'));
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL1
            Enable = SCTLR_EL1.EnIA;
            TrapEL2 = EL2Enabled() && HCR_EL2.API == '0';
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL2
            Enable = SCTLR_EL2.EnIA;
            TrapEL2 = FALSE;
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL3
            Enable = SCTLR_EL3.EnIA;
            TrapEL2 = FALSE;
            TrapEL3 = FALSE;

    if Enable == '0' then return X;
    elsif TrapEL2 then TrapPACUse(EL2);
    elsif TrapEL3 then TrapPACUse(EL3);
    else return AddPAC(X, Y, APIAKey_EL1, FALSE);
// AddPACIB()
// ==========
// Returns a 64-bit value containing X, but replacing the pointer authentication code
// field bits with a pointer authentication code, where the pointer authentication
// code is derived using a cryptographic algorithm as a combination of X, Y and the
// APIBKey_EL1.

bits(64) AddPACIB(bits(64) X, bits(64) Y)
    boolean TrapEL2;
    boolean TrapEL3;
    bits(1) Enable;
    bits(128) APIBKey_EL1;

    APIBKey_EL1 = APIBKeyHi_EL1<63:0> : APIBKeyLo_EL1<63:0>;
    case PSTATE.EL of
        when EL0
            boolean IsEL1Regime = S1TranslationRegime() == EL1;
            Enable = if IsEL1Regime then SCTLR_EL1.EnIB else SCTLR_EL2.EnIB;
            TrapEL2 = (EL2Enabled() && HCR_EL2.API == '0' &&
                       (HCR_EL2.TGE == '0' || HCR_EL2.E2H == '0'));
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL1
            Enable = SCTLR_EL1.EnIB;
            TrapEL2 = EL2Enabled() && HCR_EL2.API == '0';
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL2
            Enable = SCTLR_EL2.EnIB;
            TrapEL2 = FALSE;
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL3
            Enable = SCTLR_EL3.EnIB;
            TrapEL2 = FALSE;
            TrapEL3 = FALSE;

    if Enable == '0' then return X;
    elsif TrapEL2 then TrapPACUse(EL2);
    elsif TrapEL3 then TrapPACUse(EL3);
    else return AddPAC(X, Y, APIBKey_EL1, FALSE);
// Auth()
// ======
// Restores the upper bits of the address to be all zeros or all ones (based on the
// value of bit[55]) and computes and checks the pointer authentication code. If the
// check passes, then the restored address is returned. If the check fails, the
// second-top and third-top bits of the extension bits in the pointer authentication code
// field are corrupted to ensure that accessing the address will give a translation fault.

bits(64) Auth(bits(64) ptr, bits(64) modifier, bits(128) K, boolean data, bit keynumber)
    bits(64) PAC;
    bits(64) result;
    bits(64) original_ptr;
    bits(2) error_code;
    bits(64) extfield;

    // Reconstruct the extension field used when adding the PAC to the pointer
    boolean tbi = CalculateTBI(ptr, data);
    integer bottom_PAC_bit = CalculateBottomPACBit(ptr<55>);
    extfield = Replicate(ptr<55>, 64);

    if tbi then
        original_ptr = ptr<63:56>:extfield<56-bottom_PAC_bit-1:0>:ptr<bottom_PAC_bit-1:0>;
    else
        original_ptr = extfield<64-bottom_PAC_bit-1:0>:ptr<bottom_PAC_bit-1:0>;

    PAC = ComputePAC(original_ptr, modifier, K<127:64>, K<63:0>);

    // Check pointer authentication code
    if tbi then
        if PAC<54:bottom_PAC_bit> == ptr<54:bottom_PAC_bit> then
            result = original_ptr;
        else
            error_code = keynumber:NOT(keynumber);
            result = original_ptr<63:55>:error_code:original_ptr<52:0>;
    else
        if ((PAC<54:bottom_PAC_bit> == ptr<54:bottom_PAC_bit>) &&
            (PAC<63:56> == ptr<63:56>)) then
            result = original_ptr;
        else
            error_code = keynumber:NOT(keynumber);
            result = original_ptr<63>:error_code:original_ptr<60:0>;
    return result;
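A simplified Python model of the non-TBI check-and-corrupt behaviour of Auth(): it compares the PAC field, strips it on success, and writes the two-bit error code into bits<62:61> on failure. This is a sketch under the stated assumptions (PAC already recomputed, 64-bit nonnegative inputs), not the architectural function:

    def auth_check(ptr, recomputed_pac, bottom_pac_bit, keynumber):
        # Sketch of the non-TBI case: the PAC field is bits <63:56> and <54:bottom>.
        field_mask = (((1 << (55 - bottom_pac_bit)) - 1) << bottom_pac_bit) | (0xFF << 56)
        extbit = (ptr >> 55) & 1
        ext = (0xFFFF_FFFF_FFFF_FFFF if extbit else 0) & field_mask
        original = (ptr & ~field_mask) | ext          # restore the extension bits
        if (ptr & field_mask) == (recomputed_pac & field_mask):
            return original                           # PAC matches: strip it
        error_code = (keynumber << 1) | (keynumber ^ 1)
        return (original & ~(0b11 << 61)) | (error_code << 61)   # corrupt bits<62:61>

    assert auth_check(0x0000_0000_DEAD_BEE0, 0, bottom_pac_bit=48, keynumber=0) == 0x0000_0000_DEAD_BEE0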
// AuthDA()
// ========
// Returns a 64-bit value containing X, but replacing the pointer authentication code
// field bits with the extension of the address bits. The instruction checks a pointer
// authentication code in the pointer authentication code field bits of X, using the same
// algorithm and key as AddPACDA().

bits(64) AuthDA(bits(64) X, bits(64) Y)
    boolean TrapEL2;
    boolean TrapEL3;
    bits(1) Enable;
    bits(128) APDAKey_EL1;

    APDAKey_EL1 = APDAKeyHi_EL1<63:0> : APDAKeyLo_EL1<63:0>;
    case PSTATE.EL of
        when EL0
            boolean IsEL1Regime = S1TranslationRegime() == EL1;
            Enable = if IsEL1Regime then SCTLR_EL1.EnDA else SCTLR_EL2.EnDA;
            TrapEL2 = (EL2Enabled() && HCR_EL2.API == '0' &&
                       (HCR_EL2.TGE == '0' || HCR_EL2.E2H == '0'));
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL1
            Enable = SCTLR_EL1.EnDA;
            TrapEL2 = EL2Enabled() && HCR_EL2.API == '0';
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL2
            Enable = SCTLR_EL2.EnDA;
            TrapEL2 = FALSE;
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL3
            Enable = SCTLR_EL3.EnDA;
            TrapEL2 = FALSE;
            TrapEL3 = FALSE;

    if Enable == '0' then return X;
    elsif TrapEL2 then TrapPACUse(EL2);
    elsif TrapEL3 then TrapPACUse(EL3);
    else return Auth(X, Y, APDAKey_EL1, TRUE, '0');
// AuthDB()
// ========
// Returns a 64-bit value containing X, but replacing the pointer authentication code
// field bits with the extension of the address bits. The instruction checks a
// pointer authentication code in the pointer authentication code field bits of X, using
// the same algorithm and key as AddPACDB().

bits(64) AuthDB(bits(64) X, bits(64) Y)
    boolean TrapEL2;
    boolean TrapEL3;
    bits(1) Enable;
    bits(128) APDBKey_EL1;

    APDBKey_EL1 = APDBKeyHi_EL1<63:0> : APDBKeyLo_EL1<63:0>;
    case PSTATE.EL of
        when EL0
            boolean IsEL1Regime = S1TranslationRegime() == EL1;
            Enable = if IsEL1Regime then SCTLR_EL1.EnDB else SCTLR_EL2.EnDB;
            TrapEL2 = (EL2Enabled() && HCR_EL2.API == '0' &&
                       (HCR_EL2.TGE == '0' || HCR_EL2.E2H == '0'));
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL1
            Enable = SCTLR_EL1.EnDB;
            TrapEL2 = EL2Enabled() && HCR_EL2.API == '0';
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL2
            Enable = SCTLR_EL2.EnDB;
            TrapEL2 = FALSE;
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL3
            Enable = SCTLR_EL3.EnDB;
            TrapEL2 = FALSE;
            TrapEL3 = FALSE;

    if Enable == '0' then return X;
    elsif TrapEL2 then TrapPACUse(EL2);
    elsif TrapEL3 then TrapPACUse(EL3);
    else return Auth(X, Y, APDBKey_EL1, TRUE, '1');
// AuthIA()
// ========
// Returns a 64-bit value containing X, but replacing the pointer authentication code
// field bits with the extension of the address bits. The instruction checks a pointer
// authentication code in the pointer authentication code field bits of X, using the same
// algorithm and key as AddPACIA().

bits(64) AuthIA(bits(64) X, bits(64) Y)
    boolean TrapEL2;
    boolean TrapEL3;
    bits(1) Enable;
    bits(128) APIAKey_EL1;

    APIAKey_EL1 = APIAKeyHi_EL1<63:0> : APIAKeyLo_EL1<63:0>;
    case PSTATE.EL of
        when EL0
            boolean IsEL1Regime = S1TranslationRegime() == EL1;
            Enable = if IsEL1Regime then SCTLR_EL1.EnIA else SCTLR_EL2.EnIA;
            TrapEL2 = (EL2Enabled() && HCR_EL2.API == '0' &&
                       (HCR_EL2.TGE == '0' || HCR_EL2.E2H == '0'));
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL1
            Enable = SCTLR_EL1.EnIA;
            TrapEL2 = EL2Enabled() && HCR_EL2.API == '0';
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL2
            Enable = SCTLR_EL2.EnIA;
            TrapEL2 = FALSE;
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL3
            Enable = SCTLR_EL3.EnIA;
            TrapEL2 = FALSE;
            TrapEL3 = FALSE;

    if Enable == '0' then return X;
    elsif TrapEL2 then TrapPACUse(EL2);
    elsif TrapEL3 then TrapPACUse(EL3);
    else return Auth(X, Y, APIAKey_EL1, FALSE, '0');
// AuthIB()
// ========
// Returns a 64-bit value containing X, but replacing the pointer authentication code
// field bits with the extension of the address bits. The instruction checks a pointer
// authentication code in the pointer authentication code field bits of X, using the same
// algorithm and key as AddPACIB().

bits(64) AuthIB(bits(64) X, bits(64) Y)
    boolean TrapEL2;
    boolean TrapEL3;
    bits(1) Enable;
    bits(128) APIBKey_EL1;

    APIBKey_EL1 = APIBKeyHi_EL1<63:0> : APIBKeyLo_EL1<63:0>;
    case PSTATE.EL of
        when EL0
            boolean IsEL1Regime = S1TranslationRegime() == EL1;
            Enable = if IsEL1Regime then SCTLR_EL1.EnIB else SCTLR_EL2.EnIB;
            TrapEL2 = (EL2Enabled() && HCR_EL2.API == '0' &&
                       (HCR_EL2.TGE == '0' || HCR_EL2.E2H == '0'));
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL1
            Enable = SCTLR_EL1.EnIB;
            TrapEL2 = EL2Enabled() && HCR_EL2.API == '0';
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL2
            Enable = SCTLR_EL2.EnIB;
            TrapEL2 = FALSE;
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL3
            Enable = SCTLR_EL3.EnIB;
            TrapEL2 = FALSE;
            TrapEL3 = FALSE;

    if Enable == '0' then return X;
    elsif TrapEL2 then TrapPACUse(EL2);
    elsif TrapEL3 then TrapPACUse(EL3);
    else return Auth(X, Y, APIBKey_EL1, FALSE, '1');
// CalculateBottomPACBit()
// =======================

integer CalculateBottomPACBit(bit top_bit)
    integer tsz_field;

    if PtrHasUpperAndLowerAddRanges() then
        assert S1TranslationRegime() IN {EL1, EL2};
        if S1TranslationRegime() == EL1 then
            // EL1 translation regime registers
            tsz_field = if top_bit == '1' then UInt(TCR_EL1.T1SZ) else UInt(TCR_EL1.T0SZ);
            using64k = if top_bit == '1' then TCR_EL1.TG1 == '11' else TCR_EL1.TG0 == '01';
        else
            // EL2 translation regime registers
            assert HaveEL(EL2);
            tsz_field = if top_bit == '1' then UInt(TCR_EL2.T1SZ) else UInt(TCR_EL2.T0SZ);
            using64k = if top_bit == '1' then TCR_EL2.TG1 == '11' else TCR_EL2.TG0 == '01';
    else
        tsz_field = if PSTATE.EL == EL2 then UInt(TCR_EL2.T0SZ) else UInt(TCR_EL3.T0SZ);
        using64k = if PSTATE.EL == EL2 then TCR_EL2.TG0 == '01' else TCR_EL3.TG0 == '01';

    max_limit_tsz_field = (if !HaveSmallPageTblExt() then 39 else if using64k then 47 else 48);
    if tsz_field > max_limit_tsz_field then
        // TCR_ELx.TySZ is out of range
        c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
        assert c IN {Constraint_FORCE, Constraint_NONE};
        if c == Constraint_FORCE then tsz_field = max_limit_tsz_field;

    tszmin = if using64k && VAMax() == 52 then 12 else 16;
    if tsz_field < tszmin then
        c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
        assert c IN {Constraint_FORCE, Constraint_NONE};
        if c == Constraint_FORCE then tsz_field = tszmin;

    return (64-tsz_field);
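In other words, the bottom PAC bit sits immediately above the translated virtual address bits. A trivial Python illustration of the final expression:

    def bottom_pac_bit(tnsz):
        # The PAC field starts just above the VA bits: 64 - TnSZ.
        return 64 - tnsz

    assert bottom_pac_bit(16) == 48   # 48-bit VA: PAC body occupies bits 54:48
    assert bottom_pac_bit(25) == 39   # 39-bit VA: a larger PAC field, bits 54:39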
// CalculateTBI()
// ==============

boolean CalculateTBI(bits(64) ptr, boolean data)
    boolean tbi = FALSE;

    if PtrHasUpperAndLowerAddRanges() then
        assert S1TranslationRegime() IN {EL1, EL2};
        if S1TranslationRegime() == EL1 then
            // EL1 translation regime registers
            if data then
                tbi = if ptr<55> == '1' then TCR_EL1.TBI1 == '1' else TCR_EL1.TBI0 == '1';
            else
                if ptr<55> == '1' then
                    tbi = TCR_EL1.TBI1 == '1' && TCR_EL1.TBID1 == '0';
                else
                    tbi = TCR_EL1.TBI0 == '1' && TCR_EL1.TBID0 == '0';
        else
            // EL2 translation regime registers
            if data then
                tbi = if ptr<55> == '1' then TCR_EL2.TBI1 == '1' else TCR_EL2.TBI0 == '1';
            else
                if ptr<55> == '1' then
                    tbi = TCR_EL2.TBI1 == '1' && TCR_EL2.TBID1 == '0';
                else
                    tbi = TCR_EL2.TBI0 == '1' && TCR_EL2.TBID0 == '0';
    elsif PSTATE.EL == EL2 then
        tbi = if data then TCR_EL2.TBI=='1' else TCR_EL2.TBI=='1' && TCR_EL2.TBID=='0';
    elsif PSTATE.EL == EL3 then
        tbi = if data then TCR_EL3.TBI=='1' else TCR_EL3.TBI=='1' && TCR_EL3.TBID=='0';
    return tbi;
array bits(64) RC[0..4];

bits(64) ComputePAC(bits(64) data, bits(64) modifier, bits(64) key0, bits(64) key1)
    bits(64) workingval;
    bits(64) runningmod;
    bits(64) roundkey;
    bits(64) modk0;
    constant bits(64) Alpha = 0xC0AC29B7C97C50DD<63:0>;

    RC[0] = 0x0000000000000000<63:0>;
    RC[1] = 0x13198A2E03707344<63:0>;
    RC[2] = 0xA4093822299F31D0<63:0>;
    RC[3] = 0x082EFA98EC4E6C89<63:0>;
    RC[4] = 0x452821E638D01377<63:0>;

    modk0 = key0<0>:key0<63:2>:(key0<63> EOR key0<1>);
    runningmod = modifier;
    workingval = data EOR key0;

    for i = 0 to 4
        roundkey = key1 EOR runningmod;
        workingval = workingval EOR roundkey;
        workingval = workingval EOR RC[i];
        if i > 0 then
            workingval = PACCellShuffle(workingval);
            workingval = PACMult(workingval);
        workingval = PACSub(workingval);
        runningmod = TweakShuffle(runningmod<63:0>);
    roundkey = modk0 EOR runningmod;
    workingval = workingval EOR roundkey;
    workingval = PACCellShuffle(workingval);
    workingval = PACMult(workingval);
    workingval = PACSub(workingval);
    workingval = PACCellShuffle(workingval);
    workingval = PACMult(workingval);
    workingval = key1 EOR workingval;
    workingval = PACCellInvShuffle(workingval);
    workingval = PACInvSub(workingval);
    workingval = PACMult(workingval);
    workingval = PACCellInvShuffle(workingval);
    workingval = workingval EOR key0;
    workingval = workingval EOR runningmod;
    for i = 0 to 4
        workingval = PACInvSub(workingval);
        if i < 4 then
            workingval = PACMult(workingval);
            workingval = PACCellInvShuffle(workingval);
        runningmod = TweakInvShuffle(runningmod<63:0>);
        roundkey = key1 EOR runningmod;
        workingval = workingval EOR RC[4-i];
        workingval = workingval EOR roundkey;
    workingval = workingval EOR Alpha;
    workingval = workingval EOR modk0;

    return workingval;
// PACCellInvShuffle()
// ===================

bits(64) PACCellInvShuffle(bits(64) indata)
    bits(64) outdata;
    outdata<3:0> = indata<15:12>;
    outdata<7:4> = indata<27:24>;
    outdata<11:8> = indata<51:48>;
    outdata<15:12> = indata<39:36>;
    outdata<19:16> = indata<59:56>;
    outdata<23:20> = indata<47:44>;
    outdata<27:24> = indata<7:4>;
    outdata<31:28> = indata<19:16>;
    outdata<35:32> = indata<35:32>;
    outdata<39:36> = indata<55:52>;
    outdata<43:40> = indata<31:28>;
    outdata<47:44> = indata<11:8>;
    outdata<51:48> = indata<23:20>;
    outdata<55:52> = indata<3:0>;
    outdata<59:56> = indata<43:40>;
    outdata<63:60> = indata<63:60>;
    return outdata;
// PACCellShuffle()
// ================

bits(64) PACCellShuffle(bits(64) indata)
    bits(64) outdata;
    outdata<3:0> = indata<55:52>;
    outdata<7:4> = indata<27:24>;
    outdata<11:8> = indata<47:44>;
    outdata<15:12> = indata<3:0>;
    outdata<19:16> = indata<31:28>;
    outdata<23:20> = indata<51:48>;
    outdata<27:24> = indata<7:4>;
    outdata<31:28> = indata<43:40>;
    outdata<35:32> = indata<35:32>;
    outdata<39:36> = indata<15:12>;
    outdata<43:40> = indata<59:56>;
    outdata<47:44> = indata<23:20>;
    outdata<51:48> = indata<11:8>;
    outdata<55:52> = indata<39:36>;
    outdata<59:56> = indata<19:16>;
    outdata<63:60> = indata<63:60>;
    return outdata;
// PACInvSub()
// ===========

bits(64) PACInvSub(bits(64) Tinput)
// This is a 4-bit substitution from the PRINCE-family cipher
bits(64) Toutput;
for i = 0 to 15
case Tinput<4*i+3:4*i> of
when '0000' Toutput<4*i+3:4*i> = '0101';
when '0001' Toutput<4*i+3:4*i> = '1110';
when '0010' Toutput<4*i+3:4*i> = '1101';
when '0011' Toutput<4*i+3:4*i> = '1000';
when '0100' Toutput<4*i+3:4*i> = '1010';
when '0101' Toutput<4*i+3:4*i> = '1011';
when '0110' Toutput<4*i+3:4*i> = '0001';
when '0111' Toutput<4*i+3:4*i> = '1001';
when '1000' Toutput<4*i+3:4*i> = '0010';
when '1001' Toutput<4*i+3:4*i> = '0110';
when '1010' Toutput<4*i+3:4*i> = '1111';
when '1011' Toutput<4*i+3:4*i> = '0000';
when '1100' Toutput<4*i+3:4*i> = '0100';
when '1101' Toutput<4*i+3:4*i> = '1100';
when '1110' Toutput<4*i+3:4*i> = '0111';
when '1111' Toutput<4*i+3:4*i> = '0011';
return Toutput;
// PACCellShuffle()
// ================
// PACMult()
// =========
bits(64) PACCellShuffle(bits(64) indata)
bits(64) outdata;
outdata<3:0> = indata<55:52>;
outdata<7:4> = indata<27:24>;
outdata<11:8> = indata<47:44>;
outdata<15:12> = indata<3:0>;
outdata<19:16> = indata<31:28>;
outdata<23:20> = indata<51:48>;
outdata<27:24> = indata<7:4>;
outdata<31:28> = indata<43:40>;
outdata<35:32> = indata<35:32>;
outdata<39:36> = indata<15:12>;
outdata<43:40> = indata<59:56>;
outdata<47:44> = indata<23:20>;
outdata<51:48> = indata<11:8>;
outdata<55:52> = indata<39:36>;
outdata<59:56> = indata<19:16>;
outdata<63:60> = indata<63:60>;
return outdata;
// PACMult()
// =========
bits(64) PACMult(bits(64) Sinput)
bits(4) t0;
bits(4) t1;
bits(4) t2;
bits(4) t3;
bits(64) Soutput;
for i = 0 to 3
t0<3:0> = RotCell(Sinput<4*(i+8)+3:4*(i+8)>, 1) EOR RotCell(Sinput<4*(i+4)+3:4*(i+4)>, 2);
t0<3:0> = t0<3:0> EOR RotCell(Sinput<4*(i)+3:4*(i)>, 1);
t1<3:0> = RotCell(Sinput<4*(i+12)+3:4*(i+12)>, 1) EOR RotCell(Sinput<4*(i+4)+3:4*(i+4)>, 1);
t1<3:0> = t1<3:0> EOR RotCell(Sinput<4*(i)+3:4*(i)>, 2);
t2<3:0> = RotCell(Sinput<4*(i+12)+3:4*(i+12)>, 2) EOR RotCell(Sinput<4*(i+8)+3:4*(i+8)>, 1);
t2<3:0> = t2<3:0> EOR RotCell(Sinput<4*(i)+3:4*(i)>, 1);
t3<3:0> = RotCell(Sinput<4*(i+12)+3:4*(i+12)>, 1) EOR RotCell(Sinput<4*(i+8)+3:4*(i+8)>, 2);
t3<3:0> = t3<3:0> EOR RotCell(Sinput<4*(i+4)+3:4*(i+4)>, 1);
Soutput<4*i+3:4*i> = t3<3:0>;
Soutput<4*(i+4)+3:4*(i+4)> = t2<3:0>;
Soutput<4*(i+8)+3:4*(i+8)> = t1<3:0>;
Soutput<4*(i+12)+3:4*(i+12)> = t0<3:0>;
return Soutput;
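PACMult() is the cipher's MixColumns-style layer: each output cell is the XOR of rotated copies of the other three cells in its column. A hedged Python sketch follows, with the 64-bit value modelled as 16 nibbles (nibble i holding bits <4*i+3:4*i>) and rot4 mirroring RotCell, defined later. Note that the layer is its own inverse, which is why ComputePAC can use the same function in both the forward and backward passes:

# Informal Python sketch of PACMult (not the architected pseudocode).
import random

def rot4(c, amount):
    # Left rotation within a 4-bit cell, as RotCell does.
    return ((c << amount) | (c >> (4 - amount))) & 0xF

def pac_mult(cells):
    out = [0] * 16
    for i in range(4):
        t0 = rot4(cells[i+8], 1) ^ rot4(cells[i+4], 2) ^ rot4(cells[i], 1)
        t1 = rot4(cells[i+12], 1) ^ rot4(cells[i+4], 1) ^ rot4(cells[i], 2)
        t2 = rot4(cells[i+12], 2) ^ rot4(cells[i+8], 1) ^ rot4(cells[i], 1)
        t3 = rot4(cells[i+12], 1) ^ rot4(cells[i+8], 2) ^ rot4(cells[i+4], 1)
        out[i], out[i+4], out[i+8], out[i+12] = t3, t2, t1, t0
    return out

cells = [random.randrange(16) for _ in range(16)]
assert pac_mult(pac_mult(cells)) == cells   # the mix layer is an involution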
// PACSub()
// ========
bits(64) PACSub(bits(64) Tinput)
// This is a 4-bit substitution from the PRINCE-family cipher
bits(64) Toutput;
for i = 0 to 15
case Tinput<4*i+3:4*i> of
when '0000' Toutput<4*i+3:4*i> = '1011';
when '0001' Toutput<4*i+3:4*i> = '0110';
when '0010' Toutput<4*i+3:4*i> = '1000';
when '0011' Toutput<4*i+3:4*i> = '1111';
when '0100' Toutput<4*i+3:4*i> = '1100';
when '0101' Toutput<4*i+3:4*i> = '0000';
when '0110' Toutput<4*i+3:4*i> = '1001';
when '0111' Toutput<4*i+3:4*i> = '1110';
when '1000' Toutput<4*i+3:4*i> = '0011';
when '1001' Toutput<4*i+3:4*i> = '0111';
when '1010' Toutput<4*i+3:4*i> = '0100';
when '1011' Toutput<4*i+3:4*i> = '0101';
when '1100' Toutput<4*i+3:4*i> = '1101';
when '1101' Toutput<4*i+3:4*i> = '0010';
when '1110' Toutput<4*i+3:4*i> = '0001';
when '1111' Toutput<4*i+3:4*i> = '1010';
return Toutput;
// RotCell()
// =========
bits(4) RotCell(bits(4) incell, integer amount)
bits(8) tmp;
bits(4) outcell;
// assert amount>3 || amount<1;
tmp<7:0> = incell<3:0>:incell<3:0>;
outcell = tmp<7-amount:4-amount>;
return outcell;
// TweakCellInvRot()
// =================
bits(4) TweakCellInvRot(bits(4) incell)
bits(4) outcell;
outcell<3> = incell<2>;
outcell<2> = incell<1>;
outcell<1> = incell<0>;
outcell<0> = incell<0> EOR incell<3>;
return outcell;
// TweakCellRot()
// ==============
bits(4) TweakCellRot(bits(4) incell)
bits(4) outcell;
outcell<3> = incell<0> EOR incell<1>;
outcell<2> = incell<3>;
outcell<1> = incell<2>;
outcell<0> = incell<1>;
return outcell;
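TweakCellRot() and TweakCellInvRot() are single-cell LFSR steps in opposite directions. A quick informal Python check that they invert each other on all sixteen cell values (bit i of the cell models incell<i>):

# Informal cross-check, not architected pseudocode.
def tweak_cell_rot(c):
    b = [(c >> i) & 1 for i in range(4)]          # b[i] = incell<i>
    return ((b[0] ^ b[1]) << 3) | (b[3] << 2) | (b[2] << 1) | b[1]

def tweak_cell_inv_rot(c):
    b = [(c >> i) & 1 for i in range(4)]
    return (b[2] << 3) | (b[1] << 2) | (b[0] << 1) | (b[0] ^ b[3])

for v in range(16):
    assert tweak_cell_inv_rot(tweak_cell_rot(v)) == v
print("TweakCellRot/TweakCellInvRot are mutual inverses")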
// TweakInvShuffle()
// =================
bits(64) TweakInvShuffle(bits(64) indata)
bits(64) outdata;
outdata<3:0> = TweakCellInvRot(indata<51:48>);
outdata<7:4> = indata<55:52>;
outdata<11:8> = indata<23:20>;
outdata<15:12> = indata<27:24>;
outdata<19:16> = indata<3:0>;
outdata<23:20> = indata<7:4>;
outdata<27:24> = TweakCellInvRot(indata<11:8>);
outdata<31:28> = indata<15:12>;
outdata<35:32> = TweakCellInvRot(indata<31:28>);
outdata<39:36> = TweakCellInvRot(indata<63:60>);
outdata<43:40> = TweakCellInvRot(indata<59:56>);
outdata<47:44> = TweakCellInvRot(indata<19:16>);
outdata<51:48> = indata<35:32>;
outdata<55:52> = indata<39:36>;
outdata<59:56> = indata<43:40>;
outdata<63:60> = TweakCellInvRot(indata<47:44>);
return outdata;
// TweakShuffle()
// ==============
bits(64) TweakShuffle(bits(64) indata)
bits(64) outdata;
outdata<3:0> = indata<19:16>;
outdata<7:4> = indata<23:20>;
outdata<11:8> = TweakCellRot(indata<27:24>);
outdata<15:12> = indata<31:28>;
outdata<19:16> = TweakCellRot(indata<47:44>);
outdata<23:20> = indata<11:8>;
outdata<27:24> = indata<15:12>;
outdata<31:28> = TweakCellRot(indata<35:32>);
outdata<35:32> = indata<51:48>;
outdata<39:36> = indata<55:52>;
outdata<43:40> = indata<59:56>;
outdata<47:44> = TweakCellRot(indata<63:60>);
outdata<51:48> = TweakCellRot(indata<3:0>);
outdata<55:52> = indata<7:4>;
outdata<59:56> = TweakCellRot(indata<43:40>);
outdata<63:60> = TweakCellRot(indata<39:36>);
return outdata;
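TweakShuffle() and TweakInvShuffle() permute the sixteen tweak cells, rotating a subset of them. The following informal Python round-trip check, with the 64-bit tweak modelled as 16 nibbles (nibble i holding bits <4*i+3:4*i>) and the two permutations transcribed as (destination, source, rotate?) triples, is a sketch rather than architected pseudocode:

import random

def rot(c):
    b = [(c >> i) & 1 for i in range(4)]
    return ((b[0] ^ b[1]) << 3) | (b[3] << 2) | (b[2] << 1) | b[1]

def inv_rot(c):
    b = [(c >> i) & 1 for i in range(4)]
    return (b[2] << 3) | (b[1] << 2) | (b[0] << 1) | (b[0] ^ b[3])

# Transcribed from TweakShuffle above.
SHUF = [(0, 4, 0), (1, 5, 0), (2, 6, 1), (3, 7, 0),
        (4, 11, 1), (5, 2, 0), (6, 3, 0), (7, 8, 1),
        (8, 12, 0), (9, 13, 0), (10, 14, 0), (11, 15, 1),
        (12, 0, 1), (13, 1, 0), (14, 10, 1), (15, 9, 1)]
# Transcribed from TweakInvShuffle above.
INVSHUF = [(0, 12, 1), (1, 13, 0), (2, 5, 0), (3, 6, 0),
           (4, 0, 0), (5, 1, 0), (6, 2, 1), (7, 3, 0),
           (8, 7, 1), (9, 15, 1), (10, 14, 1), (11, 4, 1),
           (12, 8, 0), (13, 9, 0), (14, 10, 0), (15, 11, 1)]

def do_shuffle(tbl, cells, cellrot):
    out = [0] * 16
    for dst, src, rotate in tbl:
        out[dst] = cellrot(cells[src]) if rotate else cells[src]
    return out

t = [random.randrange(16) for _ in range(16)]
assert do_shuffle(INVSHUF, do_shuffle(SHUF, t, rot), inv_rot) == t
print("TweakShuffle/TweakInvShuffle round-trip OK")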
// HaveEnhancedPAC()
// =================
// Returns TRUE if support for EnhancedPAC is implemented, FALSE otherwise.
boolean HaveEnhancedPAC()
return (HavePACExt()
&& boolean IMPLEMENTATION_DEFINED "Has enhanced PAC functionality");
// HavePACExt()
// ============
// Returns TRUE if support for the PAC extension is implemented, FALSE otherwise.
boolean HavePACExt()
return HasArchVersion(ARMv8p3);
// PtrHasUpperAndLowerAddRanges()
// ==============================
// Returns TRUE if the pointer has upper and lower address ranges, FALSE otherwise.
boolean PtrHasUpperAndLowerAddRanges()
return PSTATE.EL == EL1 || PSTATE.EL == EL0 || (PSTATE.EL == EL2 && HCR_EL2.E2H == '1');
// Strip()
// =======
// Strip() returns a 64-bit value containing A, but replacing the pointer authentication
// code field bits with the extension of the address bits. This can apply to either
// instructions or data, where, as the use of tagged pointers is distinct, it might be
// handled differently.
bits(64) Strip(bits(64) A, boolean data)
boolean TrapEL2;
boolean TrapEL3;
bits(64) original_ptr;
bits(64) extfield;
boolean tbi = CalculateTBI(A, data);
integer bottom_PAC_bit = CalculateBottomPACBit(A<55>);
extfield = Replicate(A<55>, 64);
if tbi then
original_ptr = A<63:56>:extfield<56-bottom_PAC_bit-1:0>:A<bottom_PAC_bit-1:0>;
else
original_ptr = extfield<64-bottom_PAC_bit-1:0>:A<bottom_PAC_bit-1:0>;
case PSTATE.EL of
when EL0
TrapEL2 = (EL2Enabled() && HCR_EL2.API == '0' &&
(HCR_EL2.TGE == '0' || HCR_EL2.E2H == '0'));
TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
when EL1
TrapEL2 = EL2Enabled() && HCR_EL2.API == '0';
TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
when EL2
TrapEL2 = FALSE;
TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
when EL3
TrapEL2 = FALSE;
TrapEL3 = FALSE;
if TrapEL2 then TrapPACUse(EL2);
elsif TrapEL3 then TrapPACUse(EL3);
else return original_ptr;
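Setting aside the trap checks, the reconstruction itself only replaces the PAC field with copies of bit 55. A hedged Python sketch follows; the bottom_pac_bit value is an assumption for illustration, since architecturally it comes from CalculateBottomPACBit(), which depends on the configured address size:

# Hedged sketch of Strip()'s pointer reconstruction (trap checks omitted).
def strip(a, tbi=True, bottom_pac_bit=48):
    bit55 = (a >> 55) & 1
    ext = (1 << 64) - 1 if bit55 else 0              # Replicate(A<55>, 64)
    low = a & ((1 << bottom_pac_bit) - 1)            # A<bottom_PAC_bit-1:0>
    if tbi:
        top = a & (0xFF << 56)                       # keep A<63:56>
        mid = ext & (((1 << (56 - bottom_pac_bit)) - 1) << bottom_pac_bit)
        return top | mid | low
    else:
        mid = ext & (((1 << (64 - bottom_pac_bit)) - 1) << bottom_pac_bit)
        return mid | low

# Example: a PAC-signed user pointer (bit 55 clear) has its PAC bits zeroed.
signed = 0x001F_0000_0000_1234
assert strip(signed) == 0x1234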
// TrapPACUse()
// ============
// Used for the trapping of the pointer authentication functions by higher exception
// levels.
TrapPACUse(bits(2) target_el)
assert HaveEL(target_el) && target_el != EL0 && UInt(target_el) >= UInt(PSTATE.EL);
bits(64) preferred_exception_return = ThisInstrAddr();
ExceptionRecord exception;
vect_offset = 0;
exception = ExceptionSyndrome(Exception_PACTrap);
AArch64.TakeException(target_el, exception, preferred_exception_return, vect_offset);
// AArch64.ESBOperation()
// ======================
// Perform the AArch64 ESB operation, either for ESB executed in AArch64 state, or for
// ESB in AArch32 state when SError interrupts are routed to an Exception level using
// AArch64
AArch64.ESBOperation()
route_to_el3 = HaveEL(EL3) && SCR_EL3.EA == '1';
route_to_el2 = (EL2Enabled() &&
(HCR_EL2.TGE == '1' || HCR_EL2.AMO == '1'));
target = if route_to_el3 then EL3 elsif route_to_el2 then EL2 else EL1;
if target == EL1 then
mask_active = PSTATE.EL IN {EL0, EL1};
elsif HaveVirtHostExt() && target == EL2 && HCR_EL2.<E2H,TGE> == '11' then
mask_active = PSTATE.EL IN {EL0, EL2};
else
mask_active = PSTATE.EL == target;
mask_set = (PSTATE.A == '1' && (!HaveDoubleFaultExt() || SCR_EL3.EA == '0' ||
PSTATE.EL != EL3 || SCR_EL3.NMEA == '0'));
intdis = Halted() || ExternalDebugInterruptsDisabled(target);
masked = (UInt(target) < UInt(PSTATE.EL)) || intdis || (mask_active && mask_set);
// Check for a masked Physical SError pending
if IsPhysicalSErrorPending() && masked then
// This function might be called for an interworking case, and INTdis is masking
// the SError interrupt.
if ELUsingAArch32(S1TranslationRegime()) then
syndrome32 = AArch32.PhysicalSErrorSyndrome();
DISR = AArch32.ReportDeferredSError(syndrome32.AET, syndrome32.ExT);
else
implicit_esb = FALSE;
syndrome64 = AArch64.PhysicalSErrorSyndrome(implicit_esb);
DISR_EL1 = AArch64.ReportDeferredSError(syndrome64);
ClearPendingPhysicalSError(); // Set ISR_EL1.A to 0
return;
// Return the SError syndrome
bits(25) AArch64.PhysicalSErrorSyndrome(boolean implicit_esb);
// AArch64.ReportDeferredSError()
// ==============================
// Generate deferred SError syndrome
bits(64) AArch64.ReportDeferredSError(bits(25) syndrome)
bits(64) target;
target<31> = '1'; // A
target<24> = syndrome<24>; // IDS
target<23:0> = syndrome<23:0>; // ISS
return target;
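The packing is a straight bit-field insertion. An informal Python equivalent of the syndrome-to-DISR_EL1 layout (A in bit 31, IDS in bit 24, ISS in bits 23:0):

# Informal sketch, not architected pseudocode.
def report_deferred_serror(syndrome25):
    assert 0 <= syndrome25 < (1 << 25)
    ids = (syndrome25 >> 24) & 1
    iss = syndrome25 & 0xFFFFFF
    return (1 << 31) | (ids << 24) | iss

assert report_deferred_serror(0x1ABCDEF) == 0x81ABCDEF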
// AArch64.vESBOperation()
// =======================
// Perform the AArch64 ESB operation for virtual SError interrupts, either for ESB
// executed in AArch64 state, or for ESB in AArch32 state with EL2 using AArch64 state
AArch64.vESBOperation()
assert PSTATE.EL IN {EL0, EL1} && EL2Enabled();
// If physical SError interrupts are routed to EL2, and TGE is not set, then a virtual
// SError interrupt might be pending
vSEI_enabled = HCR_EL2.TGE == '0' && HCR_EL2.AMO == '1';
vSEI_pending = vSEI_enabled && HCR_EL2.VSE == '1';
vintdis = Halted() || ExternalDebugInterruptsDisabled(EL1);
vmasked = vintdis || PSTATE.A == '1';
// Check for a masked virtual SError pending
if vSEI_pending && vmasked then
// This function might be called for the interworking case, and INTdis is masking
// the virtual SError interrupt.
if ELUsingAArch32(EL1) then
VDISR = AArch32.ReportDeferredSError(VDFSR<15:14>, VDFSR<12>);
else
VDISR_EL2 = AArch64.ReportDeferredSError(VSESR_EL2<24:0>);
HCR_EL2.VSE = '0'; // Clear pending virtual SError
return;
// AArch64.MaybeZeroRegisterUppers()
// =================================
// On taking an exception to AArch64 from AArch32, it is CONSTRAINED UNPREDICTABLE whether the top
// 32 bits of registers visible at any lower Exception level using AArch32 are set to zero.
AArch64.MaybeZeroRegisterUppers()
assert UsingAArch32(); // Always called from AArch32 state before entering AArch64 state
if PSTATE.EL == EL0 && !ELUsingAArch32(EL1) then
first = 0; last = 14; include_R15 = FALSE;
elsif PSTATE.EL IN {EL0, EL1} && EL2Enabled() && !ELUsingAArch32(EL2) then
first = 0; last = 30; include_R15 = FALSE;
else
first = 0; last = 30; include_R15 = TRUE;
for n = first to last
if (n != 15 || include_R15) && ConstrainUnpredictableBool(Unpredictable_ZEROUPPER) then
_R[n]<63:32> = Zeros();
return;
// AArch64.ResetGeneralRegisters()
// ===============================
AArch64.ResetGeneralRegisters()
for i = 0 to 30
X[i] = bits(64) UNKNOWN;
return;
// AArch64.ResetSIMDFPRegisters()
// ==============================
AArch64.ResetSIMDFPRegisters()
for i = 0 to 31
V[i] = bits(128) UNKNOWN;
return;
// AArch64.ResetSpecialRegisters()
// ===============================
AArch64.ResetSpecialRegisters()
// AArch64 special registers
SP_EL0 = bits(64) UNKNOWN;
SP_EL1 = bits(64) UNKNOWN;
SPSR_EL1 = bits(32) UNKNOWN;
ELR_EL1 = bits(64) UNKNOWN;
if HaveEL(EL2) then
SP_EL2 = bits(64) UNKNOWN;
SPSR_EL2 = bits(32) UNKNOWN;
ELR_EL2 = bits(64) UNKNOWN;
if HaveEL(EL3) then
SP_EL3 = bits(64) UNKNOWN;
SPSR_EL3 = bits(32) UNKNOWN;
ELR_EL3 = bits(64) UNKNOWN;
// AArch32 special registers that are not architecturally mapped to AArch64 registers
if HaveAArch32EL(EL1) then
SPSR_fiq = bits(32) UNKNOWN;
SPSR_irq = bits(32) UNKNOWN;
SPSR_abt = bits(32) UNKNOWN;
SPSR_und = bits(32) UNKNOWN;
// External debug special registers
DLR_EL0 = bits(64) UNKNOWN;
DSPSR_EL0 = bits(32) UNKNOWN;
return;
AArch64.ResetSystemRegisters(boolean cold_reset);
// PC - non-assignment form
// ========================
// Read program counter.
bits(64) PC[]
return _PC;
// SP[] - assignment form
// ======================
// Write to stack pointer from either a 32-bit or a 64-bit value.
SP[] = bits(width) value
assert width IN {32,64};
if PSTATE.SP == '0' then
SP_EL0 = ZeroExtend(value);
else
case PSTATE.EL of
when EL0 SP_EL0 = ZeroExtend(value);
when EL1 SP_EL1 = ZeroExtend(value);
when EL2 SP_EL2 = ZeroExtend(value);
when EL3 SP_EL3 = ZeroExtend(value);
return;
// SP[] - non-assignment form
// ==========================
// Read stack pointer with implicit slice of 8, 16, 32 or 64 bits.
bits(width) SP[]
assert width IN {8,16,32,64};
if PSTATE.SP == '0' then
return SP_EL0<width-1:0>;
else
case PSTATE.EL of
when EL0 return SP_EL0<width-1:0>;
when EL1 return SP_EL1<width-1:0>;
when EL2 return SP_EL2<width-1:0>;
when EL3 return SP_EL3<width-1:0>;
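Both SP[] forms select the banked stack pointer the same way: PSTATE.SP == '0' forces SP_EL0 at every Exception level, otherwise the current EL's dedicated stack pointer is used. A minimal Python model of the selection (a sketch, not the architected register file):

# Informal model of SP[] banking.
def sp_register(pstate_sp, pstate_el):
    if pstate_sp == 0:
        return "SP_EL0"
    return {0: "SP_EL0", 1: "SP_EL1", 2: "SP_EL2", 3: "SP_EL3"}[pstate_el]

assert sp_register(0, 1) == "SP_EL0"   # EL1 using the EL0 stack pointer
assert sp_register(1, 1) == "SP_EL1"   # EL1 using its own stack pointer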
// V[] - assignment form
// =====================
// Write to SIMD&FP register with implicit extension from
// 8, 16, 32, 64 or 128 bits.
V[integer n] = bits(width) value
assert n >= 0 && n <= 31;
assert width IN {8,16,32,64,128};
integer vlen = if IsSVEEnabled(PSTATE.EL) then VL else 128;
if ConstrainUnpredictableBool(Unpredictable_SVEZEROUPPER) then
_Z[n] = ZeroExtend(value);
else
_Z[n]<vlen-1:0> = ZeroExtend(value);
// V[] - non-assignment form
// =========================
// Read from SIMD&FP register with implicit slice of 8, 16
// 32, 64 or 128 bits.
bits(width) V[integer n]
assert n >= 0 && n <= 31;
assert width IN {8,16,32,64,128};
return _Z[n]<width-1:0>;
// Vpart[] - non-assignment form
// =============================
// Reads a 128-bit SIMD&FP register in up to two parts:
// part 0 returns the bottom 8, 16, 32 or 64 bits of a value held in the register;
// part 1 returns the top half of the bottom 64 bits or the top half of the 128-bit
// value held in the register.
bits(width) Vpart[integer n, integer part]
assert n >= 0 && n <= 31;
assert part IN {0, 1};
if part == 0 then
assert width < 128;
return V[n];
else
assert width IN {32,64};
bits(128) vreg = V[n];
return vreg<(width * 2)-1:width>;
// Vpart[] - assignment form
// =========================
// Writes a 128-bit SIMD&FP register in up to two parts:
// part 0 zero extends a 8, 16, 32, or 64-bit value to fill the whole register;
// part 1 inserts a 64-bit value into the top half of the register.
Vpart[integer n, integer part] = bits(width) value
assert n >= 0 && n <= 31;
assert part IN {0, 1};
if part == 0 then
assert width < 128;
V[n] = value;
else
assert width == 64;
bits(64) vreg = V[n];
V[n] = value<63:0> : vreg;
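An informal Python model of the two Vpart[] writes on a 128-bit register held as an integer: part 0 behaves like a plain V[n] write and zero-extends, while part 1 replaces only the top 64 bits:

# Informal sketch, not architected pseudocode.
M64 = 0xFFFFFFFFFFFFFFFF

def vpart_write(vreg, part, value, width):
    if part == 0:
        assert width < 128
        return value & ((1 << width) - 1)        # zero-extends to 128 bits
    else:
        assert width == 64
        return ((value & M64) << 64) | (vreg & M64)

v = vpart_write(0, 0, 0x1122334455667788, 64)    # write bottom half
v = vpart_write(v, 1, 0x99AABBCCDDEEFF00, 64)    # insert top half
assert v == 0x99AABBCCDDEEFF001122334455667788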
// X[] - assignment form
// =====================
// Write to general-purpose register from either a 32-bit or a 64-bit value.
X[integer n] = bits(width) value
assert n >= 0 && n <= 31;
assert width IN {32,64};
if n != 31 then
_R[n] = ZeroExtend(value);
return;
// X[] - non-assignment form
// =========================
// Read from general-purpose register with implicit slice of 8, 16, 32 or 64 bits.
bits(width) X[integer n]
assert n >= 0 && n <= 31;
assert width IN {8,16,32,64};
if n != 31 then
return _R[n]<width-1:0>;
else
return Zeros(width);
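Register index 31 is the zero register in these accessors: reads return zero and writes are discarded. A minimal Python model of that convention (a sketch, not the architected register file):

# Informal model of the X[] accessors.
_regs = [0] * 31                                 # _R[0] .. _R[30]

def x_read(n, width=64):
    assert 0 <= n <= 31
    return _regs[n] & ((1 << width) - 1) if n != 31 else 0

def x_write(n, value, width=64):
    assert 0 <= n <= 31
    if n != 31:
        _regs[n] = value & ((1 << width) - 1)    # 32-bit writes zero-extend

x_write(31, 0xDEAD)
assert x_read(31) == 0                           # writes to XZR are ignored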
// AArch32.IsFPEnabled()
// =====================
boolean AArch32.IsFPEnabled(bits(2) el)
if el == EL0 && !ELUsingAArch32(EL1) then
return AArch64.IsFPEnabled(el);
if HaveEL(EL3) && ELUsingAArch32(EL3) && !IsSecure() then
// Check if access disabled in NSACR
if NSACR.cp10 == '0' then return FALSE;
if el IN {EL0, EL1} then
// Check if access disabled in CPACR
case CPACR.cp10 of
when 'x0' disabled = TRUE;
when '01' disabled = (el == EL0);
when '11' disabled = FALSE;
if disabled then return FALSE;
if el IN {EL0, EL1, EL2} then
if EL2Enabled() then
if !ELUsingAArch32(EL2) then
if CPTR_EL2.TFP == '1' then return FALSE;
else
if HCPTR.TCP10 == '1' then return FALSE;
if HaveEL(EL3) && !ELUsingAArch32(EL3) then
// Check if access disabled in CPTR_EL3
if CPTR_EL3.TFP == '1' then return FALSE;
return TRUE;
// AArch64.IsFPEnabled()
// =====================
boolean AArch64.IsFPEnabled(bits(2) el)
// Check if access disabled in CPACR_EL1
if el IN {EL0, EL1} then
// Check FP&SIMD at EL0/EL1
case CPACR[].FPEN of
when 'x0' disabled = TRUE;
when '01' disabled = (el == EL0);
when '11' disabled = FALSE;
if disabled then return FALSE;
// Check if access disabled in CPTR_EL2
if el IN {EL0, EL1, EL2} && EL2Enabled() then
if HaveVirtHostExt() && HCR_EL2.E2H == '1' then
if CPTR_EL2.FPEN == 'x0' then return FALSE;
else
if CPTR_EL2.TFP == '1' then return FALSE;
// Check if access disabled in CPTR_EL3
if HaveEL(EL3) then
if CPTR_EL3.TFP == '1' then return FALSE;
return TRUE;
// CeilPow2()
// ==========
// For a positive integer X, return the smallest power of 2 >= X
integer CeilPow2(integer x)
if x == 0 then return 0;
if x == 1 then return 2;
return FloorPow2(x - 1) * 2;
// CheckSVEEnabled()
// =================
CheckSVEEnabled()
// Check if access disabled in CPACR_EL1
if PSTATE.EL IN {EL0, EL1} then
// Check SVE at EL0/EL1
case CPACR[].ZEN of
when 'x0' disabled = TRUE;
when '01' disabled = PSTATE.EL == EL0;
when '11' disabled = FALSE;
if disabled then SVEAccessTrap(EL1);
// Check FP&SIMD at EL0/EL1
case CPACR[].FPEN of
when 'x0' disabled = TRUE;
when '01' disabled = PSTATE.EL == EL0;
when '11' disabled = FALSE;
if disabled then AArch64.AdvSIMDFPAccessTrap(EL1);
if PSTATE.EL IN {EL0, EL1, EL2} && EL2Enabled() then
if HaveVirtHostExt() && HCR_EL2.E2H == '1' then
if CPTR_EL2.ZEN == 'x0' then SVEAccessTrap(EL2);
if CPTR_EL2.FPEN == 'x0' then AArch64.AdvSIMDFPAccessTrap(EL2);
else
if CPTR_EL2.TZ == '1' then SVEAccessTrap(EL2);
if CPTR_EL2.TFP == '1' then AArch64.AdvSIMDFPAccessTrap(EL2);
// Check if access disabled in CPTR_EL3
if HaveEL(EL3) then
if CPTR_EL3.EZ == '0' then SVEAccessTrap(EL3);
if CPTR_EL3.TFP == '1' then AArch64.AdvSIMDFPAccessTrap(EL3);
// DecodePredCount()
// =================
integer DecodePredCount(bits(5) pattern, integer esize)
integer elements = VL DIV esize;
integer numElem;
case pattern of
when '00000' numElem = FloorPow2(elements);
when '00001' numElem = if elements >= 1 then 1 else 0;
when '00010' numElem = if elements >= 2 then 2 else 0;
when '00011' numElem = if elements >= 3 then 3 else 0;
when '00100' numElem = if elements >= 4 then 4 else 0;
when '00101' numElem = if elements >= 5 then 5 else 0;
when '00110' numElem = if elements >= 6 then 6 else 0;
when '00111' numElem = if elements >= 7 then 7 else 0;
when '01000' numElem = if elements >= 8 then 8 else 0;
when '01001' numElem = if elements >= 16 then 16 else 0;
when '01010' numElem = if elements >= 32 then 32 else 0;
when '01011' numElem = if elements >= 64 then 64 else 0;
when '01100' numElem = if elements >= 128 then 128 else 0;
when '01101' numElem = if elements >= 256 then 256 else 0;
when '11101' numElem = elements - (elements MOD 4);
when '11110' numElem = elements - (elements MOD 3);
when '11111' numElem = elements;
otherwise numElem = 0;
return numElem;
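An informal Python sketch of the pattern decode, assuming a 256-bit vector length; the comments use the SVE assembler pattern mnemonics (POW2, VLn, MUL4, MUL3, ALL):

# Informal sketch, not architected pseudocode; vl is assumed to be 256.
def decode_pred_count(pattern, esize, vl=256):
    elements = vl // esize
    if pattern == 0b00000:                     # POW2: FloorPow2(elements)
        return 1 << (elements.bit_length() - 1) if elements else 0
    if 0b00001 <= pattern <= 0b01000:          # VL1 .. VL8
        n = pattern
        return n if elements >= n else 0
    if 0b01001 <= pattern <= 0b01101:          # VL16, VL32, ..., VL256
        n = 16 << (pattern - 0b01001)
        return n if elements >= n else 0
    if pattern == 0b11101: return elements - elements % 4   # MUL4
    if pattern == 0b11110: return elements - elements % 3   # MUL3
    if pattern == 0b11111: return elements                  # ALL
    return 0                                   # unallocated patterns

assert decode_pred_count(0b11111, 8) == 32     # ALL, byte elements
assert decode_pred_count(0b01010, 8) == 32     # VL32 fits exactly
assert decode_pred_count(0b01011, 64) == 0     # VL64 does not fit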
// ElemFFR[] - non-assignment form
// ===============================
bit ElemFFR[integer e, integer esize]
return ElemP[_FFR, e, esize];
// ElemFFR[] - assignment form
// ===========================
ElemFFR[integer e, integer esize] = bit value
integer psize = esize DIV 8;
integer n = e * psize;
assert n >= 0 && (n + psize) <= PL;
_FFR<n+psize-1:n> = ZeroExtend(value, psize);
return;
// ElemP[] - non-assignment form
// =============================
bit ElemP[bits(N) pred, integer e, integer esize]
integer n = e * (esize DIV 8);
assert n >= 0 && n < N;
return pred<n>;
// ElemP[] - assignment form
// =========================
ElemP[bits(N) &pred, integer e, integer esize] = bit value
integer psize = esize DIV 8;
integer n = e * psize;
assert n >= 0 && (n + psize) <= N;
pred<n+psize-1:n> = ZeroExtend(value, psize);
return;
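A predicate register holds one bit per byte of the vector, so the flag for element e of esize bits sits at bit e * (esize DIV 8) and the rest of the element's field is written as zeros. A minimal Python model of the two ElemP[] forms:

# Informal model, not architected pseudocode.
def elem_p_read(pred, e, esize):
    return (pred >> (e * (esize // 8))) & 1

def elem_p_write(pred, e, esize, value):
    psize = esize // 8
    n = e * psize
    pred &= ~(((1 << psize) - 1) << n)     # clear the element's field
    return pred | (value << n)             # ZeroExtend(value, psize)

p = 0
p = elem_p_write(p, 0, 32, 1)              # element 0 of 32-bit elements
p = elem_p_write(p, 2, 32, 1)              # element 2
assert p == 0x101
assert elem_p_read(p, 2, 32) == 1 and elem_p_read(p, 1, 32) == 0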
// FFR[] - non-assignment form
// ===========================
bits(width) FFR[]
assert width == PL;
return _FFR<width-1:0>;
// FFR[] - assignment form
// =======================
FFR[] = bits(width) value
assert width == PL;
if ConstrainUnpredictableBool(Unpredictable_SVEZEROUPPER) then
_FFR = ZeroExtend(value);
else
_FFR<width-1:0> = value;
// FPCompareNE()
// =============
boolean FPCompareNE(bits(N) op1, bits(N) op2, FPCRType fpcr)
assert N IN {16,32,64};
(type1,sign1,value1) = FPUnpack(op1, fpcr);
(type2,sign2,value2) = FPUnpack(op2, fpcr);
if type1==FPType_SNaN || type1==FPType_QNaN || type2==FPType_SNaN || type2==FPType_QNaN then
result = TRUE;
if type1==FPType_SNaN || type2==FPType_SNaN then
FPProcessException(FPExc_InvalidOp, fpcr);
else // All non-NaN cases can be evaluated on the values produced by FPUnpack()
result = (value1 != value2);
return result;
// FPCompareUN()
// =============
boolean FPCompareUN(bits(N) op1, bits(N) op2, FPCRType fpcr)
assert N IN {16,32,64};
(type1,sign1,value1) = FPUnpack(op1, fpcr);
(type2,sign2,value2) = FPUnpack(op2, fpcr);
if type1==FPType_SNaN || type2==FPType_SNaN then
FPProcessException(FPExc_InvalidOp, fpcr);
return (type1==FPType_SNaN || type1==FPType_QNaN || type2==FPType_SNaN || type2==FPType_QNaN);
// FPConvertSVE()
// ==============
bits(M) FPConvertSVE(bits(N) op, FPCRType fpcr, FPRounding rounding)
fpcr.AHP = '0';
return FPConvert(op, fpcr, rounding);
// FPConvertSVE()
// ==============
bits(M) FPConvertSVE(bits(N) op, FPCRType fpcr)
fpcr.AHP = '0';
return FPConvert(op, fpcr, FPRoundingMode(fpcr));
// FPCompareUN()
// =============
// FPExpA()
// ========
bits(N) FPExpA(bits(N) op)
assert N IN {16,32,64};
bits(N) result;
bits(N) coeff;
integer idx = if N == 16 then UInt(op<4:0>) else UInt(op<5:0>);
coeff = FPExpCoefficient[idx];
if N == 16 then
result<15:0> = '0':op<9:5>:coeff<9:0>;
elsif N == 32 then
result<31:0> = '0':op<13:6>:coeff<22:0>;
else // N == 64
result<63:0> = '0':op<16:6>:coeff<51:0>;
return result;
// FPExpCoefficient()
// ==================
bits(N) FPExpCoefficient[integer index]
assert N IN {16,32,64};
integer result;
if N == 16 then
case index of
when 0 result = 0x0000;
when 1 result = 0x0016;
when 2 result = 0x002d;
when 3 result = 0x0045;
when 4 result = 0x005d;
when 5 result = 0x0075;
when 6 result = 0x008e;
when 7 result = 0x00a8;
when 8 result = 0x00c2;
when 9 result = 0x00dc;
when 10 result = 0x00f8;
when 11 result = 0x0114;
when 12 result = 0x0130;
when 13 result = 0x014d;
when 14 result = 0x016b;
when 15 result = 0x0189;
when 16 result = 0x01a8;
when 17 result = 0x01c8;
when 18 result = 0x01e8;
when 19 result = 0x0209;
when 20 result = 0x022b;
when 21 result = 0x024e;
when 22 result = 0x0271;
when 23 result = 0x0295;
when 24 result = 0x02ba;
when 25 result = 0x02e0;
when 26 result = 0x0306;
when 27 result = 0x032e;
when 28 result = 0x0356;
when 29 result = 0x037f;
when 30 result = 0x03a9;
when 31 result = 0x03d4;
elsif N == 32 then
case index of
when 0 result = 0x000000;
when 1 result = 0x0164d2;
when 2 result = 0x02cd87;
when 3 result = 0x043a29;
when 4 result = 0x05aac3;
when 5 result = 0x071f62;
when 6 result = 0x08980f;
when 7 result = 0x0a14d5;
when 8 result = 0x0b95c2;
when 9 result = 0x0d1adf;
when 10 result = 0x0ea43a;
when 11 result = 0x1031dc;
when 12 result = 0x11c3d3;
when 13 result = 0x135a2b;
when 14 result = 0x14f4f0;
when 15 result = 0x16942d;
when 16 result = 0x1837f0;
when 17 result = 0x19e046;
when 18 result = 0x1b8d3a;
when 19 result = 0x1d3eda;
when 20 result = 0x1ef532;
when 21 result = 0x20b051;
when 22 result = 0x227043;
when 23 result = 0x243516;
when 24 result = 0x25fed7;
when 25 result = 0x27cd94;
when 26 result = 0x29a15b;
when 27 result = 0x2b7a3a;
when 28 result = 0x2d583f;
when 29 result = 0x2f3b79;
when 30 result = 0x3123f6;
when 31 result = 0x3311c4;
when 32 result = 0x3504f3;
when 33 result = 0x36fd92;
when 34 result = 0x38fbaf;
when 35 result = 0x3aff5b;
when 36 result = 0x3d08a4;
when 37 result = 0x3f179a;
when 38 result = 0x412c4d;
when 39 result = 0x4346cd;
when 40 result = 0x45672a;
when 41 result = 0x478d75;
when 42 result = 0x49b9be;
when 43 result = 0x4bec15;
when 44 result = 0x4e248c;
when 45 result = 0x506334;
when 46 result = 0x52a81e;
when 47 result = 0x54f35b;
when 48 result = 0x5744fd;
when 49 result = 0x599d16;
when 50 result = 0x5bfbb8;
when 51 result = 0x5e60f5;
when 52 result = 0x60ccdf;
when 53 result = 0x633f89;
when 54 result = 0x65b907;
when 55 result = 0x68396a;
when 56 result = 0x6ac0c7;
when 57 result = 0x6d4f30;
when 58 result = 0x6fe4ba;
when 59 result = 0x728177;
when 60 result = 0x75257d;
when 61 result = 0x77d0df;
when 62 result = 0x7a83b3;
when 63 result = 0x7d3e0c;
else // N == 64
case index of
when 0 result = 0x0000000000000;
when 1 result = 0x02C9A3E778061;
when 2 result = 0x059B0D3158574;
when 3 result = 0x0874518759BC8;
when 4 result = 0x0B5586CF9890F;
when 5 result = 0x0E3EC32D3D1A2;
when 6 result = 0x11301D0125B51;
when 7 result = 0x1429AAEA92DE0;
when 8 result = 0x172B83C7D517B;
when 9 result = 0x1A35BEB6FCB75;
when 10 result = 0x1D4873168B9AA;
when 11 result = 0x2063B88628CD6;
when 12 result = 0x2387A6E756238;
when 13 result = 0x26B4565E27CDD;
when 14 result = 0x29E9DF51FDEE1;
when 15 result = 0x2D285A6E4030B;
when 16 result = 0x306FE0A31B715;
when 17 result = 0x33C08B26416FF;
when 18 result = 0x371A7373AA9CB;
when 19 result = 0x3A7DB34E59FF7;
when 20 result = 0x3DEA64C123422;
when 21 result = 0x4160A21F72E2A;
when 22 result = 0x44E086061892D;
when 23 result = 0x486A2B5C13CD0;
when 24 result = 0x4BFDAD5362A27;
when 25 result = 0x4F9B2769D2CA7;
when 26 result = 0x5342B569D4F82;
when 27 result = 0x56F4736B527DA;
when 28 result = 0x5AB07DD485429;
when 29 result = 0x5E76F15AD2148;
when 30 result = 0x6247EB03A5585;
when 31 result = 0x6623882552225;
when 32 result = 0x6A09E667F3BCD;
when 33 result = 0x6DFB23C651A2F;
when 34 result = 0x71F75E8EC5F74;
when 35 result = 0x75FEB564267C9;
when 36 result = 0x7A11473EB0187;
when 37 result = 0x7E2F336CF4E62;
when 38 result = 0x82589994CCE13;
when 39 result = 0x868D99B4492ED;
when 40 result = 0x8ACE5422AA0DB;
when 41 result = 0x8F1AE99157736;
when 42 result = 0x93737B0CDC5E5;
when 43 result = 0x97D829FDE4E50;
when 44 result = 0x9C49182A3F090;
when 45 result = 0xA0C667B5DE565;
when 46 result = 0xA5503B23E255D;
when 47 result = 0xA9E6B5579FDBF;
when 48 result = 0xAE89F995AD3AD;
when 49 result = 0xB33A2B84F15FB;
when 50 result = 0xB7F76F2FB5E47;
when 51 result = 0xBCC1E904BC1D2;
when 52 result = 0xC199BDD85529C;
when 53 result = 0xC67F12E57D14B;
when 54 result = 0xCB720DCEF9069;
when 55 result = 0xD072D4A07897C;
when 56 result = 0xD5818DCFBA487;
when 57 result = 0xDA9E603DB3285;
when 58 result = 0xDFC97337B9B5F;
when 59 result = 0xE502EE78B3FF6;
when 60 result = 0xEA4AFA2A490DA;
when 61 result = 0xEFA1BEE615A27;
when 62 result = 0xF50765B6E4540;
when 63 result = 0xFA7C1819E90D8;
return result<N-1:0>;
// FPMinNormal()
// =============
bits(N) FPMinNormal(bit sign)
assert N IN {16,32,64};
constant integer E = (if N == 16 then 5 elsif N == 32 then 8 else 11);
constant integer F = N - (E + 1);
exp = Zeros(E-1):'1';
frac = Zeros(F);
return sign : exp : frac;
// FPOne()
// =======
bits(N) FPOne(bit sign)
assert N IN {16,32,64};
constant integer E = (if N == 16 then 5 elsif N == 32 then 8 else 11);
constant integer F = N - (E + 1);
exp = '0':Ones(E-1);
frac = Zeros(F);
return sign : exp : frac;
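As an informal check of these constant constructors: FPOne() builds the IEEE 754 encoding with biased exponent equal to the bias and a zero fraction. In Python:

# Informal check, not architected pseudocode.
def fp_one(sign, n):
    e = {16: 5, 32: 8, 64: 11}[n]
    f = n - (e + 1)
    exp = (1 << (e - 1)) - 1               # '0' : Ones(E-1)
    return (sign << (n - 1)) | (exp << f)

assert fp_one(0, 16) == 0x3C00
assert fp_one(0, 32) == 0x3F800000
assert fp_one(1, 64) == 0xBFF0000000000000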
// FPPointFive()
// =============
bits(N) FPPointFive(bit sign)
assert N IN {16,32,64};
constant integer E = (if N == 16 then 5 elsif N == 32 then 8 else 11);
constant integer F = N - (E + 1);
exp = '0':Ones(E-2):'0';
frac = Zeros(F);
return sign : exp : frac;
// FPProcess()
// ===========
bits(N) FPProcess(bits(N) input)
bits(N) result;
assert N IN {16,32,64};
(fptype,sign,value) = FPUnpack(input, FPCR);
if fptype == FPType_SNaN || fptype == FPType_QNaN then
result = FPProcessNaN(fptype, input, FPCR);
elsif fptype == FPType_Infinity then
result = FPInfinity(sign);
elsif fptype == FPType_Zero then
result = FPZero(sign);
else
result = FPRound(value, FPCR);
return result;
// FPScale()
// =========
bits(N) FPScale(bits(N) op, integer scale, FPCRType fpcr)
assert N IN {16,32,64};
(fptype,sign,value) = FPUnpack(op, fpcr);
if fptype == FPType_SNaN || fptype == FPType_QNaN then
result = FPProcessNaN(fptype, op, fpcr);
elsif fptype == FPType_Zero then
result = FPZero(sign);
elsif fptype == FPType_Infinity then
result = FPInfinity(sign);
else
result = FPRound(value * (2.0^scale), fpcr);
return result;
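For finite non-zero inputs the arithmetic of FPScale() is multiplication by 2^scale followed by rounding, which Python exposes as math.ldexp; the NaN, zero, infinity and FPCR-controlled behaviour above is what this sketch omits:

# Informal illustration only.
import math

assert math.ldexp(1.5, 3) == 12.0          # 1.5 * 2^3
assert math.ldexp(12.0, -3) == 1.5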
// FPTrigMAdd()
// ============
bits(N) FPTrigMAdd(integer x, bits(N) op1, bits(N) op2, FPCRType fpcr)
assert N IN {16,32,64};
assert x >= 0;
assert x < 8;
bits(N) coeff;
if op2<N-1> == '1' then
x = x + 8;
op2<N-1> = '0';
coeff = FPTrigMAddCoefficient[x];
result = FPMulAdd(coeff, op1, op2, fpcr);
return result;
// FPTrigMAddCoefficient()
// =======================
bits(N) FPTrigMAddCoefficient[integer index]
assert N IN {16,32,64};
integer result;
if N == 16 then
case index of
when 0 result = 0x3c00;
when 1 result = 0xb155;
when 2 result = 0x2030;
when 3 result = 0x0000;
when 4 result = 0x0000;
when 5 result = 0x0000;
when 6 result = 0x0000;
when 7 result = 0x0000;
when 8 result = 0x3c00;
when 9 result = 0xb800;
when 10 result = 0x293a;
when 11 result = 0x0000;
when 12 result = 0x0000;
when 13 result = 0x0000;
when 14 result = 0x0000;
when 15 result = 0x0000;
elsif N == 32 then
case index of
when 0 result = 0x3f800000;
when 1 result = 0xbe2aaaab;
when 2 result = 0x3c088886;
when 3 result = 0xb95008b9;
when 4 result = 0x36369d6d;
when 5 result = 0x00000000;
when 6 result = 0x00000000;
when 7 result = 0x00000000;
when 8 result = 0x3f800000;
when 9 result = 0xbf000000;
when 10 result = 0x3d2aaaa6;
when 11 result = 0xbab60705;
when 12 result = 0x37cd37cc;
when 13 result = 0x00000000;
when 14 result = 0x00000000;
when 15 result = 0x00000000;
else // N == 64
case index of
when 0 result = 0x3ff0000000000000;
when 1 result = 0xbfc5555555555543;
when 2 result = 0x3f8111111110f30c;
when 3 result = 0xbf2a01a019b92fc6;
when 4 result = 0x3ec71de351f3d22b;
when 5 result = 0xbe5ae5e2b60f7b91;
when 6 result = 0x3de5d8408868552f;
when 7 result = 0x0000000000000000;
when 8 result = 0x3ff0000000000000;
when 9 result = 0xbfe0000000000000;
when 10 result = 0x3fa5555555555536;
when 11 result = 0xbf56c16c16c13a0b;
when 12 result = 0x3efa01a019b1e8d8;
when 13 result = 0xbe927e4f7282f468;
when 14 result = 0x3e21ee96d2641b13;
when 15 result = 0xbda8f76380fbb401;
return result<N-1:0>;
// FPTrigSMul()
// ============
bits(N) FPTrigSMul(bits(N) op1, bits(N) op2, FPCRType fpcr)
assert N IN {16,32,64};
result = FPMul(op1, op1, fpcr);
(fptype, sign, value) = FPUnpack(result, fpcr);
if (fptype != FPType_QNaN) && (fptype != FPType_SNaN) then
result<N-1> = op2<0>;
return result;
// FPTrigSSel()
// ============

bits(N) FPTrigSSel(bits(N) op1, bits(N) op2)
    assert N IN {16,32,64};
    bits(N) result;

    if op2<0> == '1' then
        result = FPOne(op2<1>);
    else
        result = op1;
        result<N-1> = result<N-1> EOR op2<1>;
    return result;
// FirstActive()
// =============

bit FirstActive(bits(N) mask, bits(N) x, integer esize)
    integer elements = N DIV (esize DIV 8);
    for e = 0 to elements-1
        if ElemP[mask, e, esize] == '1' then return ElemP[x, e, esize];
    return '0';
// FloorPow2()
// ===========
// For a positive integer X, return the largest power of 2 <= X

integer FloorPow2(integer x)
    assert x >= 0;
    integer n = 1;
    if x == 0 then return 0;
    while x >= 2^n do
        n = n + 1;
    return 2^(n - 1);
// HaveSVE()
// =========

boolean HaveSVE()
    return HasArchVersion(ARMv8p2) && boolean IMPLEMENTATION_DEFINED "Have SVE ISA";
// ImplementedSVEVectorLength()
// ============================
// Reduce SVE vector length to a supported value (e.g. power of two)

integer ImplementedSVEVectorLength(integer nbits)
    return integer IMPLEMENTATION_DEFINED;
// IsEven()
// ========

boolean IsEven(integer val)
    return val MOD 2 == 0;
// IsFPEnabled()
// =============

boolean IsFPEnabled(bits(2) el)
    if ELUsingAArch32(el) then
        return AArch32.IsFPEnabled(el);
    else
        return AArch64.IsFPEnabled(el);
// IsSVEEnabled()
// ==============

boolean IsSVEEnabled(bits(2) el)
    if ELUsingAArch32(el) then
        return FALSE;

    // Check if access disabled in CPACR_EL1
    if el IN {EL0, EL1} then
        // Check SVE at EL0/EL1
        case CPACR[].ZEN of
            when 'x0' disabled = TRUE;
            when '01' disabled = (el == EL0);
            when '11' disabled = FALSE;
        if disabled then return FALSE;

    // Check if access disabled in CPTR_EL2
    if el IN {EL0, EL1, EL2} && EL2Enabled() then
        if HaveVirtHostExt() && HCR_EL2.E2H == '1' then
            if CPTR_EL2.ZEN == 'x0' then return FALSE;
        else
            if CPTR_EL2.TZ == '1' then return FALSE;

    // Check if access disabled in CPTR_EL3
    if HaveEL(EL3) then
        if CPTR_EL3.EZ == '0' then return FALSE;

    return TRUE;
// LastActive()
// ============

bit LastActive(bits(N) mask, bits(N) x, integer esize)
    integer elements = N DIV (esize DIV 8);
    for e = elements-1 downto 0
        if ElemP[mask, e, esize] == '1' then return ElemP[x, e, esize];
    return '0';
// LastActiveElement()
// ===================

integer LastActiveElement(bits(N) mask, integer esize)
    assert esize IN {8, 16, 32, 64};
    integer elements = VL DIV esize;
    for e = elements-1 downto 0
        if ElemP[mask, e, esize] == '1' then return e;
    return -1;
constant integer MAX_PL = 256;
constant integer MAX_VL = 2048;
// MaybeZeroSVEUppers()
// ====================

MaybeZeroSVEUppers(bits(2) target_el)
    boolean lower_enabled;

    if UInt(target_el) <= UInt(PSTATE.EL) || !IsSVEEnabled(target_el) then
        return;

    if target_el == EL3 then
        if EL2Enabled() then
            lower_enabled = IsFPEnabled(EL2);
        else
            lower_enabled = IsFPEnabled(EL1);
    else
        lower_enabled = IsFPEnabled(target_el - 1);

    if lower_enabled then
        integer vl = if IsSVEEnabled(PSTATE.EL) then VL else 128;
        integer pl = vl DIV 8;
        for n = 0 to 31
            if ConstrainUnpredictableBool(Unpredictable_SVEZEROUPPER) then
                _Z[n] = ZeroExtend(_Z[n]<vl-1:0>);
        for n = 0 to 15
            if ConstrainUnpredictableBool(Unpredictable_SVEZEROUPPER) then
                _P[n] = ZeroExtend(_P[n]<pl-1:0>);
        if ConstrainUnpredictableBool(Unpredictable_SVEZEROUPPER) then
            _FFR = ZeroExtend(_FFR<pl-1:0>);
// MemNF[] - non-assignment form
// =============================

(bits(8*size), boolean) MemNF[bits(64) address, integer size, AccType acctype]
    assert size IN {1, 2, 4, 8, 16};
    bits(8*size) value;

    aligned = (address == Align(address, size));
    A = SCTLR[].A;

    if !aligned && (A == '1') then
        return (bits(8*size) UNKNOWN, TRUE);

    atomic = aligned || size == 1;

    if !atomic then
        (value<7:0>, bad) = MemSingleNF[address, 1, acctype, aligned];
        if bad then
            return (bits(8*size) UNKNOWN, TRUE);

        // For subsequent bytes it is CONSTRAINED UNPREDICTABLE whether an unaligned Device memory
        // access will generate an Alignment Fault, as to get this far means the first byte did
        // not, so we must be changing to a new translation page.
        if !aligned then
            c = ConstrainUnpredictable(Unpredictable_DEVPAGE2);
            assert c IN {Constraint_FAULT, Constraint_NONE};
            if c == Constraint_NONE then aligned = TRUE;

        for i = 1 to size-1
            (value<8*i+7:8*i>, bad) = MemSingleNF[address+i, 1, acctype, aligned];
            if bad then
                return (bits(8*size) UNKNOWN, TRUE);
    else
        (value, bad) = MemSingleNF[address, size, acctype, aligned];
        if bad then
            return (bits(8*size) UNKNOWN, TRUE);

    if BigEndian() then
        value = BigEndianReverse(value);
    return (value, FALSE);
// MemSingleNF[] - non-assignment form
// ===================================

(bits(8*size), boolean) MemSingleNF[bits(64) address, integer size, AccType acctype, boolean wasaligned]
    bits(8*size) value;
    boolean iswrite = FALSE;
    AddressDescriptor memaddrdesc;

    // Implementation may suppress NF load for any reason
    if ConstrainUnpredictableBool(Unpredictable_NONFAULT) then
        return (bits(8*size) UNKNOWN, TRUE);

    // MMU or MPU
    memaddrdesc = AArch64.TranslateAddress(address, acctype, iswrite, wasaligned, size);

    // Non-fault load from Device memory must not be performed externally
    if memaddrdesc.memattrs.memtype == MemType_Device then
        return (bits(8*size) UNKNOWN, TRUE);

    // Check for aborts or debug exceptions
    if IsFault(memaddrdesc) then
        return (bits(8*size) UNKNOWN, TRUE);

    // Memory array access
    accdesc = CreateAccessDescriptor(acctype);
    if HaveMTEExt() then
        if AArch64.AccessIsTagChecked(address, acctype) then
            bits(4) ptag = AArch64.TransformTag(address);
            if !AArch64.CheckTag(memaddrdesc, ptag, iswrite) then
                return (bits(8*size) UNKNOWN, TRUE);
    value = _Mem[memaddrdesc, size, accdesc];

    return (value, FALSE);
// NoneActive()
// ============

bit NoneActive(bits(N) mask, bits(N) x, integer esize)
    integer elements = N DIV (esize DIV 8);
    for e = 0 to elements-1
        if ElemP[mask, e, esize] == '1' && ElemP[x, e, esize] == '1' then return '0';
    return '1';
// P[] - non-assignment form
// =========================

bits(width) P[integer n]
    assert n >= 0 && n <= 31;
    assert width == PL;
    return _P[n]<width-1:0>;

// P[] - assignment form
// =====================

P[integer n] = bits(width) value
    assert n >= 0 && n <= 31;
    assert width == PL;
    if ConstrainUnpredictableBool(Unpredictable_SVEZEROUPPER) then
        _P[n] = ZeroExtend(value);
    else
        _P[n]<width-1:0> = value;
// PL - non-assignment form
// ========================

integer PL
    return VL DIV 8;
// PredTest()
// ==========

bits(4) PredTest(bits(N) mask, bits(N) result, integer esize)
    bit n = FirstActive(mask, result, esize);
    bit z = NoneActive(mask, result, esize);
    bit c = NOT LastActive(mask, result, esize);
    bit v = '0';
    return n:z:c:v;
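Informally, PredTest packs the SVE condition flags: N is the first active element's predicate bit, Z is set when no active element is true, C is the complement of the last active element's bit, and V is zero. An illustrative C sketch over one predicate bit per element:

#include <stdbool.h>
#include <stdint.h>

/* mask[e] marks active elements; res[e] is the predicate result bit. */
uint8_t predtest_model(const bool *mask, const bool *res, int elements)
{
    bool n = false, z = true, c = true, first = true;
    for (int e = 0; e < elements; e++) {
        if (!mask[e])
            continue;
        if (first) { n = res[e]; first = false; }  /* N: first active */
        if (res[e]) z = false;                     /* Z: no active true */
        c = !res[e];                               /* C: NOT last active */
    }
    return (uint8_t)((n << 3) | (z << 2) | (c << 1));  /* NZCV, V == 0 */
}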
// ReducePredicated()
// ==================

bits(esize) ReducePredicated(ReduceOp op, bits(N) input, bits(M) mask, bits(esize) identity)
    assert(N == M * 8);
    integer p2bits = CeilPow2(N);
    bits(p2bits) operand;
    integer elements = p2bits DIV esize;

    for e = 0 to elements-1
        if e * esize < N && ElemP[mask, e, esize] == '1' then
            Elem[operand, e, esize] = Elem[input, e, esize];
        else
            Elem[operand, e, esize] = identity;
    return Reduce(op, operand, esize);
// Reverse()
// =========
// Reverse subwords of M bits in an N-bit word

bits(N) Reverse(bits(N) word, integer M)
    bits(N) result;
    integer sw = N DIV M;
    assert N == sw * M;
    for s = 0 to sw-1
        Elem[result, sw - 1 - s, M] = Elem[word, s, M];
    return result;
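A C model of the same subword reversal for N == 64 (illustrative; the pseudocode is polymorphic in N and M):

#include <stdint.h>

uint64_t reverse_subwords(uint64_t word, int m)   /* m divides 64 */
{
    int sw = 64 / m;
    uint64_t mask = (m == 64) ? ~0ULL : ((1ULL << m) - 1);
    uint64_t result = 0;
    for (int s = 0; s < sw; s++) {
        uint64_t elem = (word >> (s * m)) & mask;
        result |= elem << ((sw - 1 - s) * m);     /* subword s -> sw-1-s */
    }
    return result;
}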
// SVEAccessTrap()
// ===============
// Trapped access to SVE registers due to CPACR_EL1, CPTR_EL2, or CPTR_EL3.

SVEAccessTrap(bits(2) target_el)
    assert UInt(target_el) >= UInt(PSTATE.EL) && target_el != EL0 && HaveEL(target_el);

    route_to_el2 = target_el == EL1 && EL2Enabled() && HCR_EL2.TGE == '1';

    exception = ExceptionSyndrome(Exception_SVEAccessTrap);
    bits(64) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x0;

    if route_to_el2 then
        AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
    else
        AArch64.TakeException(target_el, exception, preferred_exception_return, vect_offset);
enumeration SVECmp { Cmp_EQ, Cmp_NE, Cmp_GE, Cmp_GT, Cmp_LT, Cmp_LE, Cmp_UN };
// SVEMoveMaskPreferred()
// ======================
// Return FALSE if a bitmask immediate encoding would generate an immediate
// value that could also be represented by a single DUP instruction.
// Used as a condition for the preferred MOV<-DUPM alias.

boolean SVEMoveMaskPreferred(bits(13) imm13)
    bits(64) imm;
    (imm, -) = DecodeBitMasks(imm13<12>, imm13<5:0>, imm13<11:6>, TRUE);

    // Check for 8 bit immediates
    if !IsZero(imm<7:0>) then
        // Check for 'ffffffffffffffxy' or '00000000000000xy'
        if IsZero(imm<63:7>) || IsOnes(imm<63:7>) then
            return FALSE;
        // Check for 'ffffffxyffffffxy' or '000000xy000000xy'
        if imm<63:32> == imm<31:0> && (IsZero(imm<31:7>) || IsOnes(imm<31:7>)) then
            return FALSE;
        // Check for 'ffxyffxyffxyffxy' or '00xy00xy00xy00xy'
        if imm<63:32> == imm<31:0> && imm<31:16> == imm<15:0> && (IsZero(imm<15:7>) || IsOnes(imm<15:7>)) then
            return FALSE;
        // Check for 'xyxyxyxyxyxyxyxy'
        if imm<63:32> == imm<31:0> && imm<31:16> == imm<15:0> && (imm<15:8> == imm<7:0>) then
            return FALSE;
    // Check for 16 bit immediates
    else
        // Check for 'ffffffffffffxy00' or '000000000000xy00'
        if IsZero(imm<63:15>) || IsOnes(imm<63:15>) then
            return FALSE;
        // Check for 'ffffxy00ffffxy00' or '0000xy000000xy00'
        if imm<63:32> == imm<31:0> && (IsZero(imm<31:7>) || IsOnes(imm<31:7>)) then
            return FALSE;
        // Check for 'xy00xy00xy00xy00'
        if imm<63:32> == imm<31:0> && imm<31:16> == imm<15:0> then
            return FALSE;
    return TRUE;
array bits(MAX_VL) _Z[0..31];
array bits(MAX_PL) _P[0..15];
bits(MAX_PL) _FFR;
// VL - non-assignment form
// ========================

integer VL
    integer vl;

    if PSTATE.EL == EL1 || (PSTATE.EL == EL0 && !IsInHost()) then
        vl = UInt(ZCR_EL1.LEN);

    if PSTATE.EL == EL2 || (PSTATE.EL == EL0 && IsInHost()) then
        vl = UInt(ZCR_EL2.LEN);
    elsif PSTATE.EL IN {EL0, EL1} && EL2Enabled() then
        vl = Min(vl, UInt(ZCR_EL2.LEN));

    if PSTATE.EL == EL3 then
        vl = UInt(ZCR_EL3.LEN);
    elsif HaveEL(EL3) then
        vl = Min(vl, UInt(ZCR_EL3.LEN));

    vl = (vl + 1) * 128;
    vl = ImplementedSVEVectorLength(vl);

    return vl;
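The net effect is: take the smallest applicable ZCR_ELx.LEN on the path to the current exception level, map it to (LEN+1)*128 bits, then let the implementation constrain it. An illustrative sketch; the constrain() callback is a stand-in for the IMPLEMENTATION DEFINED ImplementedSVEVectorLength(), not an architected function:

/* len_el1, len_el2, len_el3: applicable ZCR_ELx.LEN values; pass -1 for a
 * level that does not apply to the current exception level. */
int effective_vl(int len_el1, int len_el2, int len_el3,
                 int (*constrain)(int nbits))
{
    int len = len_el1;
    if (len_el2 >= 0 && (len < 0 || len_el2 < len)) len = len_el2;
    if (len_el3 >= 0 && (len < 0 || len_el3 < len)) len = len_el3;
    return constrain((len + 1) * 128);   /* LEN counts 128-bit granules */
}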
// Z[] - non-assignment form
// =========================

bits(width) Z[integer n]
    assert n >= 0 && n <= 31;
    assert width == VL;
    return _Z[n]<width-1:0>;

// Z[] - assignment form
// =====================

Z[integer n] = bits(width) value
    assert n >= 0 && n <= 31;
    assert width == VL;
    if ConstrainUnpredictableBool(Unpredictable_SVEZEROUPPER) then
        _Z[n] = ZeroExtend(value);
    else
        _Z[n]<width-1:0> = value;
// CNTKCTL[] - non-assignment form
// ===============================

CNTKCTLType CNTKCTL[]
    bits(32) r;
    if IsInHost() then
        r = CNTHCTL_EL2;
        return r;
    r = CNTKCTL_EL1;
    return r;
type CNTKCTLType;
// CPACR[] - non-assignment form
// =============================

CPACRType CPACR[]
    bits(32) r;
    if IsInHost() then
        r = CPTR_EL2;
        return r;
    r = CPACR_EL1;
    return r;
type CPACRType;

// ELR[] - non-assignment form
// ===========================

bits(64) ELR[bits(2) el]
    bits(64) r;
    case el of
        when EL1  r = ELR_EL1;
        when EL2  r = ELR_EL2;
        when EL3  r = ELR_EL3;
        otherwise Unreachable();
    return r;

// ELR[] - non-assignment form
// ===========================

bits(64) ELR[]
    assert PSTATE.EL != EL0;
    return ELR[PSTATE.EL];

// ELR[] - assignment form
// =======================

ELR[bits(2) el] = bits(64) value
    bits(64) r = value;
    case el of
        when EL1  ELR_EL1 = r;
        when EL2  ELR_EL2 = r;
        when EL3  ELR_EL3 = r;
        otherwise Unreachable();
    return;

// ELR[] - assignment form
// =======================

ELR[] = bits(64) value
    assert PSTATE.EL != EL0;
    ELR[PSTATE.EL] = value;
    return;
// ESR[] - non-assignment form
// ===========================

ESRType ESR[bits(2) regime]
    bits(32) r;
    case regime of
        when EL1  r = ESR_EL1;
        when EL2  r = ESR_EL2;
        when EL3  r = ESR_EL3;
        otherwise Unreachable();
    return r;

// ESR[] - non-assignment form
// ===========================

ESRType ESR[]
    return ESR[S1TranslationRegime()];

// ESR[] - assignment form
// =======================

ESR[bits(2) regime] = ESRType value
    bits(32) r = value;
    case regime of
        when EL1  ESR_EL1 = r;
        when EL2  ESR_EL2 = r;
        when EL3  ESR_EL3 = r;
        otherwise Unreachable();
    return;

// ESR[] - assignment form
// =======================

ESR[] = ESRType value
    ESR[S1TranslationRegime()] = value;
type ESRType;
// FAR[] - non-assignment form
// ===========================

bits(64) FAR[bits(2) regime]
    bits(64) r;
    case regime of
        when EL1  r = FAR_EL1;
        when EL2  r = FAR_EL2;
        when EL3  r = FAR_EL3;
        otherwise Unreachable();
    return r;

// FAR[] - non-assignment form
// ===========================

bits(64) FAR[]
    return FAR[S1TranslationRegime()];

// FAR[] - assignment form
// =======================

FAR[bits(2) regime] = bits(64) value
    bits(64) r = value;
    case regime of
        when EL1  FAR_EL1 = r;
        when EL2  FAR_EL2 = r;
        when EL3  FAR_EL3 = r;
        otherwise Unreachable();
    return;

// FAR[] - assignment form
// =======================

FAR[] = bits(64) value
    FAR[S1TranslationRegime()] = value;
    return;
// MAIR[] - non-assignment form
// ============================

MAIRType MAIR[bits(2) regime]
    bits(64) r;
    case regime of
        when EL1  r = MAIR_EL1;
        when EL2  r = MAIR_EL2;
        when EL3  r = MAIR_EL3;
        otherwise Unreachable();
    return r;

// MAIR[] - non-assignment form
// ============================

MAIRType MAIR[]
    return MAIR[S1TranslationRegime()];
type MAIRType;
// SCTLR[] - non-assignment form
// =============================

SCTLRType SCTLR[bits(2) regime]
    bits(64) r;
    case regime of
        when EL1  r = SCTLR_EL1;
        when EL2  r = SCTLR_EL2;
        when EL3  r = SCTLR_EL3;
        otherwise Unreachable();
    return r;

// SCTLR[] - non-assignment form
// =============================

SCTLRType SCTLR[]
    return SCTLR[S1TranslationRegime()];

type SCTLRType;
// VBAR[] - non-assignment form
// ============================

bits(64) VBAR[bits(2) regime]
    bits(64) r;
    case regime of
        when EL1  r = VBAR_EL1;
        when EL2  r = VBAR_EL2;
        when EL3  r = VBAR_EL3;
        otherwise Unreachable();
    return r;

// VBAR[] - non-assignment form
// ============================

bits(64) VBAR[]
    return VBAR[S1TranslationRegime()];
// AArch64.AllocationTagAccessIsEnabled()
// ======================================
// Check whether access to Allocation Tags is enabled.

boolean AArch64.AllocationTagAccessIsEnabled()
    if SCR_EL3.ATA == '0' && PSTATE.EL IN {EL0, EL1, EL2} then
        return FALSE;
    elsif HCR_EL2.ATA == '0' && PSTATE.EL IN {EL0, EL1} && EL2Enabled() && HCR_EL2.<E2H,TGE> != '11' then
        return FALSE;
    elsif SCTLR_EL3.ATA == '0' && PSTATE.EL == EL3 then
        return FALSE;
    elsif SCTLR_EL2.ATA == '0' && PSTATE.EL == EL2 then
        return FALSE;
    elsif SCTLR_EL1.ATA == '0' && PSTATE.EL == EL1 then
        return FALSE;
    elsif SCTLR_EL2.ATA0 == '0' && PSTATE.EL == EL0 && EL2Enabled() && HCR_EL2.<E2H,TGE> == '11' then
        return FALSE;
    elsif SCTLR_EL1.ATA0 == '0' && PSTATE.EL == EL0 && !(EL2Enabled() && HCR_EL2.<E2H,TGE> == '11') then
        return FALSE;
    else
        return TRUE;
// AArch64.CheckSystemAccess()
// ===========================
// Checks if an AArch64 MSR, MRS or SYS instruction is allowed from the current exception level and security state.
// Also checks for traps by TIDCP and NV access.

AArch64.CheckSystemAccess(bits(2) op0, bits(3) op1, bits(4) crn, bits(4) crm, bits(3) op2, bits(5) rt, bit read)
    boolean unallocated = FALSE;
    boolean need_secure = FALSE;
    bits(2) min_EL;

    // Check for traps by HCR_EL2.TIDCP
    if PSTATE.EL IN {EL0, EL1} && EL2Enabled() && HCR_EL2.TIDCP == '1' && op0 == 'x1' && crn == '1x11' then
        // At EL0, it is IMPLEMENTATION_DEFINED whether attempts to execute system
        // register access instructions with reserved encodings are trapped to EL2 or UNDEFINED
        rcs_el0_trap = boolean IMPLEMENTATION_DEFINED "Reserved Control Space EL0 Trapped";
        if PSTATE.EL == EL1 || rcs_el0_trap then
            AArch64.SystemAccessTrap(EL2, 0x18);   // Exception_SystemRegisterTrap

    // Check for unallocated encodings
    case op1 of
        when '00x', '010'
            min_EL = EL1;
        when '011'
            min_EL = EL0;
        when '100'
            min_EL = EL2;
        when '101'
            if !HaveVirtHostExt() then UNDEFINED;
            min_EL = EL2;
        when '110'
            min_EL = EL3;
        when '111'
            min_EL = EL1;
            need_secure = TRUE;

    if UInt(PSTATE.EL) < UInt(min_EL) then
        // Check for traps on read/write access to registers named _EL2, _EL02, _EL12 from non-secure EL1 when HCR_EL2.NV bit is set
        nv_access = HaveNVExt() && min_EL == EL2 && PSTATE.EL == EL1 && EL2Enabled() && HCR_EL2.NV == '1';
        if !nv_access then
            UNDEFINED;
    elsif need_secure && !IsSecure() then
        UNDEFINED;
// AArch64.ChooseNonExcludedTag()
// ==============================
// Return a tag derived from the start and the offset values, excluding
// any tags in the given mask.

bits(4) AArch64.ChooseNonExcludedTag(bits(4) tag, bits(4) offset, bits(16) exclude)
    if IsOnes(exclude) then
        return '0000';

    if offset == '0000' then
        while exclude<UInt(tag)> == '1' do
            tag = tag + '0001';

    while offset != '0000' do
        offset = offset - '0001';
        tag = tag + '0001';
        while exclude<UInt(tag)> == '1' do
            tag = tag + '0001';

    return tag;
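A direct C transcription (illustrative): tags are 4-bit values, so the arithmetic wraps modulo 16, and the exclude mask has one bit per tag. The loops terminate because the all-excluded case returns early.

#include <stdint.h>

uint8_t choose_nonexcluded_tag(uint8_t tag, uint8_t offset, uint16_t exclude)
{
    if (exclude == 0xffff)
        return 0;                        /* every tag excluded */
    tag &= 15;
    if ((offset & 15) == 0)
        while (exclude & (1u << tag))
            tag = (tag + 1) & 15;
    for (offset &= 15; offset != 0; offset--) {
        tag = (tag + 1) & 15;
        while (exclude & (1u << tag))    /* skip excluded tags */
            tag = (tag + 1) & 15;
    }
    return tag;
}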
// AArch64.ExecutingATS1xPInstr()
// ==============================
// Return TRUE if current instruction is AT S1E1R/WP

boolean AArch64.ExecutingATS1xPInstr()
    if !HavePrivATExt() then return FALSE;

    instr = ThisInstr();
    if instr<22+:10> == '1101010100' then
        op1 = instr<16+:3>;
        CRn = instr<12+:4>;
        CRm = instr<8+:4>;
        op2 = instr<5+:3>;
        return op1 == '000' && CRn == '0111' && CRm == '1001' && op2 IN {'000','001'};
    else
        return FALSE;
// AArch64.ExecutingBROrBLROrRetInstr()
// ====================================
// Returns TRUE if current instruction is a BR, BLR, RET, B[L]RA[B][Z], or RETA[B].

boolean AArch64.ExecutingBROrBLROrRetInstr()
    if !HaveBTIExt() then return FALSE;

    instr = ThisInstr();
    if instr<31:25> == '1101011' && instr<20:16> == '11111' then
        opc = instr<24:21>;
        return opc != '0101';
    else
        return FALSE;
// AArch64.ExecutingBTIInstr()
// ===========================
// Returns TRUE if current instruction is a BTI.

boolean AArch64.ExecutingBTIInstr()
    if !HaveBTIExt() then return FALSE;

    instr = ThisInstr();
    if instr<31:22> == '1101010100' && instr<21:12> == '0000110010' && instr<4:0> == '11111' then
        CRm = instr<11:8>;
        op2 = instr<7:5>;
        return (CRm == '0100' && op2<0> == '0');
    else
        return FALSE;
// AArch64.ExecutingERETInstr()
// ============================
// Returns TRUE if current instruction is ERET.

boolean AArch64.ExecutingERETInstr()
    instr = ThisInstr();
    return instr<31:12> == '11010110100111110000';
// AArch64.NextRandomTagBit()
// ==========================
// Generate a random bit suitable for generating a random Allocation Tag.

bit AArch64.NextRandomTagBit()
    bits(16) lfsr = RGSR_EL1.SEED;
    bit top = lfsr<5> EOR lfsr<3> EOR lfsr<2> EOR lfsr<0>;
    RGSR_EL1.SEED = top:lfsr<15:1>;
    return top;
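This is a 16-bit Fibonacci LFSR with taps at bits 5, 3, 2 and 0; the feedback bit is shifted in at the top and also returned. An illustrative C model, with a plain variable standing in for RGSR_EL1.SEED:

#include <stdint.h>

static uint16_t seed;                    /* models RGSR_EL1.SEED */

unsigned next_random_tag_bit(void)
{
    unsigned top = ((seed >> 5) ^ (seed >> 3) ^ (seed >> 2) ^ seed) & 1u;
    seed = (uint16_t)((top << 15) | (seed >> 1));
    return top;
}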
// AArch64.RandomTag()
// ===================
// Generate a random Allocation Tag.

bits(4) AArch64.RandomTag()
    bits(4) tag;
    for i = 0 to 3
        tag<i> = AArch64.NextRandomTagBit();
    return tag;
// Execute a system instruction with write (source operand).
AArch64.SysInstr(integer op0, integer op1, integer crn, integer crm, integer op2, bits(64) val);

// Execute a system instruction with read (result operand).
// Returns the result of the instruction.
bits(64) AArch64.SysInstrWithResult(integer op0, integer op1, integer crn, integer crm, integer op2);
// Read from a system register and return the contents of the register.
bits(64) AArch64.SysRegRead(integer op0, integer op1, integer crn, integer crm, integer op2);

// Write to a system register.
AArch64.SysRegWrite(integer op0, integer op1, integer crn, integer crm, integer op2, bits(64) val);

boolean BTypeCompatible;
// BTypeCompatible_BTI
// ===================
// This function determines whether a given hint encoding is compatible with the current value of
// PSTATE.BTYPE. A value of TRUE here indicates a valid Branch Target Identification instruction.

boolean BTypeCompatible_BTI(bits(2) hintcode)
    case hintcode of
        when '00'
            return FALSE;
        when '01'
            return PSTATE.BTYPE != '11';
        when '10'
            return PSTATE.BTYPE != '10';
        when '11'
            return TRUE;
// BTypeCompatible_PACIXSP()
// =========================
// Returns TRUE if a PACIASP or PACIBSP instruction is implicitly compatible with PSTATE.BTYPE,
// FALSE otherwise.

boolean BTypeCompatible_PACIXSP()
    if PSTATE.BTYPE IN {'01', '10'} then
        return TRUE;
    elsif PSTATE.BTYPE == '11' then
        index = if PSTATE.EL == EL0 then 35 else 36;
        return SCTLR[]<index> == '0';
    else
        return FALSE;
bits(2) BTypeNext;
boolean InGuardedPage;
// AArch64.ExceptionReturn()
// =========================

AArch64.ExceptionReturn(bits(64) new_pc, bits(32) spsr)
    SynchronizeContext();

    sync_errors = HaveIESB() && SCTLR[].IESB == '1';
    if HaveDoubleFaultExt() then
        sync_errors = sync_errors || (SCR_EL3.EA == '1' && SCR_EL3.NMEA == '1' && PSTATE.EL == EL3);
    if sync_errors then
        SynchronizeErrors();
        iesb_req = TRUE;
        TakeUnmaskedPhysicalSErrorInterrupts(iesb_req);

    // Attempts to change to an illegal state will invoke the Illegal Execution state mechanism
    SetPSTATEFromPSR(spsr);
    ClearExclusiveLocal(ProcessorID());
    SendEventLocal();

    if PSTATE.IL == '1' && spsr<4> == '1' && spsr<20> == '0' then
        // If the exception return is illegal, PC[63:32,1:0] are UNKNOWN
        new_pc<63:32> = bits(32) UNKNOWN;
        new_pc<1:0> = bits(2) UNKNOWN;
    elsif UsingAArch32() then // Return to AArch32
        // ELR_ELx[1:0] or ELR_ELx[0] are treated as being 0, depending on the target instruction set state
        if PSTATE.T == '1' then
            new_pc<0> = '0';             // T32
        else
            new_pc<1:0> = '00';          // A32
    else                                 // Return to AArch64
        // ELR_ELx[63:56] might include a tag
        new_pc = AArch64.BranchAddr(new_pc);

    if UsingAArch32() then
        // 32 most significant bits are ignored.
        BranchTo(new_pc<31:0>, BranchType_ERET);
    else
        BranchToAddr(new_pc, BranchType_ERET);
enumeration CountOp {CountOp_CLZ, CountOp_CLS, CountOp_CNT};
// DecodeRegExtend()
// =================
// Decode a register extension option

ExtendType DecodeRegExtend(bits(3) op)
    case op of
        when '000' return ExtendType_UXTB;
        when '001' return ExtendType_UXTH;
        when '010' return ExtendType_UXTW;
        when '011' return ExtendType_UXTX;
        when '100' return ExtendType_SXTB;
        when '101' return ExtendType_SXTH;
        when '110' return ExtendType_SXTW;
        when '111' return ExtendType_SXTX;
// ExtendReg()
// ===========
// Perform a register extension and shift

bits(N) ExtendReg(integer reg, ExtendType exttype, integer shift)
    assert shift >= 0 && shift <= 4;
    bits(N) val = X[reg];
    boolean unsigned;
    integer len;

    case exttype of
        when ExtendType_SXTB unsigned = FALSE; len = 8;
        when ExtendType_SXTH unsigned = FALSE; len = 16;
        when ExtendType_SXTW unsigned = FALSE; len = 32;
        when ExtendType_SXTX unsigned = FALSE; len = 64;
        when ExtendType_UXTB unsigned = TRUE;  len = 8;
        when ExtendType_UXTH unsigned = TRUE;  len = 16;
        when ExtendType_UXTW unsigned = TRUE;  len = 32;
        when ExtendType_UXTX unsigned = TRUE;  len = 64;

    // Note the extended width of the intermediate value and
    // that sign extension occurs from bit <len+shift-1>, not
    // from bit <len-1>. This is equivalent to the instruction
    //   [SU]BFIZ Rtmp, Rreg, #shift, #len
    // It may also be seen as a sign/zero extend followed by a shift:
    //   LSL(Extend(val<len-1:0>, N, unsigned), shift);
    len = Min(len, N - shift);
    return Extend(val<len-1:0> : Zeros(shift), N, unsigned);
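The comment's [SU]BFIZ equivalence in C (illustrative, for N == 64): extract the low len bits, zero- or sign-extend, then shift; the Min() clamp keeps the significant bits inside the destination.

#include <stdint.h>

uint64_t extend_reg(uint64_t val, int len, int sign_extend, int shift)
{
    if (len > 64 - shift)
        len = 64 - shift;                           /* Min(len, N - shift) */
    uint64_t field = (len == 64) ? val : (val & ((1ULL << len) - 1));
    if (sign_extend && len < 64 && (field >> (len - 1)) & 1)
        field |= ~0ULL << len;                      /* extend from bit len-1 */
    return field << shift;
}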
enumeration ExtendType {ExtendType_SXTB, ExtendType_SXTH, ExtendType_SXTW, ExtendType_SXTX,
                        ExtendType_UXTB, ExtendType_UXTH, ExtendType_UXTW, ExtendType_UXTX};
enumeration FPMaxMinOp {FPMaxMinOp_MAX, FPMaxMinOp_MIN,
                        FPMaxMinOp_MAXNUM, FPMaxMinOp_MINNUM};
enumeration FPUnaryOp {FPUnaryOp_ABS, FPUnaryOp_MOV,
                       FPUnaryOp_NEG, FPUnaryOp_SQRT};
enumeration FPConvOp {FPConvOp_CVT_FtoI, FPConvOp_CVT_ItoF,
                      FPConvOp_MOV_FtoI, FPConvOp_MOV_ItoF,
                      FPConvOp_CVT_FtoI_JS};
// BFXPreferred()
// ==============
//
// Return TRUE if UBFX or SBFX is the preferred disassembly of a
// UBFM or SBFM bitfield instruction. Must exclude more specific
// aliases UBFIZ, SBFIZ, UXT[BH], SXT[BHW], LSL, LSR and ASR.

boolean BFXPreferred(bit sf, bit uns, bits(6) imms, bits(6) immr)
    integer S = UInt(imms);
    integer R = UInt(immr);

    // must not match UBFIZ/SBFIZ alias
    if UInt(imms) < UInt(immr) then
        return FALSE;

    // must not match LSR/ASR/LSL alias (imms == 31 or 63)
    if imms == sf:'11111' then
        return FALSE;

    // must not match UXTx/SXTx alias
    if immr == '000000' then
        // must not match 32-bit UXT[BH] or SXT[BH]
        if sf == '0' && imms IN {'000111', '001111'} then
            return FALSE;
        // must not match 64-bit SXT[BHW]
        if sf:uns == '10' && imms IN {'000111', '001111', '011111'} then
            return FALSE;

    // must be UBFX/SBFX alias
    return TRUE;
// DecodeBitMasks()
// ================
// Decode AArch64 bitfield and logical immediate masks which use a similar encoding structure

(bits(M), bits(M)) DecodeBitMasks(bit immN, bits(6) imms, bits(6) immr, boolean immediate)
    bits(64) tmask, wmask;
    bits(6) tmask_and, wmask_and;
    bits(6) tmask_or, wmask_or;
    bits(6) levels;

    // Compute log2 of element size
    // 2^len must be in range [2, M]
    len = HighestSetBit(immN:NOT(imms));
    if len < 1 then UNDEFINED;
    assert M >= (1 << len);

    // Determine S, R and S - R parameters
    levels = ZeroExtend(Ones(len), 6);

    // For logical immediates an all-ones value of S is reserved
    // since it would generate a useless all-ones result (many times)
    if immediate && (imms AND levels) == levels then
        UNDEFINED;

    S = UInt(imms AND levels);
    R = UInt(immr AND levels);
    diff = S - R;    // 6-bit subtract with borrow

    // From a software perspective, the remaining code is equivalent to:
    //   esize = 1 << len;
    //   d = UInt(diff<len-1:0>);
    //   welem = ZeroExtend(Ones(S + 1), esize);
    //   telem = ZeroExtend(Ones(d + 1), esize);
    //   wmask = Replicate(ROR(welem, R));
    //   tmask = Replicate(telem);
    //   return (wmask, tmask);

    // Compute "top mask"
    tmask_and = diff<5:0> OR NOT(levels);
    tmask_or  = diff<5:0> AND levels;

    tmask = Ones(64);
    tmask = ((tmask
              AND Replicate(Replicate(tmask_and<0>, 1) : Ones(1), 32))
              OR  Replicate(Zeros(1) : Replicate(tmask_or<0>, 1), 32));
    // optimization of first step:
    // tmask = Replicate(tmask_and<0> : '1', 32);
    tmask = ((tmask
              AND Replicate(Replicate(tmask_and<1>, 2) : Ones(2), 16))
              OR  Replicate(Zeros(2) : Replicate(tmask_or<1>, 2), 16));
    tmask = ((tmask
              AND Replicate(Replicate(tmask_and<2>, 4) : Ones(4), 8))
              OR  Replicate(Zeros(4) : Replicate(tmask_or<2>, 4), 8));
    tmask = ((tmask
              AND Replicate(Replicate(tmask_and<3>, 8) : Ones(8), 4))
              OR  Replicate(Zeros(8) : Replicate(tmask_or<3>, 8), 4));
    tmask = ((tmask
              AND Replicate(Replicate(tmask_and<4>, 16) : Ones(16), 2))
              OR  Replicate(Zeros(16) : Replicate(tmask_or<4>, 16), 2));
    tmask = ((tmask
              AND Replicate(Replicate(tmask_and<5>, 32) : Ones(32), 1))
              OR  Replicate(Zeros(32) : Replicate(tmask_or<5>, 32), 1));

    // Compute "wraparound mask"
    wmask_and = immr OR NOT(levels);
    wmask_or  = immr AND levels;

    wmask = Zeros(64);
    wmask = ((wmask
              AND Replicate(Ones(1) : Replicate(wmask_and<0>, 1), 32))
              OR  Replicate(Replicate(wmask_or<0>, 1) : Zeros(1), 32));
    // optimization of first step:
    // wmask = Replicate(wmask_or<0> : '0', 32);
    wmask = ((wmask
              AND Replicate(Ones(2) : Replicate(wmask_and<1>, 2), 16))
              OR  Replicate(Replicate(wmask_or<1>, 2) : Zeros(2), 16));
    wmask = ((wmask
              AND Replicate(Ones(4) : Replicate(wmask_and<2>, 4), 8))
              OR  Replicate(Replicate(wmask_or<2>, 4) : Zeros(4), 8));
    wmask = ((wmask
              AND Replicate(Ones(8) : Replicate(wmask_and<3>, 8), 4))
              OR  Replicate(Replicate(wmask_or<3>, 8) : Zeros(8), 4));
    wmask = ((wmask
              AND Replicate(Ones(16) : Replicate(wmask_and<4>, 16), 2))
              OR  Replicate(Replicate(wmask_or<4>, 16) : Zeros(16), 2));
    wmask = ((wmask
              AND Replicate(Ones(32) : Replicate(wmask_and<5>, 32), 1))
              OR  Replicate(Replicate(wmask_or<5>, 32) : Zeros(32), 1));

    if diff<6> != '0' then    // borrow from S - R
        wmask = wmask AND tmask;
    else
        wmask = wmask OR tmask;

    return (wmask<M-1:0>, tmask<M-1:0>);
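The "software perspective" comment above is easier to follow in C. This illustrative sketch computes wmask only, for M == 64, using a GCC/Clang builtin for HighestSetBit; reserved encodings (immN:NOT(imms) all zero) are not handled.

#include <stdint.h>

uint64_t decode_wmask(unsigned immN, unsigned imms, unsigned immr)
{
    unsigned v = (immN << 6) | (~imms & 0x3f);      /* immN:NOT(imms) */
    int len = 31 - __builtin_clz(v);                /* HighestSetBit */
    int esize = 1 << len;                           /* 2, 4, ..., 64 */
    unsigned S = imms & (esize - 1), R = immr & (esize - 1);
    uint64_t emask = (esize == 64) ? ~0ULL : ((1ULL << esize) - 1);
    uint64_t welem = (S == 63) ? ~0ULL : ((1ULL << (S + 1)) - 1);
    uint64_t elem  = R ? (((welem >> R) | (welem << (esize - R))) & emask)
                       : welem;                     /* ROR(welem, R) */
    uint64_t wmask = 0;
    for (int i = 0; i < 64; i += esize)
        wmask |= elem << i;                         /* Replicate() */
    return wmask;
}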
enumeration MoveWideOp {MoveWideOp_N, MoveWideOp_Z, MoveWideOp_K};
// MoveWidePreferred()
// ===================
//
// Return TRUE if a bitmask immediate encoding would generate an immediate
// value that could also be represented by a single MOVZ or MOVN instruction.
// Used as a condition for the preferred MOV<-ORR alias.

boolean MoveWidePreferred(bit sf, bit immN, bits(6) imms, bits(6) immr)
    integer S = UInt(imms);
    integer R = UInt(immr);
    integer width = if sf == '1' then 64 else 32;

    // element size must equal total immediate size
    if sf == '1' && immN:imms != '1xxxxxx' then
        return FALSE;
    if sf == '0' && immN:imms != '00xxxxx' then
        return FALSE;

    // for MOVZ must contain no more than 16 ones
    if S < 16 then
        // ones must not span halfword boundary when rotated
        return (-R MOD 16) <= (15 - S);

    // for MOVN must contain no more than 16 zeros
    if S >= width - 15 then
        // zeros must not span halfword boundary when rotated
        return (R MOD 16) <= (S - (width - 15));

    return FALSE;
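The same test in C (illustrative): a bitmask immediate is representable by MOVZ when its S+1 ones fit in one 16-bit halfword after rotation, and by MOVN when the zeros do.

#include <stdbool.h>

bool movewide_preferred(bool sf, bool immN, unsigned imms, unsigned immr)
{
    int s = imms & 0x3f, r = immr & 0x3f;
    int width = sf ? 64 : 32;
    /* element size must equal the total immediate size */
    if (sf && !immN)
        return false;
    if (!sf && (immN || (imms & 0x20)))
        return false;
    if (s < 16)                                    /* MOVZ: <= 16 ones */
        return ((16 - (r & 15)) & 15) <= (15 - s); /* -R MOD 16 */
    if (s >= width - 15)                           /* MOVN: <= 16 zeros */
        return (r & 15) <= (s - (width - 15));
    return false;
}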
// DecodeShift()
// =============
// Decode shift encodings

ShiftType DecodeShift(bits(2) op)
    case op of
        when '00' return ShiftType_LSL;
        when '01' return ShiftType_LSR;
        when '10' return ShiftType_ASR;
        when '11' return ShiftType_ROR;
// MoveWidePreferred()
// ===================
//
// Return TRUE if a bitmask immediate encoding would generate an immediate
// value that could also be represented by a single MOVZ or MOVN instruction.
// Used as a condition for the preferred MOV<-ORR alias.
boolean MoveWidePreferred(bit sf, bit immN, bits(6) imms, bits(6) immr)
    integer S = UInt(imms);
    integer R = UInt(immr);
    integer width = if sf == '1' then 64 else 32;

    // element size must equal total immediate size
    if sf == '1' && immN:imms != '1xxxxxx' then
        return FALSE;
    if sf == '0' && immN:imms != '00xxxxx' then
        return FALSE;

    // for MOVZ must contain no more than 16 ones
    if S < 16 then
        // ones must not span halfword boundary when rotated
        return (-R MOD 16) <= (15 - S);

    // for MOVN must contain no more than 16 zeros
    if S >= width - 15 then
        // zeros must not span halfword boundary when rotated
        return (R MOD 16) <= (S - (width - 15));

    return FALSE;
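Example (informative): the same preference test as a C sketch, under the same field widths as above (immN one bit, imms and immr six bits). Editorial only; move_wide_preferred is an illustrative name, and (-R MOD 16) is computed with the non-negative result the pseudocode MOD operator gives.

#include <stdint.h>
#include <stdbool.h>

static bool move_wide_preferred(bool sf, unsigned immN, unsigned imms, unsigned immr) {
    unsigned S = imms & 0x3F;
    unsigned R = immr & 0x3F;
    unsigned width = sf ? 64 : 32;

    /* element size must equal total immediate size */
    if (sf && immN != 1) return false;
    if (!sf && !(immN == 0 && (imms & 0x20) == 0)) return false;

    if (S < 16) {                               /* MOVZ: no more than 16 ones */
        /* ones must not span a halfword boundary when rotated */
        return ((16 - (R % 16)) % 16) <= (15 - S);
    }
    if (S >= width - 15) {                      /* MOVN: no more than 16 zeros */
        /* zeros must not span a halfword boundary when rotated */
        return (R % 16) <= (S - (width - 15));
    }
    return false;
}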
// DecodeShift()
// =============
// Decode shift encodings
ShiftType DecodeShift(bits(2) op)
    case op of
        when '00' return ShiftType_LSL;
        when '01' return ShiftType_LSR;
        when '10' return ShiftType_ASR;
        when '11' return ShiftType_ROR;
// ShiftReg()
// ==========
// Perform shift of a register operand
bits(N) ShiftReg(integer reg, ShiftType shiftype, integer amount)
    bits(N) result = X[reg];
    case shiftype of
        when ShiftType_LSL result = LSL(result, amount);
        when ShiftType_LSR result = LSR(result, amount);
        when ShiftType_ASR result = ASR(result, amount);
        when ShiftType_ROR result = ROR(result, amount);
    return result;
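Example (informative): a C equivalent of DecodeShift/ShiftReg for a 64-bit register value. Editorial only; shift_reg64 is an illustrative name, the amount is reduced modulo the register width here for safety (the variable-shift instructions do the same; immediate forms already constrain it), and arithmetic right shift of signed values is assumed, as on common compilers.

#include <stdint.h>

typedef enum { SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR } shift_type;

static uint64_t shift_reg64(uint64_t value, shift_type type, unsigned amount) {
    amount &= 63;
    if (amount == 0) return value;
    switch (type) {
    case SHIFT_LSL: return value << amount;
    case SHIFT_LSR: return value >> amount;
    case SHIFT_ASR: return (uint64_t)((int64_t)value >> amount);
    case SHIFT_ROR: return (value >> amount) | (value << (64 - amount));
    }
    return value;
}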
enumeration ShiftType {ShiftType_LSL, ShiftType_LSR, ShiftType_ASR, ShiftType_ROR};
enumeration LogicalOp {LogicalOp_AND, LogicalOp_EOR, LogicalOp_ORR};
enumeration MemAtomicOp {MemAtomicOp_ADD, MemAtomicOp_BIC, MemAtomicOp_EOR, MemAtomicOp_ORR,
                         MemAtomicOp_SMAX, MemAtomicOp_SMIN, MemAtomicOp_UMAX, MemAtomicOp_UMIN,
                         MemAtomicOp_SWP};
enumeration MemOp {MemOp_LOAD, MemOp_STORE, MemOp_PREFETCH};
// Prefetch()
// ==========
// Decode and execute the prefetch hint on ADDRESS specified by PRFOP

Prefetch(bits(64) address, bits(5) prfop)
    PrefetchHint hint;
    integer target;
    boolean stream;
    case prfop<4:3> of
        when '00' hint = Prefetch_READ;  // PLD: prefetch for load
        when '01' hint = Prefetch_EXEC;  // PLI: preload instructions
        when '10' hint = Prefetch_WRITE; // PST: prepare for store
        when '11' return;                // unallocated hint
    target = UInt(prfop<2:1>);           // target cache level
    stream = (prfop<0> != '0');          // streaming (non-temporal)
    Hint_Prefetch(address, hint, target, stream);
    return;
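Example (informative): the same prfop decode in C. Editorial only; prfm_hint and decode_prfop are illustrative names.

#include <stdint.h>
#include <stdbool.h>

typedef enum { PRFM_PLD, PRFM_PLI, PRFM_PST } prfm_hint;

static bool decode_prfop(uint8_t prfop, prfm_hint *hint, unsigned *target, bool *stream) {
    switch ((prfop >> 3) & 3) {
    case 0: *hint = PRFM_PLD; break;   /* prefetch for load */
    case 1: *hint = PRFM_PLI; break;   /* preload instructions */
    case 2: *hint = PRFM_PST; break;   /* prepare for store */
    default: return false;             /* unallocated hint */
    }
    *target = (prfop >> 1) & 3;        /* target cache level */
    *stream = (prfop & 1) != 0;        /* streaming (non-temporal) */
    return true;
}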
enumeration MemBarrierOp {MemBarrierOp_DSB    // Data Synchronization Barrier
                        , MemBarrierOp_DMB    // Data Memory Barrier
                        , MemBarrierOp_ISB    // Instruction Synchronization Barrier
                        , MemBarrierOp_SSBB   // Speculative Synchronization Barrier to VA
                        , MemBarrierOp_PSSBB  // Speculative Synchronization Barrier to PA
                        , MemBarrierOp_SB     // Speculation Barrier
                        };
enumeration SystemHintOp {
    SystemHintOp_NOP,
    SystemHintOp_YIELD,
    SystemHintOp_WFE,
    SystemHintOp_WFI,
    SystemHintOp_SEV,
    SystemHintOp_SEVL,
    SystemHintOp_ESB,
    SystemHintOp_PSB,
    SystemHintOp_TSB,
    SystemHintOp_BTI,
    SystemHintOp_CSDB
};
enumeration PSTATEField {PSTATEField_DAIFSet, PSTATEField_DAIFClr,
                         PSTATEField_PAN,  // Armv8.1
                         PSTATEField_UAO,  // Armv8.2
                         PSTATEField_DIT,  // Armv8.4
                         PSTATEField_SSBS,
                         PSTATEField_TCO,  // Armv8.5
                         PSTATEField_SP
                         };
// SysOp()
// =======

SystemOp SysOp(bits(3) op1, bits(4) CRn, bits(4) CRm, bits(3) op2)
    case op1:CRn:CRm:op2 of
        when '000 0111 1000 000' return Sys_AT;   // S1E1R
        when '100 0111 1000 000' return Sys_AT;   // S1E2R
        when '110 0111 1000 000' return Sys_AT;   // S1E3R
        when '000 0111 1000 001' return Sys_AT;   // S1E1W
        when '100 0111 1000 001' return Sys_AT;   // S1E2W
        when '110 0111 1000 001' return Sys_AT;   // S1E3W
        when '000 0111 1000 010' return Sys_AT;   // S1E0R
        when '000 0111 1000 011' return Sys_AT;   // S1E0W
        when '100 0111 1000 100' return Sys_AT;   // S12E1R
        when '100 0111 1000 101' return Sys_AT;   // S12E1W
        when '100 0111 1000 110' return Sys_AT;   // S12E0R
        when '100 0111 1000 111' return Sys_AT;   // S12E0W
        when '011 0111 0100 001' return Sys_DC;   // ZVA
        when '000 0111 0110 001' return Sys_DC;   // IVAC
        when '000 0111 0110 010' return Sys_DC;   // ISW
        when '011 0111 1010 001' return Sys_DC;   // CVAC
        when '000 0111 1010 010' return Sys_DC;   // CSW
        when '011 0111 1011 001' return Sys_DC;   // CVAU
        when '011 0111 1110 001' return Sys_DC;   // CIVAC
        when '000 0111 1110 010' return Sys_DC;   // CISW
        when '011 0111 1101 001' return Sys_DC;   // CVADP
        when '000 0111 0001 000' return Sys_IC;   // IALLUIS
        when '000 0111 0101 000' return Sys_IC;   // IALLU
        when '011 0111 0101 001' return Sys_IC;   // IVAU
        when '100 1000 0000 001' return Sys_TLBI; // IPAS2E1IS
        when '100 1000 0000 101' return Sys_TLBI; // IPAS2LE1IS
        when '000 1000 0011 000' return Sys_TLBI; // VMALLE1IS
        when '100 1000 0011 000' return Sys_TLBI; // ALLE2IS
        when '110 1000 0011 000' return Sys_TLBI; // ALLE3IS
        when '000 1000 0011 001' return Sys_TLBI; // VAE1IS
        when '100 1000 0011 001' return Sys_TLBI; // VAE2IS
        when '110 1000 0011 001' return Sys_TLBI; // VAE3IS
        when '000 1000 0011 010' return Sys_TLBI; // ASIDE1IS
        when '000 1000 0011 011' return Sys_TLBI; // VAAE1IS
        when '100 1000 0011 100' return Sys_TLBI; // ALLE1IS
        when '000 1000 0011 101' return Sys_TLBI; // VALE1IS
        when '100 1000 0011 101' return Sys_TLBI; // VALE2IS
        when '110 1000 0011 101' return Sys_TLBI; // VALE3IS
        when '100 1000 0011 110' return Sys_TLBI; // VMALLS12E1IS
        when '000 1000 0011 111' return Sys_TLBI; // VAALE1IS
        when '100 1000 0100 001' return Sys_TLBI; // IPAS2E1
        when '100 1000 0100 101' return Sys_TLBI; // IPAS2LE1
        when '000 1000 0111 000' return Sys_TLBI; // VMALLE1
        when '100 1000 0111 000' return Sys_TLBI; // ALLE2
        when '110 1000 0111 000' return Sys_TLBI; // ALLE3
        when '000 1000 0111 001' return Sys_TLBI; // VAE1
        when '100 1000 0111 001' return Sys_TLBI; // VAE2
        when '110 1000 0111 001' return Sys_TLBI; // VAE3
        when '000 1000 0111 010' return Sys_TLBI; // ASIDE1
        when '000 1000 0111 011' return Sys_TLBI; // VAAE1
        when '100 1000 0111 100' return Sys_TLBI; // ALLE1
        when '000 1000 0111 101' return Sys_TLBI; // VALE1
        when '100 1000 0111 101' return Sys_TLBI; // VALE2
        when '110 1000 0111 101' return Sys_TLBI; // VALE3
        when '100 1000 0111 110' return Sys_TLBI; // VMALLS12E1
        when '000 1000 0111 111' return Sys_TLBI; // VAALE1
    return Sys_SYS;
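Example (informative): in C, the op1:CRn:CRm:op2 dispatch above is conveniently done by packing the four fields into one 14-bit key. Editorial sketch covering a handful of entries; SYS_KEY and sys_op are illustrative names.

#include <stdint.h>

#define SYS_KEY(op1, crn, crm, op2) (((op1) << 11) | ((crn) << 7) | ((crm) << 3) | (op2))

typedef enum { SYS_AT, SYS_DC, SYS_IC, SYS_TLBI, SYS_SYS } system_op;

static system_op sys_op(unsigned op1, unsigned crn, unsigned crm, unsigned op2) {
    switch (SYS_KEY(op1, crn, crm, op2)) {
    case SYS_KEY(0, 7, 8, 0): return SYS_AT;   /* S1E1R */
    case SYS_KEY(4, 7, 8, 0): return SYS_AT;   /* S1E2R */
    case SYS_KEY(3, 7, 4, 1): return SYS_DC;   /* ZVA */
    case SYS_KEY(0, 7, 5, 0): return SYS_IC;   /* IALLU */
    case SYS_KEY(0, 8, 7, 0): return SYS_TLBI; /* VMALLE1 */
    /* ... remaining entries follow the table above ... */
    default:                  return SYS_SYS;
    }
}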
enumeration SystemOp {Sys_AT, Sys_DC, Sys_IC, Sys_TLBI, Sys_SYS};
enumeration VBitOp {VBitOp_VBIF, VBitOp_VBIT, VBitOp_VBSL, VBitOp_VEOR};
enumeration CompareOp {CompareOp_GT, CompareOp_GE, CompareOp_EQ, CompareOp_LE, CompareOp_LT};
enumeration ImmediateOp {ImmediateOp_MOVI, ImmediateOp_MVNI, ImmediateOp_ORR, ImmediateOp_BIC};
// Reduce()
// ========

bits(esize) Reduce(ReduceOp op, bits(N) input, integer esize)
    integer half;
    bits(esize) hi;
    bits(esize) lo;
    bits(esize) result;

    if N == esize then
        return input<esize-1:0>;

    half = N DIV 2;

    hi = Reduce(op, input<N-1:half>, esize);
    lo = Reduce(op, input<half-1:0>, esize);

    case op of
        when ReduceOp_FMINNUM
            result = FPMinNum(lo, hi, FPCR);
        when ReduceOp_FMAXNUM
            result = FPMaxNum(lo, hi, FPCR);
        when ReduceOp_FMIN
            result = FPMin(lo, hi, FPCR);
        when ReduceOp_FMAX
            result = FPMax(lo, hi, FPCR);
        when ReduceOp_FADD
            result = FPAdd(lo, hi, FPCR);
        when ReduceOp_ADD
            result = lo + hi;

    return result;
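Example (informative): the recursive halving used by Reduce(), shown in C for the integer ADD case with the vector held as an array of elements. Editorial only; reduce_add is an illustrative name and count must be a power of two.

#include <stdint.h>

static uint64_t reduce_add(const uint64_t *elems, unsigned count) {
    if (count == 1)
        return elems[0];
    unsigned half = count / 2;
    uint64_t hi = reduce_add(elems + half, half);  /* input<N-1:half> */
    uint64_t lo = reduce_add(elems, half);         /* input<half-1:0> */
    return lo + hi;
}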
enumeration ReduceOp {ReduceOp_FMINNUM, ReduceOp_FMAXNUM,
                      ReduceOp_FMIN, ReduceOp_FMAX,
                      ReduceOp_FADD, ReduceOp_ADD};
// AArch64.InstructionDevice()
// ===========================
// Instruction fetches from memory marked as Device but not execute-never might generate a
// Permission Fault but are otherwise treated as if from Normal Non-cacheable memory.

AddressDescriptor AArch64.InstructionDevice(AddressDescriptor addrdesc, bits(64) vaddress,
                                            bits(52) ipaddress, integer level,
                                            AccType acctype, boolean iswrite, boolean secondstage,
                                            boolean s2fs1walk)
    c = ConstrainUnpredictable(Unpredictable_INSTRDEVICE);
    assert c IN {Constraint_NONE, Constraint_FAULT};

    if c == Constraint_FAULT then
        addrdesc.fault = AArch64.PermissionFault(ipaddress, boolean UNKNOWN, level, acctype, iswrite,
                                                 secondstage, s2fs1walk);
    else
        addrdesc.memattrs.memtype = MemType_Normal;
        addrdesc.memattrs.inner.attrs = MemAttr_NC;
        addrdesc.memattrs.inner.hints = MemHint_No;
        addrdesc.memattrs.outer = addrdesc.memattrs.inner;
        addrdesc.memattrs.tagged = FALSE;
        addrdesc.memattrs = MemAttrDefaults(addrdesc.memattrs);

    return addrdesc;
// AArch64.S1AttrDecode()
// ======================
// Converts the Stage 1 attribute fields, using the MAIR, to orthogonal
// attributes and hints.

MemoryAttributes AArch64.S1AttrDecode(bits(2) SH, bits(3) attr, AccType acctype)
    MemoryAttributes memattrs;

    mair = MAIR[];
    index = 8 * UInt(attr);
    attrfield = mair<index+7:index>;

    memattrs.tagged = FALSE;
    if ((attrfield<7:4> != '0000' && attrfield<7:4> != '1111' && attrfield<3:0> == '0000') ||
        (attrfield<7:4> == '0000' && attrfield<3:0> != 'xx00')) then
        // Reserved, maps to an allocated value
        (-, attrfield) = ConstrainUnpredictableBits(Unpredictable_RESMAIR);

    if !HaveMTEExt() && attrfield<7:4> == '1111' && attrfield<3:0> == '0000' then
        // Reserved, maps to an allocated value
        (-, attrfield) = ConstrainUnpredictableBits(Unpredictable_RESMAIR);

    if attrfield<7:4> == '0000' then // Device
        memattrs.memtype = MemType_Device;
        case attrfield<3:0> of
            when '0000' memattrs.device = DeviceType_nGnRnE;
            when '0100' memattrs.device = DeviceType_nGnRE;
            when '1000' memattrs.device = DeviceType_nGRE;
            when '1100' memattrs.device = DeviceType_GRE;
            otherwise Unreachable(); // Reserved, handled above
    elsif attrfield<3:0> != '0000' then // Normal
        memattrs.memtype = MemType_Normal;
        memattrs.outer = LongConvertAttrsHints(attrfield<7:4>, acctype);
        memattrs.inner = LongConvertAttrsHints(attrfield<3:0>, acctype);
        memattrs.shareable = SH<1> == '1';
        memattrs.outershareable = SH == '10';
    elsif HaveMTEExt() && attrfield == '11110000' then // Normal, Tagged if WB-RWA
        memattrs.memtype = MemType_Normal;
        memattrs.outer = LongConvertAttrsHints('1111', acctype); // WB_RWA
        memattrs.inner = LongConvertAttrsHints('1111', acctype); // WB_RWA
        memattrs.shareable = SH<1> == '1';
        memattrs.outershareable = SH == '10';
        memattrs.tagged = (memattrs.inner.attrs == MemAttr_WB &&
                           memattrs.inner.hints == MemHint_RWA &&
                           memattrs.outer.attrs == MemAttr_WB &&
                           memattrs.outer.hints == MemHint_RWA);
    else
        Unreachable(); // Reserved, handled above

    return MemAttrDefaults(memattrs);
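Example (informative): classifying a single 8-bit MAIR attribute field in C, following the Device/Normal split decoded above. Editorial only; it reports reserved encodings instead of applying the CONSTRAINED UNPREDICTABLE remapping, and ignores the MTE-specific 0xF0 case.

#include <stdint.h>

typedef enum { MEM_DEVICE_nGnRnE, MEM_DEVICE_nGnRE, MEM_DEVICE_nGRE, MEM_DEVICE_GRE,
               MEM_NORMAL, MEM_RESERVED } mem_class;

static mem_class classify_mair_attr(uint8_t attrfield) {
    uint8_t hi = attrfield >> 4, lo = attrfield & 0xF;
    if (hi == 0x0) {                       /* Device */
        switch (lo) {
        case 0x0: return MEM_DEVICE_nGnRnE;
        case 0x4: return MEM_DEVICE_nGnRE;
        case 0x8: return MEM_DEVICE_nGRE;
        case 0xC: return MEM_DEVICE_GRE;
        default:  return MEM_RESERVED;     /* attrfield<3:0> != 'xx00' */
        }
    }
    if (lo != 0x0)                         /* Normal */
        return MEM_NORMAL;
    return MEM_RESERVED;                   /* e.g. 0xF0 without MTE */
}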
// AArch64.TranslateAddressS1Off()
// ===============================
// Called for stage 1 translations when translation is disabled to supply a default translation.
// Note that there are additional constraints on instruction prefetching that are not described in
// this pseudocode.

TLBRecord AArch64.TranslateAddressS1Off(bits(64) vaddress, AccType acctype, boolean iswrite)
    assert !ELUsingAArch32(S1TranslationRegime());

    TLBRecord result;
    Top = AddrTop(vaddress, (acctype == AccType_IFETCH), PSTATE.EL);
    if !IsZero(vaddress<Top:PAMax()>) then
        level = 0;
        ipaddress = bits(52) UNKNOWN;
        secondstage = FALSE;
        s2fs1walk = FALSE;
        result.addrdesc.fault = AArch64.AddressSizeFault(ipaddress, boolean UNKNOWN, level, acctype,
                                                         iswrite, secondstage, s2fs1walk);
        return result;

    default_cacheable = (HasS2Translation() && HCR_EL2.DC == '1');

    if default_cacheable then
        // Use default cacheable settings
        result.addrdesc.memattrs.memtype = MemType_Normal;
        result.addrdesc.memattrs.inner.attrs = MemAttr_WB; // Write-back
        result.addrdesc.memattrs.inner.hints = MemHint_RWA;
        result.addrdesc.memattrs.shareable = FALSE;
        result.addrdesc.memattrs.outershareable = FALSE;
        result.addrdesc.memattrs.tagged = HCR_EL2.DCT == '1';
    elsif acctype != AccType_IFETCH then
        // Treat data as Device
        result.addrdesc.memattrs.memtype = MemType_Device;
        result.addrdesc.memattrs.device = DeviceType_nGnRnE;
        result.addrdesc.memattrs.inner = MemAttrHints UNKNOWN;
        result.addrdesc.memattrs.tagged = FALSE;
    else
        // Instruction cacheability controlled by SCTLR_ELx.I
        cacheable = SCTLR[].I == '1';
        result.addrdesc.memattrs.memtype = MemType_Normal;
        if cacheable then
            result.addrdesc.memattrs.inner.attrs = MemAttr_WT;
            result.addrdesc.memattrs.inner.hints = MemHint_RA;
        else
            result.addrdesc.memattrs.inner.attrs = MemAttr_NC;
            result.addrdesc.memattrs.inner.hints = MemHint_No;
        result.addrdesc.memattrs.shareable = TRUE;
        result.addrdesc.memattrs.outershareable = TRUE;
        result.addrdesc.memattrs.tagged = FALSE;

    result.addrdesc.memattrs.outer = result.addrdesc.memattrs.inner;
    result.addrdesc.memattrs = MemAttrDefaults(result.addrdesc.memattrs);

    result.perms.ap = bits(3) UNKNOWN;
    result.perms.xn = '0';
    result.perms.pxn = '0';

    result.nG = bit UNKNOWN;
    result.contiguous = boolean UNKNOWN;
    result.domain = bits(4) UNKNOWN;
    result.level = integer UNKNOWN;
    result.blocksize = integer UNKNOWN;
    result.addrdesc.paddress.address = vaddress<51:0>;
    result.addrdesc.paddress.NS = if IsSecure() then '0' else '1';
    result.addrdesc.fault = AArch64.NoFault();
    return result;
// AArch64.AccessIsPrivileged()
// ============================

boolean AArch64.AccessIsPrivileged(AccType acctype)
    el = AArch64.AccessUsesEL(acctype);
    if el == EL0 then
        ispriv = FALSE;
    elsif el == EL3 then
        ispriv = TRUE;
    elsif el == EL2 && (!IsInHost() || HCR_EL2.TGE == '0') then
        ispriv = TRUE;
    elsif HaveUAOExt() && PSTATE.UAO == '1' then
        ispriv = TRUE;
    else
        ispriv = (acctype != AccType_UNPRIV);
    return ispriv;
// AArch64.AccessUsesEL()
// ======================
// Returns the Exception Level of the regime that will manage the translation for a given access type.

bits(2) AArch64.AccessUsesEL(AccType acctype)
    if acctype == AccType_UNPRIV then
        return EL0;
    elsif acctype == AccType_NV2REGISTER then
        return EL2;
    else
        return PSTATE.EL;
// AArch64.CheckPermission()
// =========================
// Function used for permission checking from AArch64 stage 1 translations

FaultRecord AArch64.CheckPermission(Permissions perms, bits(64) vaddress, integer level,
                                    bit NS, AccType acctype, boolean iswrite)
    assert !ELUsingAArch32(S1TranslationRegime());

    wxn = SCTLR[].WXN == '1';

    if (PSTATE.EL == EL0 || IsInHost() ||
        (PSTATE.EL == EL1 && !HaveNV2Ext()) ||
        (PSTATE.EL == EL1 && HaveNV2Ext() && (acctype != AccType_NV2REGISTER || !ELIsInHost(EL2)))) then
        priv_r = TRUE;
        priv_w = perms.ap<2> == '0';
        user_r = perms.ap<1> == '1';
        user_w = perms.ap<2:1> == '01';

        ispriv = AArch64.AccessIsPrivileged(acctype);

        pan = if HavePANExt() then PSTATE.PAN else '0';
        if (EL2Enabled() && ((PSTATE.EL == EL1 && HaveNVExt() && HCR_EL2.<NV, NV1> == '11') ||
            (HaveNV2Ext() && acctype == AccType_NV2REGISTER && HCR_EL2.NV2 == '1'))) then
            pan = '0';
        is_ldst   = !(acctype IN {AccType_DC, AccType_DC_UNPRIV, AccType_AT, AccType_IFETCH});
        is_ats1xp = (acctype == AccType_AT && AArch64.ExecutingATS1xPInstr());
        if pan == '1' && user_r && ispriv && (is_ldst || is_ats1xp) then
            priv_r = FALSE;
            priv_w = FALSE;

        user_xn = perms.xn == '1' || (user_w && wxn);
        priv_xn = perms.pxn == '1' || (priv_w && wxn) || user_w;

        if ispriv then
            (r, w, xn) = (priv_r, priv_w, priv_xn);
        else
            (r, w, xn) = (user_r, user_w, user_xn);
    else
        // Access from EL2 or EL3
        r = TRUE;
        w = perms.ap<2> == '0';
        xn = perms.xn == '1' || (w && wxn);

    // Restriction on Secure instruction fetch
    if HaveEL(EL3) && IsSecure() && NS == '1' && SCR_EL3.SIF == '1' then
        xn = TRUE;

    if acctype == AccType_IFETCH then
        fail = xn;
        failedread = TRUE;
    elsif acctype IN { AccType_ATOMICRW, AccType_ORDEREDRW, AccType_ORDEREDATOMICRW } then
        fail = !r || !w;
        failedread = !r;
    elsif iswrite then
        fail = !w;
        failedread = FALSE;
    elsif acctype == AccType_DC && PSTATE.EL != EL0 then
        // DC maintenance instructions operating by VA cannot fault from stage 1 translation,
        // other than DC IVAC, which requires write permission, and operations executed at EL0,
        // which require read permission.
        fail = FALSE;
    else
        fail = !r;
        failedread = TRUE;

    if fail then
        secondstage = FALSE;
        s2fs1walk = FALSE;
        ipaddress = bits(52) UNKNOWN;
        return AArch64.PermissionFault(ipaddress, boolean UNKNOWN, level, acctype,
                                       !failedread, secondstage, s2fs1walk);
    else
        return AArch64.NoFault();
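Example (informative): the heart of the EL1&0 permission split above, as a C sketch deriving the read/write/execute view from AP[2:1], XN, PXN and WXN. Editorial only; stage1_perms is an illustrative name, and the PAN and Secure-instruction-fetch adjustments are omitted for brevity.

#include <stdbool.h>

typedef struct { bool r, w, xn; } s1_perms;

static s1_perms stage1_perms(unsigned ap21, bool xn_bit, bool pxn_bit, bool wxn, bool ispriv) {
    bool priv_w = (ap21 & 2) == 0;   /* AP[2] == '0' permits privileged writes */
    bool user_r = (ap21 & 1) != 0;   /* AP[1] == '1' permits unprivileged access */
    bool user_w = (ap21 == 1);       /* AP[2:1] == '01' */
    bool user_xn = xn_bit || (user_w && wxn);
    bool priv_xn = pxn_bit || (priv_w && wxn) || user_w;
    s1_perms p;
    if (ispriv) { p.r = true;   p.w = priv_w; p.xn = priv_xn; }
    else        { p.r = user_r; p.w = user_w; p.xn = user_xn; }
    return p;
}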
// AArch64.CheckS2Permission()
// ===========================
// Function used for permission checking from AArch64 stage 2 translations

FaultRecord AArch64.CheckS2Permission(Permissions perms, bits(64) vaddress, bits(52) ipaddress,
                                      integer level, AccType acctype, boolean iswrite, boolean NS,
                                      boolean s2fs1walk, boolean hwupdatewalk)
    assert IsSecureEL2Enabled() || ( HaveEL(EL2) && !IsSecure() && !ELUsingAArch32(EL2) ) && HasS2Translation();

    r = perms.ap<1> == '1';
    w = perms.ap<2> == '1';
    if HaveExtendedExecuteNeverExt() then
        case perms.xn:perms.xxn of
            when '00' xn = FALSE;
            when '01' xn = PSTATE.EL == EL1;
            when '10' xn = TRUE;
            when '11' xn = PSTATE.EL == EL0;
    else
        xn = perms.xn == '1';
    // Stage 1 walk is checked as a read, regardless of the original type
    if acctype == AccType_IFETCH && !s2fs1walk then
        fail = xn;
        failedread = TRUE;
    elsif (acctype IN { AccType_ATOMICRW, AccType_ORDEREDRW, AccType_ORDEREDATOMICRW }) && !s2fs1walk then
        fail = !r || !w;
        failedread = !r;
    elsif iswrite && !s2fs1walk then
        fail = !w;
        failedread = FALSE;
    elsif acctype == AccType_DC && PSTATE.EL != EL0 && !s2fs1walk then
        // DC maintenance instructions operating by VA, with the exception of DC IVAC, do
        // not generate Permission faults from stage 2 translation, other than when
        // performing a stage 1 translation table walk.
        fail = FALSE;
    elsif hwupdatewalk then
        fail = !w;
        failedread = !iswrite;
    else
        fail = !r;
        failedread = !iswrite;

    if fail then
        domain = bits(4) UNKNOWN;
        secondstage = TRUE;
        return AArch64.PermissionFault(ipaddress, NS, level, acctype,
                                       !failedread, secondstage, s2fs1walk);
    else
        return AArch64.NoFault();
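Example (informative): the stage 2 execute-never decode when XN and XXN are read as a 2-bit field, as a C sketch of the HaveExtendedExecuteNeverExt() case above, treating the regime as EL1&0. Editorial only; stage2_xn is an illustrative name.

#include <stdbool.h>

static bool stage2_xn(bool xn, bool xxn, bool at_el1) {
    unsigned code = ((unsigned)xn << 1) | (unsigned)xxn;
    switch (code) {
    case 0:  return false;    /* '00': executable at EL1 and EL0 */
    case 1:  return at_el1;   /* '01': execute-never at EL1 only */
    case 2:  return true;     /* '10': execute-never at EL1 and EL0 */
    default: return !at_el1;  /* '11': execute-never at EL0 only */
    }
}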
// AArch64.CheckBreakpoint()
// =========================
// Called before executing the instruction of length "size" bytes at "vaddress" in an AArch64
// translation regime.
// The breakpoint can in fact be evaluated well ahead of execution, for example, at instruction
// fetch. This is the simple sequential execution of the program.

FaultRecord AArch64.CheckBreakpoint(bits(64) vaddress, AccType acctype, integer size)
    assert !ELUsingAArch32(S1TranslationRegime());
    assert (UsingAArch32() && size IN {2,4}) || size == 4;

    match = FALSE;

    for i = 0 to UInt(ID_AA64DFR0_EL1.BRPs)
        match_i = AArch64.BreakpointMatch(i, vaddress, acctype, size);
        match = match || match_i;

    if match && HaltOnBreakpointOrWatchpoint() then
        reason = DebugHalt_Breakpoint;
        Halt(reason);
    elsif match && MDSCR_EL1.MDE == '1' && AArch64.GenerateDebugExceptions() then
        acctype = AccType_IFETCH;
        iswrite = FALSE;
        return AArch64.DebugFault(acctype, iswrite);
    else
        return AArch64.NoFault();
// AArch64.CheckDebug()
// ====================
// Called on each access to check for a debug exception or entry to Debug state.

FaultRecord AArch64.CheckDebug(bits(64) vaddress, AccType acctype, boolean iswrite, integer size)

    FaultRecord fault = AArch64.NoFault();
    d_side = (acctype != AccType_IFETCH);

    if HaveNV2Ext() && acctype == AccType_NV2REGISTER then
        mask = '0';
        generate_exception = AArch64.GenerateDebugExceptionsFrom(EL2, IsSecure(), mask) && MDSCR_EL1.MDE == '1';
    else
        generate_exception = AArch64.GenerateDebugExceptions() && MDSCR_EL1.MDE == '1';
    halt = HaltOnBreakpointOrWatchpoint();

    if generate_exception || halt then
        if d_side then
            fault = AArch64.CheckWatchpoint(vaddress, acctype, iswrite, size);
        else
            fault = AArch64.CheckBreakpoint(vaddress, acctype, size);

    return fault;
// AArch64.CheckWatchpoint()
// =========================
// Called before accessing the memory location of "size" bytes at "address".

FaultRecord AArch64.CheckWatchpoint(bits(64) vaddress, AccType acctype,
                                    boolean iswrite, integer size)
    assert !ELUsingAArch32(S1TranslationRegime());

    match = FALSE;
    ispriv = AArch64.AccessIsPrivileged(acctype);

    for i = 0 to UInt(ID_AA64DFR0_EL1.WRPs)
        match = match || AArch64.WatchpointMatch(i, vaddress, size, ispriv, acctype, iswrite);

    if match && HaltOnBreakpointOrWatchpoint() then
        if acctype != AccType_NONFAULT && acctype != AccType_CNOTFIRST then
            reason = DebugHalt_Watchpoint;
            Halt(reason);
        else
            // Fault will be reported and cancelled
            return AArch64.DebugFault(acctype, iswrite);
    elsif match && MDSCR_EL1.MDE == '1' && AArch64.GenerateDebugExceptions() then
        return AArch64.DebugFault(acctype, iswrite);
    else
        return AArch64.NoFault();
// AArch64.AccessFlagFault()
// =========================

FaultRecord AArch64.AccessFlagFault(bits(52) ipaddress, boolean NS, integer level,
                                    AccType acctype, boolean iswrite, boolean secondstage,
                                    boolean s2fs1walk)
    extflag = bit UNKNOWN;
    errortype = bits(2) UNKNOWN;
    return AArch64.CreateFaultRecord(Fault_AccessFlag, ipaddress, NS, level, acctype, iswrite,
                                     extflag, errortype, secondstage, s2fs1walk);
// AArch64.AddressSizeFault()
// ==========================

FaultRecord AArch64.AddressSizeFault(bits(52) ipaddress, boolean NS, integer level,
                                     AccType acctype, boolean iswrite, boolean secondstage,
                                     boolean s2fs1walk)
    extflag = bit UNKNOWN;
    errortype = bits(2) UNKNOWN;
    return AArch64.CreateFaultRecord(Fault_AddressSize, ipaddress, NS, level, acctype, iswrite,
                                     extflag, errortype, secondstage, s2fs1walk);
// AArch64.AlignmentFault()
// ========================

FaultRecord AArch64.AlignmentFault(AccType acctype, boolean iswrite, boolean secondstage)
    ipaddress = bits(52) UNKNOWN;
    level = integer UNKNOWN;
    extflag = bit UNKNOWN;
    errortype = bits(2) UNKNOWN;
    s2fs1walk = boolean UNKNOWN;
    return AArch64.CreateFaultRecord(Fault_Alignment, ipaddress, boolean UNKNOWN, level, acctype, iswrite,
                                     extflag, errortype, secondstage, s2fs1walk);
// AArch64.AsynchExternalAbort()
// =============================
// Wrapper function for asynchronous external aborts

FaultRecord AArch64.AsynchExternalAbort(boolean parity, bits(2) errortype, bit extflag)
    faulttype = if parity then Fault_AsyncParity else Fault_AsyncExternal;
    ipaddress = bits(52) UNKNOWN;
    level = integer UNKNOWN;
    acctype = AccType_NORMAL;
    iswrite = boolean UNKNOWN;
    secondstage = FALSE;
    s2fs1walk = FALSE;
    return AArch64.CreateFaultRecord(faulttype, ipaddress, boolean UNKNOWN, level, acctype, iswrite, extflag,
                                     errortype, secondstage, s2fs1walk);
// AArch64.DebugFault()
// ====================

FaultRecord AArch64.DebugFault(AccType acctype, boolean iswrite)
    ipaddress = bits(52) UNKNOWN;
    errortype = bits(2) UNKNOWN;
    level = integer UNKNOWN;
    extflag = bit UNKNOWN;
    secondstage = FALSE;
    s2fs1walk = FALSE;
    return AArch64.CreateFaultRecord(Fault_Debug, ipaddress, boolean UNKNOWN, level, acctype, iswrite,
                                     extflag, errortype, secondstage, s2fs1walk);
// AArch64.NoFault()
// =================

FaultRecord AArch64.NoFault()
    ipaddress = bits(52) UNKNOWN;
    level = integer UNKNOWN;
    acctype = AccType_NORMAL;
    iswrite = boolean UNKNOWN;
    extflag = bit UNKNOWN;
    errortype = bits(2) UNKNOWN;
    secondstage = FALSE;
    s2fs1walk = FALSE;
    return AArch64.CreateFaultRecord(Fault_None, ipaddress, boolean UNKNOWN, level, acctype, iswrite,
                                     extflag, errortype, secondstage, s2fs1walk);
// AArch64.PermissionFault()
// =========================

FaultRecord AArch64.PermissionFault(bits(52) ipaddress, boolean NS, integer level,
                                    AccType acctype, boolean iswrite, boolean secondstage,
                                    boolean s2fs1walk)
    extflag = bit UNKNOWN;
    errortype = bits(2) UNKNOWN;
    return AArch64.CreateFaultRecord(Fault_Permission, ipaddress, NS, level, acctype, iswrite,
                                     extflag, errortype, secondstage, s2fs1walk);
// AArch64.TranslationFault()
// ==========================

FaultRecord AArch64.TranslationFault(bits(52) ipaddress, boolean NS, integer level,
                                     AccType acctype, boolean iswrite, boolean secondstage,
                                     boolean s2fs1walk)
    extflag = bit UNKNOWN;
    errortype = bits(2) UNKNOWN;
    return AArch64.CreateFaultRecord(Fault_Translation, ipaddress, NS, level, acctype, iswrite,
                                     extflag, errortype, secondstage, s2fs1walk);
// AArch64.CheckAndUpdateDescriptor()
// ==================================
// Check and update translation table descriptor if hardware update is configured

FaultRecord AArch64.CheckAndUpdateDescriptor(DescriptorUpdate result, FaultRecord fault,
                                             boolean secondstage, bits(64) vaddress, AccType acctype,
                                             boolean iswrite, boolean s2fs1walk, boolean hwupdatewalk)
    boolean hw_update_AF = FALSE;
    boolean hw_update_AP = FALSE;

    // Check if access flag can be updated
    // Address translation instructions are permitted to update AF but not required
    if result.AF then
        if fault.statuscode == Fault_None || ConstrainUnpredictable(Unpredictable_AFUPDATE) == Constraint_TRUE then
            hw_update_AF = TRUE;

    if result.AP && fault.statuscode == Fault_None then
        write_perm_req = (iswrite || acctype IN {AccType_ATOMICRW, AccType_ORDEREDRW, AccType_ORDEREDATOMICRW}) && !s2fs1walk;
        hw_update_AP = (write_perm_req && !(acctype IN {AccType_AT, AccType_DC, AccType_DC_UNPRIV})) || hwupdatewalk;

    if hw_update_AF || hw_update_AP then
        if secondstage || !HasS2Translation() then
            descaddr2 = result.descaddr;
        else
            hwupdatewalk = TRUE;
            descaddr2 = AArch64.SecondStageWalk(result.descaddr, vaddress, acctype, iswrite, 8, hwupdatewalk);
            if IsFault(descaddr2) then
                return descaddr2.fault;

        accdesc = CreateAccessDescriptor(AccType_ATOMICRW);
        desc = _Mem[descaddr2, 8, accdesc];
        el = AArch64.AccessUsesEL(acctype);
        case el of
            when EL3
                reversedescriptors = SCTLR_EL3.EE == '1';
            when EL2
                reversedescriptors = SCTLR_EL2.EE == '1';
            otherwise
                reversedescriptors = SCTLR_EL1.EE == '1';
        if reversedescriptors then
            desc = BigEndianReverse(desc);

        if hw_update_AF then
            desc<10> = '1';
        if hw_update_AP then
            desc<7> = (if secondstage then '1' else '0');

        _Mem[descaddr2, 8, accdesc] = if reversedescriptors then BigEndianReverse(desc) else desc;

    return fault;
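Example (informative): the descriptor bits touched by the hardware update above, as plain C bit operations on a 64-bit translation table descriptor. Editorial only; hw_update_descriptor is an illustrative name, and the endianness reversal and second stage walk are left to the caller.

#include <stdint.h>
#include <stdbool.h>

static uint64_t hw_update_descriptor(uint64_t desc, bool set_af, bool update_ap, bool secondstage) {
    if (set_af)
        desc |= (1ULL << 10);     /* Access Flag: desc<10> = '1' */
    if (update_ap) {
        if (secondstage)
            desc |= (1ULL << 7);  /* stage 2: desc<7> = '1' */
        else
            desc &= ~(1ULL << 7); /* stage 1: desc<7> = '0' */
    }
    return desc;
}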
// AArch64.FirstStageTranslate()
// =============================
// Perform a stage 1 translation walk. The function used by Address Translation operations is
// similar except it uses the translation regime specified for the instruction.

AddressDescriptor AArch64.FirstStageTranslate(bits(64) vaddress, AccType acctype, boolean iswrite,
                                              boolean wasaligned, integer size)
    if HaveNV2Ext() && acctype == AccType_NV2REGISTER then
        s1_enabled = SCTLR_EL2.M == '1';
    elsif HasS2Translation() then
        s1_enabled = HCR_EL2.TGE == '0' && HCR_EL2.DC == '0' && SCTLR_EL1.M == '1';
    else
        s1_enabled = SCTLR[].M == '1';

    ipaddress = bits(52) UNKNOWN;
    secondstage = FALSE;
    s2fs1walk = FALSE;

    if s1_enabled then // First stage enabled
        S1 = AArch64.TranslationTableWalk(ipaddress, TRUE, vaddress, acctype, iswrite, secondstage,
                                          s2fs1walk, size);
        permissioncheck = TRUE;
        if acctype == AccType_IFETCH then
            InGuardedPage = S1.GP == '1'; // Global state updated on instruction fetch that denotes
                                          // if the fetched instruction is from a guarded page.
    else
        S1 = AArch64.TranslateAddressS1Off(vaddress, acctype, iswrite);
        permissioncheck = FALSE;

    if UsingAArch32() && HaveTrapLoadStoreMultipleDeviceExt() && AArch32.ExecutingLSMInstr() then
        if S1.addrdesc.memattrs.memtype == MemType_Device && S1.addrdesc.memattrs.device != DeviceType_GRE then
            nTLSMD = if S1TranslationRegime() == EL2 then SCTLR_EL2.nTLSMD else SCTLR_EL1.nTLSMD;
            if nTLSMD == '0' then
                S1.addrdesc.fault = AArch64.AlignmentFault(acctype, iswrite, secondstage);

    // Check for unaligned data accesses to Device memory
    if ((!wasaligned && acctype != AccType_IFETCH) || (acctype == AccType_DCZVA))
        && S1.addrdesc.memattrs.memtype == MemType_Device && !IsFault(S1.addrdesc) then
        S1.addrdesc.fault = AArch64.AlignmentFault(acctype, iswrite, secondstage);

    if !IsFault(S1.addrdesc) && permissioncheck then
        S1.addrdesc.fault = AArch64.CheckPermission(S1.perms, vaddress, S1.level,
                                                    S1.addrdesc.paddress.NS,
                                                    acctype, iswrite);

    // Check for instruction fetches from Device memory not marked as execute-never. If there has
    // not been a Permission Fault then the memory is not marked execute-never.
    if (!IsFault(S1.addrdesc) && S1.addrdesc.memattrs.memtype == MemType_Device &&
        acctype == AccType_IFETCH) then
        S1.addrdesc = AArch64.InstructionDevice(S1.addrdesc, vaddress, ipaddress, S1.level,
                                                acctype, iswrite,
                                                secondstage, s2fs1walk);

    // Check and update translation table descriptor if required
    hwupdatewalk = FALSE;
    s2fs1walk = FALSE;
    S1.addrdesc.fault = AArch64.CheckAndUpdateDescriptor(S1.descupdate, S1.addrdesc.fault,
                                                         secondstage, vaddress, acctype,
                                                         iswrite, s2fs1walk, hwupdatewalk);

    return S1.addrdesc;
// AArch64.FullTranslate()
// =======================
// Perform both stage 1 and stage 2 translation walks for the current translation regime. The
// function used by Address Translation operations is similar except it uses the translation
// regime specified for the instruction.

AddressDescriptor AArch64.FullTranslate(bits(64) vaddress, AccType acctype, boolean iswrite,
                                        boolean wasaligned, integer size)
    // First Stage Translation
    S1 = AArch64.FirstStageTranslate(vaddress, acctype, iswrite, wasaligned, size);
    if !IsFault(S1) && !(HaveNV2Ext() && acctype == AccType_NV2REGISTER) && HasS2Translation() then
        s2fs1walk = FALSE;
        hwupdatewalk = FALSE;
        result = AArch64.SecondStageTranslate(S1, vaddress, acctype, iswrite, wasaligned, s2fs1walk,
                                              size, hwupdatewalk);
    else
        result = S1;

    return result;
// AArch64.SecondStageTranslate()
// ==============================
// Perform a stage 2 translation walk. The function used by Address Translation operations is
// similar except it uses the translation regime specified for the instruction.

AddressDescriptor AArch64.SecondStageTranslate(AddressDescriptor S1, bits(64) vaddress,
                                               AccType acctype, boolean iswrite, boolean wasaligned,
                                               boolean s2fs1walk, integer size, boolean hwupdatewalk)
    assert HasS2Translation();

    s2_enabled = HCR_EL2.VM == '1' || HCR_EL2.DC == '1';
    secondstage = TRUE;

    if s2_enabled then // Second stage enabled
        ipaddress = S1.paddress.address<51:0>;
        NS = S1.paddress.NS == '1';
        S2 = AArch64.TranslationTableWalk(ipaddress, NS, vaddress, acctype, iswrite, secondstage,
                                          s2fs1walk, size);

        // Check for unaligned data accesses to Device memory
        if ((!wasaligned && acctype != AccType_IFETCH) || (acctype == AccType_DCZVA))
            && S2.addrdesc.memattrs.memtype == MemType_Device && !IsFault(S2.addrdesc) then
            S2.addrdesc.fault = AArch64.AlignmentFault(acctype, iswrite, secondstage);

        // Check for permissions on Stage2 translations
        if !IsFault(S2.addrdesc) then
            S2.addrdesc.fault = AArch64.CheckS2Permission(S2.perms, vaddress, ipaddress, S2.level,
                                                          acctype, iswrite, NS, s2fs1walk, hwupdatewalk);

        // Check for instruction fetches from Device memory not marked as execute-never. If there
        // has not been a Permission Fault then the memory is not marked execute-never.
        if (!s2fs1walk && !IsFault(S2.addrdesc) && S2.addrdesc.memattrs.memtype == MemType_Device &&
            acctype == AccType_IFETCH) then
            S2.addrdesc = AArch64.InstructionDevice(S2.addrdesc, vaddress, ipaddress, S2.level,
                                                    acctype, iswrite,
                                                    secondstage, s2fs1walk);

        // Check for protected table walk
        if (s2fs1walk && !IsFault(S2.addrdesc) && HCR_EL2.PTW == '1' &&
            S2.addrdesc.memattrs.memtype == MemType_Device) then
            S2.addrdesc.fault = AArch64.PermissionFault(ipaddress, NS, S2.level, acctype,
                                                        iswrite, secondstage, s2fs1walk);

        // Check and update translation table descriptor if required
        S2.addrdesc.fault = AArch64.CheckAndUpdateDescriptor(S2.descupdate, S2.addrdesc.fault,
                                                             secondstage, vaddress, acctype,
                                                             iswrite, s2fs1walk, hwupdatewalk);

        result = CombineS1S2Desc(S1, S2.addrdesc);
    else
        result = S1;

    return result;

// AArch64.SecondStageWalk()
// =========================
// Perform a stage 2 translation on a stage 1 translation page table walk access.

AddressDescriptor AArch64.SecondStageWalk(AddressDescriptor S1, bits(64) vaddress, AccType acctype,
                                          boolean iswrite, integer size, boolean hwupdatewalk)
    assert HasS2Translation();

    s2fs1walk = TRUE;
    wasaligned = TRUE;
    return AArch64.SecondStageTranslate(S1, vaddress, acctype, iswrite, wasaligned, s2fs1walk,
                                        size, hwupdatewalk);

// AArch64.TranslateAddress()
// ==========================
// Main entry point for translating an address

AddressDescriptor AArch64.TranslateAddress(bits(64) vaddress, AccType acctype, boolean iswrite,
                                           boolean wasaligned, integer size)
    result = AArch64.FullTranslate(vaddress, acctype, iswrite, wasaligned, size);

    if !(acctype IN {AccType_PTW, AccType_IC, AccType_AT}) && !IsFault(result) then
        result.fault = AArch64.CheckDebug(vaddress, acctype, iswrite, size);

    // Update virtual address for abort functions
    result.vaddress = ZeroExtend(vaddress);

    return result;
// AArch64.SecondStageWalk()
// =========================
// Perform a stage 2 translation on a stage 1 translation page table walk access.

AddressDescriptor AArch64.SecondStageWalk(AddressDescriptor S1, bits(64) vaddress, AccType acctype,
                                          boolean iswrite, integer size, boolean hwupdatewalk)
    assert HasS2Translation();

    s2fs1walk = TRUE;
    wasaligned = TRUE;
    return AArch64.SecondStageTranslate(S1, vaddress, acctype, iswrite, wasaligned, s2fs1walk,
                                        size, hwupdatewalk);

// AArch64.TranslateAddress()
// ==========================
// Main entry point for translating an address

AddressDescriptor AArch64.TranslateAddress(bits(64) vaddress, AccType acctype, boolean iswrite,
                                           boolean wasaligned, integer size)
    result = AArch64.FullTranslate(vaddress, acctype, iswrite, wasaligned, size);

    if !(acctype IN {AccType_PTW, AccType_IC, AccType_AT}) && !IsFault(result) then
        result.fault = AArch64.CheckDebug(vaddress, acctype, iswrite, size);

    // Update virtual address for abort functions
    result.vaddress = ZeroExtend(vaddress);

    return result;
// AArch64.TranslationTableWalk()
// ==============================
// Returns a result of a translation table walk
//
// Implementations might cache information from memory in any number of non-coherent TLB
// caching structures, and so avoid memory accesses that have been expressed in this
// pseudocode. The use of such TLBs is not expressed in this pseudocode.
TLBRecord AArch64.TranslationTableWalk(bits(52) ipaddress, boolean s1_nonsecure, bits(64) vaddress,
                                       AccType acctype, boolean iswrite, boolean secondstage,
                                       boolean s2fs1walk, integer size)
    if !secondstage then
        assert !ELUsingAArch32(S1TranslationRegime());
    else
        assert (IsSecureEL2Enabled() || (HaveEL(EL2) && !IsSecure() && !ELUsingAArch32(EL2))) && HasS2Translation();

    TLBRecord result;
    AddressDescriptor descaddr;
    bits(64) baseregister;
    bits(64) inputaddr;    // Input Address is 'vaddress' for stage 1, 'ipaddress' for stage 2
    bit nswalk;            // Stage 2 translation table walks are to Secure or to Non-secure PA space

    descaddr.memattrs.memtype = MemType_Normal;

    // Derived parameters for the page table walk:
// grainsize = Log2(Size of Table) - Size of Table is 4KB, 16KB or 64KB in AArch64
// stride = Log2(Address per Level) - Bits of address consumed at each level
// firstblocklevel = First level where a block entry is allowed
// ps = Physical Address size as encoded in TCR_EL1.IPS or TCR_ELx/VTCR_EL2.PS
// inputsize = Log2(Size of Input Address) - Input Address size in bits
// level = Level to start walk from
// This means that the number of levels after start level = 3-level
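    // Informative example: for the 4KB granule, grainsize = 12 and stride = 9; for the 16KB
    // granule, grainsize = 14 and stride = 11; for the 64KB granule, grainsize = 16 and
    // stride = 13. Each level of lookup therefore resolves 9, 11 or 13 bits of the input address.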
if !secondstage then
// First stage translation
inputaddr = ZeroExtend(vaddress);
el = AArch64.AccessUsesEL(acctype);
top = AddrTop(inputaddr, (acctype == AccType_IFETCH), el);
if el == EL3 then
largegrain = TCR_EL3.TG0 == '01';
midgrain = TCR_EL3.TG0 == '10';
inputsize = 64 - UInt(TCR_EL3.T0SZ);
inputsize_max = if Have52BitVAExt() && largegrain then 52 else 48;
inputsize_min = 64 - (if !HaveSmallPageTblExt() then 39 else if largegrain then 47 else 48);
if inputsize < inputsize_min then
c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
assert c IN {Constraint_FORCE, Constraint_FAULT};
if c == Constraint_FORCE then inputsize = inputsize_min;
ps = TCR_EL3.PS;
basefound = inputsize >= inputsize_min && inputsize <= inputsize_max && IsZero(inputaddr<top:inputsize>);
disabled = FALSE;
baseregister = TTBR0_EL3;
descaddr.memattrs = WalkAttrDecode(TCR_EL3.SH0, TCR_EL3.ORGN0, TCR_EL3.IRGN0, secondstage);
reversedescriptors = SCTLR_EL3.EE == '1';
lookupsecure = TRUE;
singlepriv = TRUE;
update_AF = HaveAccessFlagUpdateExt() && TCR_EL3.HA == '1';
update_AP = HaveDirtyBitModifierExt() && update_AF && TCR_EL3.HD == '1';
hierattrsdisabled = AArch64.HaveHPDExt() && TCR_EL3.HPD == '1';
elsif ELIsInHost(el) then
if inputaddr<top> == '0' then
largegrain = TCR_EL2.TG0 == '01';
midgrain = TCR_EL2.TG0 == '10';
inputsize = 64 - UInt(TCR_EL2.T0SZ);
inputsize_max = if Have52BitVAExt() && largegrain then 52 else 48;
inputsize_min = 64 - (if !HaveSmallPageTblExt() then 39 else if largegrain then 47 else 48);
if inputsize < inputsize_min then
c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
assert c IN {Constraint_FORCE, Constraint_FAULT};
if c == Constraint_FORCE then inputsize = inputsize_min;
basefound = inputsize >= inputsize_min && inputsize <= inputsize_max && IsZero(inputaddr<top:inputsize>);
disabled = TCR_EL2.EPD0 == '1' || (PSTATE.EL == EL0 && HaveE0PDExt() && TCR_EL2.E0PD0 == '1');
baseregister = TTBR0_EL2;
descaddr.memattrs = WalkAttrDecode(TCR_EL2.SH0, TCR_EL2.ORGN0, TCR_EL2.IRGN0, secondstage);
hierattrsdisabled = AArch64.HaveHPDExt() && TCR_EL2.HPD0 == '1';
else
inputsize = 64 - UInt(TCR_EL2.T1SZ);
largegrain = TCR_EL2.TG1 == '11'; // TG1 and TG0 encodings differ
midgrain = TCR_EL2.TG1 == '01';
inputsize_max = if Have52BitVAExt() && largegrain then 52 else 48;
inputsize_min = 64 - (if !HaveSmallPageTblExt() then 39 else if largegrain then 47 else 48);
if inputsize < inputsize_min then
c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
assert c IN {Constraint_FORCE, Constraint_FAULT};
if c == Constraint_FORCE then inputsize = inputsize_min;
basefound = inputsize >= inputsize_min && inputsize <= inputsize_max && IsOnes(inputaddr<top:inputsize>);
disabled = TCR_EL2.EPD1 == '1' || (PSTATE.EL == EL0 && HaveE0PDExt() && TCR_EL2.E0PD1 == '1');
baseregister = TTBR1_EL2;
descaddr.memattrs = WalkAttrDecode(TCR_EL2.SH1, TCR_EL2.ORGN1, TCR_EL2.IRGN1, secondstage);
hierattrsdisabled = AArch64.HaveHPDExt() && TCR_EL2.HPD1 == '1';
ps = TCR_EL2.IPS;
reversedescriptors = SCTLR_EL2.EE == '1';
lookupsecure = if IsSecureEL2Enabled() then IsSecure() else FALSE;
singlepriv = FALSE;
update_AF = HaveAccessFlagUpdateExt() && TCR_EL2.HA == '1';
update_AP = HaveDirtyBitModifierExt() && update_AF && TCR_EL2.HD == '1';
elsif el == EL2 then
inputsize = 64 - UInt(TCR_EL2.T0SZ);
largegrain = TCR_EL2.TG0 == '01';
midgrain = TCR_EL2.TG0 == '10';
inputsize_max = if Have52BitVAExt() && largegrain then 52 else 48;
inputsize_min = 64 - (if !HaveSmallPageTblExt() then 39 else if largegrain then 47 else 48);
if inputsize < inputsize_min then
c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
assert c IN {Constraint_FORCE, Constraint_FAULT};
if c == Constraint_FORCE then inputsize = inputsize_min;
ps = TCR_EL2.PS;
basefound = inputsize >= inputsize_min && inputsize <= inputsize_max && IsZero(inputaddr<top:inputsize>);
disabled = FALSE;
baseregister = TTBR0_EL2;
descaddr.memattrs = WalkAttrDecode(TCR_EL2.SH0, TCR_EL2.ORGN0, TCR_EL2.IRGN0, secondstage);
reversedescriptors = SCTLR_EL2.EE == '1';
lookupsecure = if IsSecureEL2Enabled() then IsSecure() else FALSE;
singlepriv = TRUE;
update_AF = HaveAccessFlagUpdateExt() && TCR_EL2.HA == '1';
update_AP = HaveDirtyBitModifierExt() && update_AF && TCR_EL2.HD == '1';
hierattrsdisabled = AArch64.HaveHPDExt() && TCR_EL2.HPD == '1';
else
if inputaddr<top> == '0' then
inputsize = 64 - UInt(TCR_EL1.T0SZ);
largegrain = TCR_EL1.TG0 == '01';
midgrain = TCR_EL1.TG0 == '10';
inputsize_max = if Have52BitVAExt() && largegrain then 52 else 48;
inputsize_min = 64 - (if !HaveSmallPageTblExt() then 39 else if largegrain then 47 else 48);
if inputsize < inputsize_min then
c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
assert c IN {Constraint_FORCE, Constraint_FAULT};
if c == Constraint_FORCE then inputsize = inputsize_min;
basefound = inputsize >= inputsize_min && inputsize <= inputsize_max && IsZero(inputaddr<top:inputsize>);
disabled = TCR_EL1.EPD0 == '1' || (PSTATE.EL == EL0 && HaveE0PDExt() && TCR_EL1.E0PD0 == '1');
disabled = disabled || (el == EL0 && acctype == AccType_NONFAULT && TCR_EL1.NFD0 == '1');
baseregister = TTBR0_EL1;
descaddr.memattrs = WalkAttrDecode(TCR_EL1.SH0, TCR_EL1.ORGN0, TCR_EL1.IRGN0, secondstage);
hierattrsdisabled = AArch64.HaveHPDExt() && TCR_EL1.HPD0 == '1';
else
inputsize = 64 - UInt(TCR_EL1.T1SZ);
largegrain = TCR_EL1.TG1 == '11'; // TG1 and TG0 encodings differ
midgrain = TCR_EL1.TG1 == '01';
inputsize_max = if Have52BitVAExt() && largegrain then 52 else 48;
inputsize_min = 64 - (if !HaveSmallPageTblExt() then 39 else if largegrain then 47 else 48);
if inputsize < inputsize_min then
c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
assert c IN {Constraint_FORCE, Constraint_FAULT};
if c == Constraint_FORCE then inputsize = inputsize_min;
basefound = inputsize >= inputsize_min && inputsize <= inputsize_max && IsOnes(inputaddr<top:inputsize>);
disabled = TCR_EL1.EPD1 == '1' || (PSTATE.EL == EL0 && HaveE0PDExt() && TCR_EL1.E0PD1 == '1');
disabled = disabled || (el == EL0 && acctype == AccType_NONFAULT && TCR_EL1.NFD1 == '1');
baseregister = TTBR1_EL1;
descaddr.memattrs = WalkAttrDecode(TCR_EL1.SH1, TCR_EL1.ORGN1, TCR_EL1.IRGN1, secondstage);
hierattrsdisabled = AArch64.HaveHPDExt() && TCR_EL1.HPD1 == '1';
ps = TCR_EL1.IPS;
reversedescriptors = SCTLR_EL1.EE == '1';
lookupsecure = IsSecure();
singlepriv = FALSE;
update_AF = HaveAccessFlagUpdateExt() && TCR_EL1.HA == '1';
update_AP = HaveDirtyBitModifierExt() && update_AF && TCR_EL1.HD == '1';
if largegrain then
grainsize = 16; // Log2(64KB page size)
firstblocklevel = (if Have52BitPAExt() then 1 else 2); // Largest block is 4TB (2^42 bytes) for 52 bit PA
// and 512MB (2^29 bytes) otherwise
elsif midgrain then
grainsize = 14; // Log2(16KB page size)
firstblocklevel = 2; // Largest block is 32MB (2^25 bytes)
else // Small grain
grainsize = 12; // Log2(4KB page size)
firstblocklevel = 1; // Largest block is 1GB (2^30 bytes)
stride = grainsize - 3; // Log2(page size / 8 bytes)
// The starting level is the number of strides needed to consume the input address
level = 4 - RoundUp(Real(inputsize - grainsize) / Real(stride));
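        // Informative example: with a 4KB granule (grainsize = 12, stride = 9) and a 48-bit input
        // address, RoundUp((48 - 12) / 9) = 4 strides are needed, so the walk starts at level 0;
        // with a 39-bit input address, 3 strides are needed and the walk starts at level 1.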
else
// Second stage translation
inputaddr = ZeroExtend(ipaddress);
if IsSecureBelowEL3() then
// Second stage for Secure translation regime
if s1_nonsecure then // Non-secure IPA space
t0size = VTCR_EL2.T0SZ;
tg0 = VTCR_EL2.TG0;
nswalk = VTCR_EL2.NSW;
else // Secure IPA space
t0size = VSTCR_EL2.T0SZ;
tg0 = VSTCR_EL2.TG0;
nswalk = VSTCR_EL2.SW;
// Stage 2 translation accesses the Non-secure PA space or the Secure PA space
if nswalk == '1' then
// When walk is Non-secure, access must be to the Non-secure PA space
nsaccess = '1';
elsif !s1_nonsecure then
// When walk is Secure and in the Secure IPA space,
// access is specified by VSTCR_EL2.SA
nsaccess = VSTCR_EL2.SA;
elsif VSTCR_EL2.SW == '1' || VSTCR_EL2.SA == '1' then
// When walk is Secure and in the Non-secure IPA space,
// access is Non-secure when VSTCR_EL2.SA specifies the Non-secure PA space
nsaccess = '1';
else
// When walk is Secure and in the Non-secure IPA space,
// if VSTCR_EL2.SA specifies the Secure PA space, access is specified by VTCR_EL2.NSA
nsaccess = VTCR_EL2.NSA;
else
// Second stage for Non-secure translation regime
t0size = VTCR_EL2.T0SZ;
tg0 = VTCR_EL2.TG0;
nswalk = '1';
nsaccess = '1';
inputsize = 64 - UInt(t0size);
largegrain = tg0 == '01';
midgrain = tg0 == '10';
inputsize_max = if Have52BitPAExt() && PAMax() == 52 && largegrain then 52 else 48;
inputsize_min = 64 - (if !HaveSmallPageTblExt() then 39 else if largegrain then 47 else 48);
if inputsize < inputsize_min then
c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
assert c IN {Constraint_FORCE, Constraint_FAULT};
if c == Constraint_FORCE then inputsize = inputsize_min;
ps = VTCR_EL2.PS;
basefound = inputsize >= inputsize_min && inputsize <= inputsize_max && IsZero(inputaddr<63:inputsize>);
disabled = FALSE;
descaddr.memattrs = WalkAttrDecode(VTCR_EL2.SH0, VTCR_EL2.ORGN0, VTCR_EL2.IRGN0, secondstage);
reversedescriptors = SCTLR_EL2.EE == '1';
singlepriv = TRUE;
update_AF = HaveAccessFlagUpdateExt() && VTCR_EL2.HA == '1';
update_AP = HaveDirtyBitModifierExt() && update_AF && VTCR_EL2.HD == '1';
if IsSecureEL2Enabled() then
lookupsecure = !s1_nonsecure;
else
lookupsecure = FALSE;
if lookupsecure then
baseregister = VSTTBR_EL2;
startlevel = UInt(VSTCR_EL2.SL0);
else
baseregister = VTTBR_EL2;
startlevel = UInt(VTCR_EL2.SL0);
if largegrain then
grainsize = 16; // Log2(64KB page size)
level = 3 - startlevel;
firstblocklevel = (if Have52BitPAExt() then 1 else 2); // Largest block is 4TB (2^42 bytes) for 52 bit PA
// and 512MB (2^29 bytes) otherwise
elsif midgrain then
grainsize = 14; // Log2(16KB page size)
level = 3 - startlevel;
firstblocklevel = 2; // Largest block is 32MB (2^25 bytes)
else // Small grain
grainsize = 12; // Log2(4KB page size)
if HaveSmallPageTblExt() && startlevel == 3 then
                level = startlevel; // Startlevel 3 (VTCR_EL2.SL0 or VSTCR_EL2.SL0 == 0b11) for 4KB granule
else
level = 2 - startlevel;
firstblocklevel = 1; // Largest block is 1GB (2^30 bytes)
stride = grainsize - 3; // Log2(page size / 8 bytes)
// Limits on IPA controls based on implemented PA size. Level 0 is only
// supported by small grain translations
if largegrain then // 64KB pages
// Level 1 only supported if implemented PA size is greater than 2^42 bytes
if level == 0 || (level == 1 && PAMax() <= 42) then basefound = FALSE;
elsif midgrain then // 16KB pages
// Level 1 only supported if implemented PA size is greater than 2^40 bytes
if level == 0 || (level == 1 && PAMax() <= 40) then basefound = FALSE;
else // Small grain, 4KB pages
// Level 0 only supported if implemented PA size is greater than 2^42 bytes
if level < 0 || (level == 0 && PAMax() <= 42) then basefound = FALSE;
// If the inputsize exceeds the PAMax value, the behavior is CONSTRAINED UNPREDICTABLE
inputsizecheck = inputsize;
if inputsize > PAMax() && (!ELUsingAArch32(EL1) || inputsize > 40) then
case ConstrainUnpredictable(Unpredictable_LARGEIPA) of
when Constraint_FORCE
// Restrict the inputsize to the PAMax value
inputsize = PAMax();
inputsizecheck = PAMax();
when Constraint_FORCENOSLCHECK
// As FORCE, except use the configured inputsize in the size checks below
inputsize = PAMax();
when Constraint_FAULT
// Generate a translation fault
basefound = FALSE;
otherwise
Unreachable();
// Number of entries in the starting level table =
// (Size of Input Address)/((Address per level)^(Num levels remaining)*(Size of Table))
startsizecheck = inputsizecheck - ((3 - level)*stride + grainsize); // Log2(Num of entries)
// Check for starting level table with fewer than 2 entries or longer than 16 pages.
// Lower bound check is: startsizecheck < Log2(2 entries)
// Upper bound check is: startsizecheck > Log2(pagesize/8*16)
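    // Informative example: a 4KB granule walk starting at level 0 with inputsize = 48 gives
    // startsizecheck = 48 - (3*9 + 12) = 9, that is a 512-entry (4KB) starting level table,
    // which satisfies both bounds.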
if startsizecheck < 1 || startsizecheck > stride + 4 then basefound = FALSE;
if !basefound || disabled then
level = 0; // AArch32 reports this as a level 1 fault
result.addrdesc.fault = AArch64.TranslationFault(ipaddress, s1_nonsecure, level, acctype, iswrite,
secondstage, s2fs1walk);
return result;
case ps of
when '000' outputsize = 32;
when '001' outputsize = 36;
when '010' outputsize = 40;
when '011' outputsize = 42;
when '100' outputsize = 44;
when '101' outputsize = 48;
when '110' outputsize = (if Have52BitPAExt() && largegrain then 52 else 48);
otherwise outputsize = integer IMPLEMENTATION_DEFINED "Reserved Intermediate Physical Address size value";
if outputsize > PAMax() then outputsize = PAMax();
if outputsize < 48 && !IsZero(baseregister<47:outputsize>) then
level = 0;
        result.addrdesc.fault = AArch64.AddressSizeFault(ipaddress, s1_nonsecure, level, acctype, iswrite,
secondstage, s2fs1walk);
return result;
// Bottom bound of the Base address is:
// Log2(8 bytes per entry)+Log2(Number of entries in starting level table)
// Number of entries in starting level table =
// (Size of Input Address)/((Address per level)^(Num levels remaining)*(Size of Table))
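    // Informative example: for a 4KB granule walk starting at level 0 with inputsize = 48,
    // baselowerbound = 3 + 48 - 39 = 12, so the 512-entry starting level table is 4KB aligned.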
baselowerbound = 3 + inputsize - ((3-level)*stride + grainsize); // Log2(Num of entries*8)
if outputsize == 52 then
z = (if baselowerbound < 6 then 6 else baselowerbound);
baseaddress = baseregister<5:2>:baseregister<47:z>:Zeros(z);
else
baseaddress = ZeroExtend(baseregister<47:baselowerbound>:Zeros(baselowerbound));
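    // Informative: when outputsize == 52, baseregister<5:2> supplies bits[51:48] of the table
    // base address, as the concatenation above shows.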
ns_table = if lookupsecure then '0' else '1';
ap_table = '00';
xn_table = '0';
pxn_table = '0';
addrselecttop = inputsize - 1;
apply_nvnv1_effect = HaveNVExt() && EL2Enabled() && HCR_EL2.<NV,NV1> == '11' && S1TranslationRegime() == EL1 && !secondstage;
repeat
addrselectbottom = (3-level)*stride + grainsize;
bits(52) index = ZeroExtend(inputaddr<addrselecttop:addrselectbottom>:'000');
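        // Informative example: at level 0 with a 4KB granule, addrselectbottom = 39, so 'index'
        // selects inputaddr<47:39> and scales it by the 8-byte descriptor size (the ':000').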
descaddr.paddress.address = baseaddress OR index;
descaddr.paddress.NS = if secondstage then nswalk else ns_table;
// If there are two stages of translation, then the first stage table walk addresses
// are themselves subject to translation
        if secondstage || !HasS2Translation() || (HaveNV2Ext() && acctype == AccType_NV2REGISTER) then
descaddr2 = descaddr;
else
hwupdatewalk = FALSE;
descaddr2 = AArch64.SecondStageWalk(descaddr, vaddress, acctype, iswrite, 8, hwupdatewalk);
// Check for a fault on the stage 2 walk
if IsFault(descaddr2) then
result.addrdesc.fault = descaddr2.fault;
return result;
// Update virtual address for abort functions
descaddr2.vaddress = ZeroExtend(vaddress);
accdesc = CreateAccessDescriptorPTW(acctype, secondstage, s2fs1walk, level);
desc = _Mem[descaddr2, 8, accdesc];
if reversedescriptors then desc = BigEndianReverse(desc);
if desc<0> == '0' || (desc<1:0> == '01' && (level == 3 ||
(HaveBlockBBM() && IsBlockDescriptorNTBitValid() && desc<16> == '1'))) then
// Fault (00), Reserved (10), Block (01) at level 3, or Block(01) with nT bit set.
result.addrdesc.fault = AArch64.TranslationFault(ipaddress, s1_nonsecure, level, acctype,
iswrite, secondstage, s2fs1walk);
return result;
// Valid Block, Page, or Table entry
if desc<1:0> == '01' || level == 3 then // Block (01) or Page (11)
blocktranslate = TRUE;
else // Table (11)
if (outputsize < 52 && largegrain && !IsZero(desc<15:12>)) || (outputsize < 48 && !IsZero(desc<47:outputsize>)) then
                result.addrdesc.fault = AArch64.AddressSizeFault(ipaddress, s1_nonsecure, level, acctype,
iswrite, secondstage, s2fs1walk);
return result;
if outputsize == 52 then
baseaddress = desc<15:12>:desc<47:grainsize>:Zeros(grainsize);
else
baseaddress = ZeroExtend(desc<47:grainsize>:Zeros(grainsize));
if !secondstage then
// Unpack the upper and lower table attributes
ns_table = ns_table OR desc<63>;
if !secondstage && !hierattrsdisabled then
ap_table<1> = ap_table<1> OR desc<62>; // read-only
if apply_nvnv1_effect then
pxn_table = pxn_table OR desc<60>;
else
xn_table = xn_table OR desc<60>;
// pxn_table and ap_table[0] apply in EL1&0 or EL2&0 translation regimes
if !singlepriv then
if !apply_nvnv1_effect then
pxn_table = pxn_table OR desc<59>;
ap_table<0> = ap_table<0> OR desc<61>; // privileged
level = level + 1;
addrselecttop = addrselectbottom - 1;
blocktranslate = FALSE;
until blocktranslate;
// Check block size is supported at this level
if level < firstblocklevel then
result.addrdesc.fault = AArch64.TranslationFault(ipaddress, s1_nonsecure, level, acctype,
iswrite, secondstage, s2fs1walk);
return result;
// Check for misprogramming of the contiguous bit
if largegrain then
contiguousbitcheck = level == 2 && inputsize < 34;
elsif midgrain then
contiguousbitcheck = level == 2 && inputsize < 30;
else
contiguousbitcheck = level == 1 && inputsize < 34;
if contiguousbitcheck && desc<52> == '1' then
if boolean IMPLEMENTATION_DEFINED "Translation fault on misprogrammed contiguous bit" then
result.addrdesc.fault = AArch64.TranslationFault(ipaddress, s1_nonsecure, level, acctype,
iswrite, secondstage, s2fs1walk);
return result;
// Unpack the descriptor into address and upper and lower block attributes
if largegrain then
outputaddress = desc<15:12>:desc<47:addrselectbottom>:inputaddr<addrselectbottom-1:0>;
else
outputaddress = ZeroExtend(desc<47:addrselectbottom>:inputaddr<addrselectbottom-1:0>);
// When 52-bit PA is supported, for 64 Kbyte translation granule,
// block size might be larger than the supported output address size
if outputsize < 52 && !IsZero(outputaddress<51:outputsize>) then
        result.addrdesc.fault = AArch64.AddressSizeFault(ipaddress, s1_nonsecure, level, acctype,
iswrite, secondstage, s2fs1walk);
return result;
// Check Access Flag
if desc<10> == '0' then
if !update_AF then
            result.addrdesc.fault = AArch64.AccessFlagFault(ipaddress, s1_nonsecure, level, acctype,
iswrite, secondstage, s2fs1walk);
return result;
else
result.descupdate.AF = TRUE;
if update_AP && desc<51> == '1' then
// If hw update of access permission field is configured consider AP[2] as '0' / S2AP[2] as '1'
if !secondstage && desc<7> == '1' then
desc<7> = '0';
result.descupdate.AP = TRUE;
elsif secondstage && desc<7> == '0' then
desc<7> = '1';
result.descupdate.AP = TRUE;
// Required descriptor if AF or AP[2]/S2AP[2] needs update
result.descupdate.descaddr = descaddr;
if apply_nvnv1_effect then
pxn = desc<54>; // Bit[54] of the block/page descriptor holds PXN instead of UXN
xn = '0'; // XN is '0'
ap = desc<7>:'01'; // Bit[6] of the block/page descriptor is treated as '0' regardless of value programmed
else
xn = desc<54>; // Bit[54] of the block/page descriptor holds UXN
pxn = desc<53>; // Bit[53] of the block/page descriptor holds PXN
ap = desc<7:6>:'1'; // Bits[7:6] of the block/page descriptor hold AP[2:1]
contiguousbit = desc<52>;
nG = desc<11>;
sh = desc<9:8>;
memattr = desc<5:2>; // AttrIndx and NS bit in stage 1
result.domain = bits(4) UNKNOWN; // Domains not used
result.level = level;
result.blocksize = 2^((3-level)*stride + grainsize);
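    // Informative example: with a 4KB granule this gives a 4KB page at level 3, a 2MB block at
    // level 2 and a 1GB block at level 1.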
// Stage 1 translation regimes also inherit attributes from the tables
if !secondstage then
result.perms.xn = xn OR xn_table;
result.perms.ap<2> = ap<2> OR ap_table<1>; // Force read-only
// PXN, nG and AP[1] apply in EL1&0 or EL2&0 stage 1 translation regimes
if !singlepriv then
result.perms.ap<1> = ap<1> AND NOT(ap_table<0>); // Force privileged only
result.perms.pxn = pxn OR pxn_table;
// Pages from Non-secure tables are marked non-global in Secure EL1&0
if IsSecure() then
result.nG = nG OR ns_table;
else
result.nG = nG;
else
result.perms.ap<1> = '1';
result.perms.pxn = '0';
result.nG = '0';
result.GP = desc<50>; // Stage 1 block or pages might be guarded
result.perms.ap<0> = '1';
result.addrdesc.memattrs = AArch64.S1AttrDecode(sh, memattr<2:0>, acctype);
result.addrdesc.paddress.NS = memattr<3> OR ns_table;
else
result.perms.ap<2:1> = ap<2:1>;
result.perms.ap<0> = '1';
result.perms.xn = xn;
if HaveExtendedExecuteNeverExt() then result.perms.xxn = desc<53>;
result.perms.pxn = '0';
result.nG = '0';
if s2fs1walk then
result.addrdesc.memattrs = S2AttrDecode(sh, memattr, AccType_PTW);
else
result.addrdesc.memattrs = S2AttrDecode(sh, memattr, acctype);
result.addrdesc.paddress.NS = nsaccess;
result.addrdesc.paddress.address = outputaddress;
result.addrdesc.fault = AArch64.NoFault();
result.contiguous = contiguousbit == '1';
    if HaveCommonNotPrivateTransExt() then result.CnP = baseregister<0>;
return result;
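// Informative note (added for this page): for a given granule, the descriptor address
// arithmetic in the repeat loop above reduces to a shift-and-mask. The C sketch below uses an
// invented function name and assumes a 4KB granule; it shows the byte offset that 'index'
// contributes at each level of the walk.

    #include <stdint.h>

    /* Invented name, illustrative only: byte offset of the descriptor read at each
     * level of the walk above, assuming a 4KB granule (grainsize = 12, stride = 9). */
    static uint64_t descriptor_offset(uint64_t inputaddr, int level)
    {
        const int grainsize = 12;                      /* Log2(4KB)                 */
        const int stride    = grainsize - 3;           /* 9 address bits per level  */
        int bottom = (3 - level) * stride + grainsize; /* addrselectbottom          */

        /* Select this level's 'stride' address bits and scale by the 8-byte
         * descriptor size, mirroring inputaddr<top:bottom>:'000' above. */
        return ((inputaddr >> bottom) & ((1ULL << stride) - 1)) << 3;
    }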
// ClearStickyErrors()
// ===================

ClearStickyErrors()
    EDSCR.TXU = '0';           // Clear TX underrun flag
    EDSCR.RXO = '0';           // Clear RX overrun flag

    if Halted() then           // in Debug state
        EDSCR.ITO = '0';       // Clear ITR overrun flag

    // If halted and the ITR is not empty then it is UNPREDICTABLE whether the EDSCR.ERR is cleared.
    // The UNPREDICTABLE behavior also affects the instructions in flight, but this is not described
    // in the pseudocode.
    if Halted() && EDSCR.ITE == '0' && ConstrainUnpredictableBool(Unpredictable_CLEARERRITEZERO) then
        return;

    EDSCR.ERR = '0';           // Clear cumulative error flag

    return;
// DebugTarget()
// =============
// Returns the debug exception target Exception level

bits(2) DebugTarget()
    secure = IsSecure();
    return DebugTargetFrom(secure);
// DebugTargetFrom()
// =================

bits(2) DebugTargetFrom(boolean secure)
    if HaveEL(EL2) && !secure then
        if ELUsingAArch32(EL2) then
            route_to_el2 = (HDCR.TDE == '1' || HCR.TGE == '1');
        else
            route_to_el2 = (MDCR_EL2.TDE == '1' || HCR_EL2.TGE == '1');
    else
        route_to_el2 = FALSE;

    if route_to_el2 then
        target = EL2;
    elsif HaveEL(EL3) && HighestELUsingAArch32() && secure then
        target = EL3;
    else
        target = EL1;

    return target;
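// Informative note: in Non-secure state, debug exceptions route to EL2 only when EL2 is
// implemented and MDCR_EL2.TDE or HCR_EL2.TGE (HDCR.TDE or HCR.TGE in AArch32) is set;
// otherwise they target EL1, or EL3 when EL3 is implemented, the highest Exception level
// uses AArch32, and the PE is in Secure state.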
// DoubleLockStatus()
// ==================
// Returns the state of the OS Double Lock.
// FALSE if OSDLR_EL1.DLK == 0 or DBGPRCR_EL1.CORENPDRQ == 1 or the PE is in Debug state.
// TRUE if OSDLR_EL1.DLK == 1 and DBGPRCR_EL1.CORENPDRQ == 0 and the PE is in Non-debug state.

boolean DoubleLockStatus()
    if !HaveDoubleLock() then
        return FALSE;
    elsif ELUsingAArch32(EL1) then
        return DBGOSDLR.DLK == '1' && DBGPRCR.CORENPDRQ == '0' && !Halted();
    else
        return OSDLR_EL1.DLK == '1' && DBGPRCR_EL1.CORENPDRQ == '0' && !Halted();
// AllowExternalDebugAccess()
// ==========================
// Returns TRUE if an external debug interface access to the External debug registers
// is allowed, FALSE otherwise.

boolean AllowExternalDebugAccess()
    // The access may also be subject to OS Lock, power-down, etc.
    if HaveSecureExtDebugView() then
        return AllowExternalDebugAccess(IsAccessSecure());
    else
        return AllowExternalDebugAccess(ExternalSecureInvasiveDebugEnabled());

// AllowExternalDebugAccess()
// ==========================
// Returns TRUE if an external debug interface access to the External debug registers
// is allowed for the given Security state, FALSE otherwise.

boolean AllowExternalDebugAccess(boolean allow_secure)
    // The access may also be subject to OS Lock, power-down, etc.
    if HaveSecureExtDebugView() || ExternalInvasiveDebugEnabled() then
        if allow_secure then
            return TRUE;
        elsif HaveEL(EL3) then
            if ELUsingAArch32(EL3) then
                return SDCR.EDAD == '0';
            else
                return MDCR_EL3.EDAD == '0';
        else
            return !IsSecure();
    else
        return FALSE;
// AllowExternalPMUAccess()
// ========================
// Returns TRUE if an external debug interface access to the PMU registers is allowed, FALSE otherwise.

boolean AllowExternalPMUAccess()
    // The access may also be subject to OS Lock, power-down, etc.
    if HaveSecureExtDebugView() then
        return AllowExternalPMUAccess(IsAccessSecure());
    else
        return AllowExternalPMUAccess(ExternalSecureNoninvasiveDebugEnabled());

// AllowExternalPMUAccess()
// ========================
// Returns TRUE if an external debug interface access to the PMU registers is allowed for the given
// Security state, FALSE otherwise.

boolean AllowExternalPMUAccess(boolean allow_secure)
    // The access may also be subject to OS Lock, power-down, etc.
    if HaveSecureExtDebugView() || ExternalNoninvasiveDebugEnabled() then
        if allow_secure then
            return TRUE;
        elsif HaveEL(EL3) then
            if ELUsingAArch32(EL3) then
                return SDCR.EPMAD == '0';
            else
                return MDCR_EL3.EPMAD == '0';
        else
            return !IsSecure();
    else
        return FALSE;
signal DBGEN;
signal NIDEN;
signal SPIDEN;
signal SPNIDEN;

// ExternalInvasiveDebugEnabled()
// ==============================
// The definition of this function is IMPLEMENTATION DEFINED.
// In the recommended interface, this function returns the state of the DBGEN signal.

boolean ExternalInvasiveDebugEnabled()
    return DBGEN == HIGH;
// ExternalNoninvasiveDebugAllowed()
// =================================
// Returns TRUE if Trace and PC Sample-based Profiling are allowed

boolean ExternalNoninvasiveDebugAllowed()
    return (ExternalNoninvasiveDebugEnabled() &&
            (!IsSecure() || ExternalSecureNoninvasiveDebugEnabled() ||
             (ELUsingAArch32(EL1) && PSTATE.EL == EL0 && SDER.SUNIDEN == '1')));
// ExternalNoninvasiveDebugEnabled()
// =================================
// This function returns TRUE if the ARMv8.4-Debug is implemented, otherwise this
// function is IMPLEMENTATION DEFINED.
// In the recommended interface, ExternalNoninvasiveDebugEnabled returns the state of the (DBGEN
// OR NIDEN) signal.

boolean ExternalNoninvasiveDebugEnabled()
    return !HaveNoninvasiveDebugAuth() || ExternalInvasiveDebugEnabled() || NIDEN == HIGH;
// ExternalSecureInvasiveDebugEnabled()
// ====================================
// The definition of this function is IMPLEMENTATION DEFINED.
// In the recommended interface, this function returns the state of the (DBGEN AND SPIDEN) signal.
// CoreSight allows asserting SPIDEN without also asserting DBGEN, but this is not recommended.

boolean ExternalSecureInvasiveDebugEnabled()
    if !HaveEL(EL3) && !IsSecure() then return FALSE;
    return ExternalInvasiveDebugEnabled() && SPIDEN == HIGH;
// ExternalSecureNoninvasiveDebugEnabled()
// =======================================
// This function returns the value of ExternalSecureInvasiveDebugEnabled() when ARMv8.4-Debug
// is implemented. Otherwise, the definition of this function is IMPLEMENTATION DEFINED.
// In the recommended interface, this function returns the state of the (DBGEN OR NIDEN) AND
// (SPIDEN OR SPNIDEN) signal.

boolean ExternalSecureNoninvasiveDebugEnabled()
    if !HaveEL(EL3) && !IsSecure() then return FALSE;
    if HaveNoninvasiveDebugAuth() then
        return ExternalNoninvasiveDebugEnabled() && (SPIDEN == HIGH || SPNIDEN == HIGH);
    else
        return ExternalSecureInvasiveDebugEnabled();
// ExternalSecureInvasiveDebugEnabled()
// ====================================
// The definition of this function is IMPLEMENTATION DEFINED.
// In the recommended interface, this function returns the state of the (DBGEN AND SPIDEN) signal.
// CoreSight allows asserting SPIDEN without also asserting DBGEN, but this is not recommended.
// Returns TRUE when an access is Secure
boolean ExternalSecureInvasiveDebugEnabled()
if !IsAccessSecure();HaveEL(EL3) && !IsSecure() then return FALSE;
return ExternalInvasiveDebugEnabled() && SPIDEN == HIGH;
// ExternalSecureNoninvasiveDebugEnabled()
// =======================================
// This function returns the value of ExternalSecureInvasiveDebugEnabled() when ARMv8.4-Debug
// is implemented. Otherwise, the definition of this function is IMPLEMENTATION DEFINED.
// In the recommended interface, this function returns the state of the (DBGEN OR NIDEN) AND
// (SPIDEN OR SPNIDEN) signal.
boolean// Set a Cross Trigger multi-cycle input event trigger to the specified level.
CTI_SetEventLevel( ExternalSecureNoninvasiveDebugEnabled()
if !HaveEL(EL3) && !IsSecure() then return FALSE;
if HaveNoninvasiveDebugAuth() then
return ExternalNoninvasiveDebugEnabled() && (SPIDEN == HIGH || SPNIDEN == HIGH);
else
return ExternalSecureInvasiveDebugEnabled();id, signal level);
// Returns TRUE when an access is Secure
boolean// Signal a discrete event on a Cross Trigger input event trigger. IsAccessSecure();CTI_SignalEvent(CrossTriggerIn id);
// Returns TRUE if the Core power domain is powered on, FALSE otherwise.
booleanenumeration IsCorePowered();CrossTriggerOut {CrossTriggerOut_DebugRequest, CrossTriggerOut_RestartRequest,
CrossTriggerOut_IRQ, CrossTriggerOut_RSVD3,
CrossTriggerOut_TraceExtIn0, CrossTriggerOut_TraceExtIn1,
CrossTriggerOut_TraceExtIn2, CrossTriggerOut_TraceExtIn3};
enumeration CrossTriggerIn {CrossTriggerIn_CrossHalt, CrossTriggerIn_PMUOverflow,
CrossTriggerIn_RSVD2, CrossTriggerIn_RSVD3,
CrossTriggerIn_TraceExtOut0, CrossTriggerIn_TraceExtOut1,
CrossTriggerIn_TraceExtOut2, CrossTriggerIn_TraceExtOut3};
// Set a Cross Trigger multi-cycle input event trigger to the specified level.
CTI_SetEventLevel(CrossTriggerIn id, signal level);
// Signal a discrete event on a Cross Trigger input event trigger.
CTI_SignalEvent(CrossTriggerIn id);
// CheckForDCCInterrupts()
// =======================
CheckForDCCInterrupts()
commrx = (EDSCR.RXfull == '1');
commtx = (EDSCR.TXfull == '0');
// COMMRX and COMMTX support is optional and not recommended for new designs.
// SetInterruptRequestLevel(InterruptID_COMMRX, if commrx then HIGH else LOW);
// SetInterruptRequestLevel(InterruptID_COMMTX, if commtx then HIGH else LOW);
// The value to be driven onto the common COMMIRQ signal.
if ELUsingAArch32(EL1) then
commirq = ((commrx && DBGDCCINT.RX == '1') ||
(commtx && DBGDCCINT.TX == '1'));
else
commirq = ((commrx && MDCCINT_EL1.RX == '1') ||
(commtx && MDCCINT_EL1.TX == '1'));
SetInterruptRequestLevel(InterruptID_COMMIRQ, if commirq then HIGH else LOW);
return;
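As a non-normative illustration, the COMMIRQ level computed above reduces to a small predicate (rx_en/tx_en stand in for the DBGDCCINT or MDCCINT_EL1 enables of whichever regime applies):
#include <stdbool.h>
static bool commirq_level(bool rxfull, bool txfull, bool rx_en, bool tx_en)
{
    bool commrx = rxfull;   /* receive data available  */
    bool commtx = !txfull;  /* transmit register empty */
    return (commrx && rx_en) || (commtx && tx_en);
}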
// DBGDTRRX_EL0[] (external write)
// ===============================
// Called on writes to debug register 0x08C.
DBGDTRRX_EL0[boolean memory_mapped] = bits(32) value
if EDPRSR<6:5,0> != '001' then // Check DLK, OSLK and PU bits
IMPLEMENTATION_DEFINED "signal slave-generated error";
return;
if EDSCR.ERR == '1' then return; // Error flag set: ignore write
// The Software lock is OPTIONAL.
if memory_mapped && EDLSR.SLK == '1' then return; // Software lock locked: ignore write
if EDSCR.RXfull == '1' || (Halted() && EDSCR.MA == '1' && EDSCR.ITE == '0') then
EDSCR.RXO = '1'; EDSCR.ERR = '1'; // Overrun condition: ignore write
return;
EDSCR.RXfull = '1';
DTRRX = value;
if Halted() && EDSCR.MA == '1' then
EDSCR.ITE = '0'; // See comments in EDITR[] (external write)
if !UsingAArch32() then
ExecuteA64(0xD5330501<31:0>); // A64 "MRS X1,DBGDTRRX_EL0"
ExecuteA64(0xB8004401<31:0>); // A64 "STR W1,[X0],#4"
X[1] = bits(64) UNKNOWN;
else
ExecuteT32(0xEE10<15:0> /*hw1*/, 0x1E15<15:0> /*hw2*/); // T32 "MRS R1,DBGDTRRXint"
ExecuteT32(0xF840<15:0> /*hw1*/, 0x1B04<15:0> /*hw2*/); // T32 "STR R1,[R0],#4"
R[1] = bits(32) UNKNOWN;
// If the store aborts, the Data Abort exception is taken and EDSCR.ERR is set to 1
if EDSCR.ERR == '1' then
EDSCR.RXfull = bit UNKNOWN;
DBGDTRRX_EL0 = bits(32) UNKNOWN;
else
// "MRS X1,DBGDTRRX_EL0" calls DBGDTR_EL0[] (read) which clears RXfull.
assert EDSCR.RXfull == '0';
EDSCR.ITE = '1'; // See comments in EDITR[] (external write)
return;
// DBGDTRRX_EL0[] (external read)
// ==============================
bits(32) DBGDTRRX_EL0[boolean memory_mapped]
return DTRRX;
// DBGDTRTX_EL0[] (external read)
// ==============================
// Called on reads of debug register 0x080.
bits(32) DBGDTRTX_EL0[boolean memory_mapped]
if EDPRSR<6:5,0> != '001' then // Check DLK, OSLK and PU bits
IMPLEMENTATION_DEFINED "signal slave-generated error";
return bits(32) UNKNOWN;
underrun = EDSCR.TXfull == '0' || (Halted() && EDSCR.MA == '1' && EDSCR.ITE == '0');
value = if underrun then bits(32) UNKNOWN else DTRTX;
if EDSCR.ERR == '1' then return value; // Error flag set: no side-effects
// The Software lock is OPTIONAL.
if memory_mapped && EDLSR.SLK == '1' then // Software lock locked: no side-effects
return value;
if underrun then
EDSCR.TXU = '1'; EDSCR.ERR = '1'; // Underrun condition: block side-effects
return value; // Return UNKNOWN
EDSCR.TXfull = '0';
if Halted() && EDSCR.MA == '1' then
EDSCR.ITE = '0'; // See comments in EDITR[] (external write)
if !UsingAArch32() then
ExecuteA64(0xB8404401<31:0>); // A64 "LDR W1,[X0],#4"
else
ExecuteT32(0xF850<15:0> /*hw1*/, 0x1B04<15:0> /*hw2*/); // T32 "LDR R1,[R0],#4"
// If the load aborts, the Data Abort exception is taken and EDSCR.ERR is set to 1
if EDSCR.ERR == '1' then
EDSCR.TXfull = bit UNKNOWN;
DBGDTRTX_EL0 = bits(32) UNKNOWN;
else
if !UsingAArch32() then
ExecuteA64(0xD5130501<31:0>); // A64 "MSR DBGDTRTX_EL0,X1"
else
ExecuteT32(0xEE00<15:0> /*hw1*/, 0x1E15<15:0> /*hw2*/); // T32 "MSR DBGDTRTXint,R1"
// "MSR DBGDTRTX_EL0,X1" calls DBGDTR_EL0[] (write) which sets TXfull.
assert EDSCR.TXfull == '1';
if !UsingAArch32() then
X[1] = bits(64) UNKNOWN;
else
R[1] = bits(32) UNKNOWN;
EDSCR.ITE = '1'; // See comments in EDITR[] (external write)
return value;
// DBGDTRTX_EL0[] (external write)
// ===============================
DBGDTRTX_EL0[boolean memory_mapped] = bits(32) value
// The Software lock is OPTIONAL.
if memory_mapped && EDLSR.SLK == '1' then return; // Software lock locked: ignore write
DTRTX = value;
return;
// DBGDTR_EL0[] (write)
// ====================
// System register writes to DBGDTR_EL0, DBGDTRTX_EL0 (AArch64) and DBGDTRTXint (AArch32)
DBGDTR_EL0[] = bits(N) value
// For MSR DBGDTRTX_EL0,<Rt> N=32, value=X[t]<31:0>, X[t]<63:32> is ignored
// For MSR DBGDTR_EL0,<Xt> N=64, value=X[t]<63:0>
assert N IN {32,64};
if EDSCR.TXfull == '1' then
value = bits(N) UNKNOWN;
// On a 64-bit write, implement a half-duplex channel
if N == 64 then DTRRX = value<63:32>;
DTRTX = value<31:0>; // 32-bit or 64-bit write
EDSCR.TXfull = '1';
return;
// DBGDTR_EL0[] (read)
// ===================
// System register reads of DBGDTR_EL0, DBGDTRRX_EL0 (AArch64) and DBGDTRRXint (AArch32)
bits(N) DBGDTR_EL0[]
// For MRS <Rt>,DBGDTRTX_EL0 N=32, X[t]=Zeros(32):result
// For MRS <Xt>,DBGDTR_EL0 N=64, X[t]=result
assert N IN {32,64};
bits(N) result;
if EDSCR.RXfull == '0' then
result = bits(N) UNKNOWN;
else
// On a 64-bit read, implement a half-duplex channel
// NOTE: the word order is reversed on reads with respect to writes
if N == 64 then result<63:32> = DTRTX;
result<31:0> = DTRRX;
EDSCR.RXfull = '0';
return result;
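A non-normative C model of the half-duplex channel implemented by the two accessors above; note the word-order reversal between write and read:
#include <stdint.h>
static uint32_t DTRRX, DTRTX;
static void dbgdtr_write64(uint64_t value)   /* MSR DBGDTR_EL0, Xt */
{
    DTRRX = (uint32_t)(value >> 32);
    DTRTX = (uint32_t)value;
}
static uint64_t dbgdtr_read64(void)          /* MRS Xt, DBGDTR_EL0 */
{
    /* Word order is reversed with respect to the write, so
     * dbgdtr_write64(0x1122334455667788) followed by dbgdtr_read64()
     * returns 0x5566778811223344. */
    return ((uint64_t)DTRTX << 32) | DTRRX;
}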
bits(32) DTRRX;
bits(32) DTRTX;
// EDITR[] (external write)
// ========================
// Called on writes to debug register 0x084.
EDITR[boolean memory_mapped] = bits(32) value
if EDPRSR<6:5,0> != '001' then // Check DLK, OSLK and PU bits
IMPLEMENTATION_DEFINED "signal slave-generated error";
return;
if EDSCR.ERR == '1' then return; // Error flag set: ignore write
// The Software lock is OPTIONAL.
if memory_mapped && EDLSR.SLK == '1' then return; // Software lock locked: ignore write
if !Halted() then return; // Non-debug state: ignore write
if EDSCR.ITE == '0' || EDSCR.MA == '1' then
EDSCR.ITO = '1'; EDSCR.ERR = '1'; // Overrun condition: block write
return;
// ITE indicates whether the processor is ready to accept another instruction; the processor
// may support multiple outstanding instructions. Unlike the "InstrCompl" flag in [v7A] there
// is no indication that the pipeline is empty (all instructions have completed). In this
// pseudocode, the assumption is that only one instruction can be executed at a time,
// meaning ITE acts like "InstrCompl".
EDSCR.ITE = '0';
if !UsingAArch32() then
ExecuteA64(value);
else
ExecuteT32(value<15:0> /*hw1*/, value<31:16> /*hw2*/);
EDSCR.ITE = '1';
return;
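As a non-normative illustration, an external debugger might drive EDITR as below; the accessor names and EDSCR bit positions are assumptions of this sketch:
#include <stdbool.h>
#include <stdint.h>
extern uint32_t read_edscr(void);
extern void write_editr(uint32_t instr);
#define EDSCR_ITE (1u << 24)   /* bit positions assumed for illustration */
#define EDSCR_ERR (1u << 6)
static bool execute_in_debug_state(uint32_t instr)
{
    while ((read_edscr() & EDSCR_ITE) == 0) { /* spin until ready */ }
    write_editr(instr);
    return (read_edscr() & EDSCR_ERR) == 0;   /* false if the write errored */
}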
// DCPSInstruction()
// =================
// Operation of the DCPS instruction in Debug state
DCPSInstruction(bits(2) target_el)
SynchronizeContext();
case target_el of
when EL1
if PSTATE.EL == EL2 || (PSTATE.EL == EL3 && !UsingAArch32()) then handle_el = PSTATE.EL;
elsif EL2Enabled() && HCR_EL2.TGE == '1' then UndefinedFault();
else handle_el = EL1;
when EL2
if !HaveEL(EL2) then UndefinedFault();
elsif PSTATE.EL == EL3 && !UsingAArch32() then handle_el = EL3;
elsif !IsSecureEL2Enabled() && IsSecure() then UndefinedFault();
else handle_el = EL2;
when EL3
if EDSCR.SDD == '1' || !HaveEL(EL3) then UndefinedFault();
handle_el = EL3;
otherwise
Unreachable();
from_secure = IsSecure();
if ELUsingAArch32(handle_el) then
if PSTATE.M == M32_Monitor then SCR.NS = '0';
assert UsingAArch32(); // Cannot move from AArch64 to AArch32
case handle_el of
when EL1
AArch32.WriteMode(M32_Svc);
if HavePANExt() && SCTLR.SPAN == '0' then
PSTATE.PAN = '1';
when EL2 AArch32.WriteMode(M32_Hyp);
when EL3
AArch32.WriteMode(M32_Monitor);
if HavePANExt() then
if !from_secure then
PSTATE.PAN = '0';
elsif SCTLR.SPAN == '0' then
PSTATE.PAN = '1';
if handle_el == EL2 then
ELR_hyp = bits(32) UNKNOWN; HSR = bits(32) UNKNOWN;
else
LR = bits(32) UNKNOWN;
SPSR[] = bits(32) UNKNOWN;
PSTATE.E = SCTLR[].EE;
DLR = bits(32) UNKNOWN; DSPSR = bits(32) UNKNOWN;
else // Targeting AArch64
if UsingAArch32() then
AArch64.MaybeZeroRegisterUppers();
MaybeZeroSVEUppers(target_el);
PSTATE.nRW = '0'; PSTATE.SP = '1'; PSTATE.EL = handle_el;
if HavePANExt() && ((handle_el == EL1 && SCTLR_EL1.SPAN == '0') ||
(handle_el == EL2 && HCR_EL2.E2H == '1' &&
HCR_EL2.TGE == '1' && SCTLR_EL2.SPAN == '0')) then
PSTATE.PAN = '1';
ELR[] = bits(64) UNKNOWN; SPSR[] = bits(32) UNKNOWN; ESR[] = bits(32) UNKNOWN;
DLR_EL0 = bits(64) UNKNOWN; DSPSR_EL0 = bits(32) UNKNOWN;
if HaveUAOExt() then PSTATE.UAO = '0';
UpdateEDSCRFields(); // Update EDSCR PE state flags
sync_errors = HaveIESB() && SCTLR[].IESB == '1';
if HaveDoubleFaultExt() && !UsingAArch32() then
sync_errors = sync_errors || (SCR_EL3.EA == '1' && SCR_EL3.NMEA == '1' && PSTATE.EL == EL3);
// SCTLR[].IESB might be ignored in Debug state.
if !ConstrainUnpredictableBool(Unpredictable_IESBinDebug) then
sync_errors = FALSE;
if sync_errors then
SynchronizeErrors();
return;
DisableITRAndResumeInstructionPrefetch();
// DRPSInstruction()
// =================
// Operation of the A64 DRPS and T32 ERET instructions in Debug state
DRPSInstruction()
SynchronizeContext();
sync_errors = HaveIESB() && SCTLR[].IESB == '1';
if HaveDoubleFaultExt() && !UsingAArch32() then
sync_errors = sync_errors || (SCR_EL3.EA == '1' && SCR_EL3.NMEA == '1' && PSTATE.EL == EL3);
// SCTLR[].IESB might be ignored in Debug state.
if !ConstrainUnpredictableBool(Unpredictable_IESBinDebug) then
sync_errors = FALSE;
if sync_errors then
SynchronizeErrors();
SetPSTATEFromPSR(SPSR[]);
// PSTATE.{N,Z,C,V,Q,GE,SS,D,A,I,F} are not observable and ignored in Debug state, so
// behave as if UNKNOWN.
if UsingAArch32() then
PSTATE.<N,Z,C,V,Q,GE,SS,A,I,F> = bits(13) UNKNOWN;
// In AArch32, all instructions are T32 and unconditional.
PSTATE.IT = '00000000'; PSTATE.T = '1'; // PSTATE.J is RES0
DLR = bits(32) UNKNOWN; DSPSR = bits(32) UNKNOWN;
else
PSTATE.<N,Z,C,V,SS,D,A,I,F> = bits(9) UNKNOWN;
DLR_EL0 = bits(64) UNKNOWN; DSPSR_EL0 = bits(32) UNKNOWN;
UpdateEDSCRFields(); // Update EDSCR PE state flags
return;
constant bits(6) DebugHalt_Breakpoint = '000111';
constant bits(6) DebugHalt_EDBGRQ = '010011';
constant bits(6) DebugHalt_Step_Normal = '011011';
constant bits(6) DebugHalt_Step_Exclusive = '011111';
constant bits(6) DebugHalt_OSUnlockCatch = '100011';
constant bits(6) DebugHalt_ResetCatch = '100111';
constant bits(6) DebugHalt_Watchpoint = '101011';
constant bits(6) DebugHalt_HaltInstruction = '101111';
constant bits(6) DebugHalt_SoftwareAccess = '110011';
constant bits(6) DebugHalt_ExceptionCatch = '110111';
constant bits(6) DebugHalt_Step_NoSyndrome = '111011';
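A non-normative helper mapping these EDSCR.STATUS codes to printable names, e.g. for a debugger log:
static const char *debug_halt_reason(unsigned status6)
{
    switch (status6) {
    case 0x07: return "Breakpoint";         /* '000111' */
    case 0x13: return "EDBGRQ";             /* '010011' */
    case 0x1B: return "Step (normal)";      /* '011011' */
    case 0x1F: return "Step (exclusive)";   /* '011111' */
    case 0x23: return "OS Unlock Catch";    /* '100011' */
    case 0x27: return "Reset Catch";        /* '100111' */
    case 0x2B: return "Watchpoint";         /* '101011' */
    case 0x2F: return "Halt instruction";   /* '101111' */
    case 0x33: return "Software access";    /* '110011' */
    case 0x37: return "Exception Catch";    /* '110111' */
    case 0x3B: return "Step (no syndrome)"; /* '111011' */
    default:   return "unknown";
    }
}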
// ExitDebugState()
// ================
ExitDebugState()
assert Halted();
SynchronizeContext();
// Although EDSCR.STATUS signals that the PE is restarting, debuggers must use EDPRSR.SDR to
// detect that the PE has restarted.
EDSCR.STATUS = '000001'; // Signal restarting
EDESR<2:0> = '000'; // Clear any pending Halting debug events
bits(64) new_pc;
bits(32) spsr;
if UsingAArch32() then
new_pc = ZeroExtend(DLR);
spsr = DSPSR;
else
new_pc = DLR_EL0;
spsr = DSPSR_EL0;
// If this is an illegal return, SetPSTATEFromPSR() will set PSTATE.IL.
SetPSTATEFromPSR(spsr); // Can update privileged bits, even at EL0
if UsingAArch32() then
if ConstrainUnpredictableBool(Unpredictable_RESTARTALIGNPC) then new_pc<0> = '0';
BranchTo(new_pc<31:0>, BranchType_DBGEXIT); // AArch32 branch
else
// If targeting AArch32 then possibly zero the 32 most significant bits of the target PC
if spsr<4> == '1' && ConstrainUnpredictableBool(Unpredictable_RESTARTZEROUPPERPC) then
new_pc<63:32> = Zeros();
BranchTo(new_pc, BranchType_DBGEXIT); // A type of branch that is never predicted
(EDSCR.STATUS,EDPRSR.SDR) = ('000010','1'); // Atomically signal restarted
UpdateEDSCRFields(); // Stop signalling PE state
DisableITRAndResumeInstructionPrefetch();
return;
// Execute an A64 instruction in Debug state.
ExecuteA64(bits(32) instr);
// Execute a T32 instruction in Debug state.
ExecuteT32(bits(16) hw1, bits(16) hw2);
// Halt()
// ======
Halt(bits(6) reason)
CTI_SignalEvent(CrossTriggerIn_CrossHalt); // Trigger other cores to halt
bits(64) preferred_restart_address = ThisInstrAddr();
spsr = GetPSRFromPSTATE();
if UsingAArch32() then
// If entering from AArch32 state, spsr<21> is the DIT bit which has to be moved for DSPSR
spsr<24> = spsr<21>;
spsr<21> = PSTATE.SS; // Always save the SS bit
if (HaveBTIExt() &&
!(reason IN {DebugHalt_Step_Normal, DebugHalt_Step_Exclusive, DebugHalt_Step_NoSyndrome,
DebugHalt_Breakpoint, DebugHalt_HaltInstruction}) &&
ConstrainUnpredictableBool(Unpredictable_ZEROBTYPE)) then
DSPSR<11:10> = '00';
if UsingAArch32() then
DLR = preferred_restart_address<31:0>;
DSPSR = spsr;
else
DLR_EL0 = preferred_restart_address;
DSPSR_EL0 = spsr;
EDSCR.ITE = '1';
EDSCR.ITO = '0';
if IsSecure() then
EDSCR.SDD = '0'; // If entered in Secure state, allow debug
elsif HaveEL(EL3) then
EDSCR.SDD = if ExternalSecureInvasiveDebugEnabled() then '0' else '1';
else
assert EDSCR.SDD == '1'; // Otherwise EDSCR.SDD is RES1
EDSCR.MA = '0';
// PSTATE.{SS,D,A,I,F} are not observable and ignored in Debug state, so behave as if
// UNKNOWN. PSTATE.{N,Z,C,V,Q,GE} are also not observable, but since these are not changed on
// exception entry, this function also leaves them unchanged. PSTATE.{E,M,nRW,EL,SP} are
// unchanged. PSTATE.IL is set to 0.
if UsingAArch32() then
PSTATE.<SS,A,I,F> = bits(4) UNKNOWN;
// In AArch32, all instructions are T32 and unconditional.
PSTATE.IT = '00000000';
PSTATE.T = '1'; // PSTATE.J is RES0
else
PSTATE.<SS,D,A,I,F> = bits(5) UNKNOWN;
PSTATE.IL = '0';
StopInstructionPrefetchAndEnableITR();
EDSCR.STATUS = reason; // Signal entered Debug state
UpdateEDSCRFields(); // Update EDSCR PE state flags.
return;
// HaltOnBreakpointOrWatchpoint()
// ==============================
// Returns TRUE if the Breakpoint and Watchpoint debug events should be considered for Debug
// state entry, FALSE if they should be considered for a debug exception.
boolean HaltOnBreakpointOrWatchpoint()
return HaltingAllowed() && EDSCR.HDE == '1' && OSLSR_EL1.OSLK == '0';
// Halted()
// ========
boolean Halted()
return !(EDSCR.STATUS IN {'000001', '000010'}); // Halted
// HaltingAllowed()
// ================
// Returns TRUE if halting is currently allowed, FALSE if halting is prohibited.
boolean HaltingAllowed()
if Halted() || DoubleLockStatus() then
return FALSE;
elsif IsSecure() then
return ExternalSecureInvasiveDebugEnabled();
else
return ExternalInvasiveDebugEnabled();
// Restarting()
// ============
boolean Restarting()
return EDSCR.STATUS == '000001'; // Restarting
StopInstructionPrefetchAndEnableITR();
// UpdateEDSCRFields()
// ===================
// Update EDSCR PE state fields
UpdateEDSCRFields()
if !Halted() then
EDSCR.EL = '00';
EDSCR.NS = bit UNKNOWN;
EDSCR.RW = '1111';
else
EDSCR.EL = PSTATE.EL;
EDSCR.NS = if IsSecure() then '0' else '1';
bits(4) RW;
RW<1> = if ELUsingAArch32(EL1) then '0' else '1';
if PSTATE.EL != EL0 then
RW<0> = RW<1>;
else
RW<0> = if UsingAArch32() then '0' else '1';
if !HaveEL(EL2) || (HaveEL(EL3) && SCR_GEN[].NS == '0' && !IsSecureEL2Enabled()) then
RW<2> = RW<1>;
else
RW<2> = if ELUsingAArch32(EL2) then '0' else '1';
if !HaveEL(EL3) then
RW<3> = RW<2>;
else
RW<3> = if ELUsingAArch32(EL3) then '0' else '1';
// The least-significant bits of EDSCR.RW are UNKNOWN if any higher EL is using AArch32.
if RW<3> == '0' then RW<2:0> = bits(3) UNKNOWN;
elsif RW<2> == '0' then RW<1:0> = bits(2) UNKNOWN;
elsif RW<1> == '0' then RW<0> = bit UNKNOWN;
EDSCR.RW = RW;
return;
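As a non-normative illustration, the EDSCR.RW derivation above can be modelled as below; el2_disabled stands for the HaveEL(EL3) && SCR_GEN[].NS == '0' && !IsSecureEL2Enabled() condition, and architecturally UNKNOWN bits are arbitrarily reported as 0:
#include <stdbool.h>
/* el_is_a64[i] models !ELUsingAArch32(ELi); pstate_el is the current EL. */
static unsigned edscr_rw(const bool el_is_a64[4], int pstate_el,
                         bool have_el2, bool have_el3, bool el2_disabled)
{
    unsigned rw1 = el_is_a64[1];
    unsigned rw0 = (pstate_el != 0) ? rw1 : (unsigned)el_is_a64[0];
    unsigned rw2 = (!have_el2 || el2_disabled) ? rw1 : (unsigned)el_is_a64[2];
    unsigned rw3 = !have_el3 ? rw2 : (unsigned)el_is_a64[3];
    if (rw3 == 0)      { rw2 = rw1 = rw0 = 0; } /* RW<2:0> UNKNOWN */
    else if (rw2 == 0) { rw1 = rw0 = 0; }       /* RW<1:0> UNKNOWN */
    else if (rw1 == 0) { rw0 = 0; }             /* RW<0> UNKNOWN   */
    return (rw3 << 3) | (rw2 << 2) | (rw1 << 1) | rw0;
}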
// CheckExceptionCatch()
// =====================
// Check whether an Exception Catch debug event is set on the current Exception level
CheckExceptionCatch(boolean exception_entry)
// Called after an exception entry or exit, that is, such that IsSecure() and PSTATE.EL are correct
// for the exception target.
base = if IsSecure() then 0 else 4;
if HaltingAllowed() then
if HaveExtendedECDebugEvents() then
exception_exit = !exception_entry;
ctrl = EDECCR<UInt(PSTATE.EL) + base + 8>:EDECCR<UInt(PSTATE.EL) + base>;
case ctrl of
when '00' halt = FALSE;
when '01' halt = TRUE;
when '10' halt = (exception_exit == TRUE);
when '11' halt = (exception_entry == TRUE);
else
halt = (EDECCR<UInt(PSTATE.EL) + base> == '1');
if halt then Halt(DebugHalt_ExceptionCatch);
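A non-normative sketch of the extended Exception Catch decode above, where ctrl is EDECCR<n+8>:EDECCR<n> for the current Exception level:
#include <stdbool.h>
static bool exception_catch_halt(unsigned ctrl, bool exception_entry)
{
    switch (ctrl & 3) {
    case 0:  return false;             /* '00': disabled             */
    case 1:  return true;              /* '01': entry and exit       */
    case 2:  return !exception_entry;  /* '10': exception exit only  */
    default: return exception_entry;   /* '11': exception entry only */
    }
}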
// CheckHaltingStep()
// ==================
// Check whether EDESR.SS has been set by Halting Step
CheckHaltingStep()
if HaltingAllowed() && EDESR.SS == '1' then
// The STATUS code depends on how we arrived at the state where EDESR.SS == 1.
if HaltingStep_DidNotStep() then
Halt(DebugHalt_Step_NoSyndrome);
elsif HaltingStep_SteppedEX() then
Halt(DebugHalt_Step_Exclusive);
else
Halt(DebugHalt_Step_Normal);
// CheckOSUnlockCatch()
// ====================
// Called on unlocking the OS Lock to pend an OS Unlock Catch debug event
CheckOSUnlockCatch()
if (HaveDoPD() && CTIDEVCTL.OSUCE == '1') || (!HaveDoPD() && EDECR.OSUCE == '1') then
if !Halted() then EDESR.OSUC = '1';
// CheckPendingOSUnlockCatch()
// ===========================
// Check whether EDESR.OSUC has been set by an OS Unlock Catch debug event
CheckPendingOSUnlockCatch()
if HaltingAllowed() && EDESR.OSUC == '1' then
Halt(DebugHalt_OSUnlockCatch);
// CheckPendingResetCatch()
// ========================
// Check whether EDESR.RC has been set by a Reset Catch debug event
CheckPendingResetCatch()
if HaltingAllowed() && EDESR.RC == '1' then
Halt(DebugHalt_ResetCatch);
// CheckResetCatch()
// =================
// Called after reset
CheckResetCatch()
if (HaveDoPD() && CTIDEVCTL.RCE == '1') || (!HaveDoPD() && EDECR.RCE == '1') then
EDESR.RC = '1';
// If halting is allowed then halt immediately
if HaltingAllowed() then Halt(DebugHalt_ResetCatch);
// CheckSoftwareAccessToDebugRegisters()
// =====================================
// Check for access to Breakpoint and Watchpoint registers.
CheckSoftwareAccessToDebugRegisters()
os_lock = (if ELUsingAArch32(EL1) then DBGOSLSR.OSLK else OSLSR_EL1.OSLK);
if HaltingAllowed() && EDSCR.TDA == '1' && os_lock == '0' then
Halt(DebugHalt_SoftwareAccess);
// ExternalDebugRequest()
// ======================
ExternalDebugRequest()
if HaltingAllowed() then
Halt(DebugHalt_EDBGRQ);
// Otherwise the CTI continues to assert the debug request until it is taken.
// Returns TRUE if the previously executed instruction was executed in the inactive state, that is,
// if it was not itself stepped.
boolean HaltingStep_DidNotStep();
// Returns TRUE if the previously executed instruction was a Load-Exclusive class instruction
// executed in the active-not-pending state.
boolean HaltingStep_SteppedEX();
// RunHaltingStep()
// ================
RunHaltingStep(boolean exception_generated, bits(2) exception_target, boolean syscall,
boolean reset)
// "exception_generated" is TRUE if the previous instruction generated a synchronous exception
// or was cancelled by an asynchronous exception.
//
// if "exception_generated" is TRUE then "exception_target" is the target of the exception, and
// "syscall" is TRUE if the exception is a synchronous exception where the preferred return
// address is the instruction following that which generated the exception.
//
// "reset" is TRUE if exiting reset state into the highest EL.
if reset then assert !Halted(); // Cannot come out of reset halted
active = EDECR.SS == '1' && !Halted();
if active && reset then // Coming out of reset with EDECR.SS set
EDESR.SS = '1';
elsif active && HaltingAllowed() then
if exception_generated && exception_target == EL3 then
advance = syscall || ExternalSecureInvasiveDebugEnabled();
else
advance = TRUE;
if advance then EDESR.SS = '1';
return;
// ExternalDebugInterruptsDisabled()
// =================================
// Determine whether EDSCR disables interrupts routed to 'target'
boolean ExternalDebugInterruptsDisabled(bits(2) target)
case target of
when EL3
int_dis = EDSCR.INTdis == '11' && ExternalSecureInvasiveDebugEnabled();
when EL2
int_dis = EDSCR.INTdis == '1x' && ExternalInvasiveDebugEnabled();
when EL1
if IsSecure() then
int_dis = EDSCR.INTdis == '1x' && ExternalSecureInvasiveDebugEnabled();
else
int_dis = EDSCR.INTdis != '00' && ExternalInvasiveDebugEnabled();
return int_dis;
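A non-normative sketch of the EDSCR.INTdis decode above; the pattern '1x' matches both '10' and '11':
#include <stdbool.h>
static bool interrupts_disabled(unsigned intdis, int target_el, bool is_secure,
                                bool ext_dbg_en, bool ext_secure_dbg_en)
{
    switch (target_el) {
    case 3:  return intdis == 3 && ext_secure_dbg_en;          /* '11'     */
    case 2:  return (intdis & 2) != 0 && ext_dbg_en;           /* '1x'     */
    default:                                                   /* EL1      */
        if (is_secure)
            return (intdis & 2) != 0 && ext_secure_dbg_en;     /* '1x'     */
        return intdis != 0 && ext_dbg_en;                      /* != '00'  */
    }
}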
enumeration InterruptID {InterruptID_PMUIRQ, InterruptID_COMMIRQ, InterruptID_CTIIRQ,
InterruptID_COMMRX, InterruptID_COMMTX};
// Set a level-sensitive interrupt to the specified level.
SetInterruptRequestLevel(InterruptID id, signal level);
// CreatePCSample()
// ================
CreatePCSample()
// In a simple sequential execution of the program, CreatePCSample is executed each time the PE
// executes an instruction that can be sampled. An implementation is not constrained such that
// reads of EDPCSRlo return the current values of PC, etc.
pc_sample.valid = ExternalNoninvasiveDebugAllowed() && !Halted();
pc_sample.pc = ThisInstrAddr();
pc_sample.el = PSTATE.EL;
pc_sample.rw = if UsingAArch32() then '0' else '1';
pc_sample.ns = if IsSecure() then '0' else '1';
pc_sample.contextidr = if ELUsingAArch32(EL1) then CONTEXTIDR else CONTEXTIDR_EL1;
pc_sample.has_el2 = EL2Enabled();
if EL2Enabled() then
if ELUsingAArch32(EL2) then
pc_sample.vmid = ZeroExtend(VTTBR.VMID, 16);
elsif !Have16bitVMID() || VTCR_EL2.VS == '0' then
pc_sample.vmid = ZeroExtend(VTTBR_EL2.VMID<7:0>, 16);
else
pc_sample.vmid = VTTBR_EL2.VMID;
if HaveVirtHostExt() && !ELUsingAArch32(EL2) then
pc_sample.contextidr_el2 = CONTEXTIDR_EL2;
else
pc_sample.contextidr_el2 = bits(32) UNKNOWN;
pc_sample.el0h = PSTATE.EL == EL0 && IsInHost();
return;
// EDPCSRlo[] (read)
// =================
bits(32) EDPCSRlo[boolean memory_mapped]
if EDPRSR<6:5,0> != '001' then // Check DLK, OSLK and PU bits
IMPLEMENTATION_DEFINED "signal slave-generated error";
return bits(32) UNKNOWN;
// The Software lock is OPTIONAL.
update = !memory_mapped || EDLSR.SLK == '0'; // Software locked: no side-effects
if pc_sample.valid then
sample = pc_sample.pc<31:0>;
if update then
if HaveVirtHostExt() && EDSCR.SC2 == '1' then
EDPCSRhi.PC = (if pc_sample.rw == '0' then Zeros(24) else pc_sample.pc<55:32>);
EDPCSRhi.EL = pc_sample.el;
EDPCSRhi.NS = pc_sample.ns;
else
EDPCSRhi = (if pc_sample.rw == '0' then Zeros(32) else pc_sample.pc<63:32>);
EDCIDSR = pc_sample.contextidr;
if HaveVirtHostExt() && EDSCR.SC2 == '1' then
EDVIDSR = (if HaveEL(EL2) && pc_sample.ns == '1' then pc_sample.contextidr_el2
else bits(32) UNKNOWN);
else
if HaveEL(EL2) && pc_sample.ns == '1' && pc_sample.el IN {EL1,EL0} then
EDVIDSR.VMID = pc_sample.vmid;
else
EDVIDSR.VMID = Zeros();
EDVIDSR.NS = pc_sample.ns;
EDVIDSR.E2 = (if pc_sample.el == EL2 then '1' else '0');
EDVIDSR.E3 = (if pc_sample.el == EL3 then '1' else '0') AND pc_sample.rw;
// The conditions for setting HV are not specified if PCSRhi is zero.
// An example implementation may be "pc_sample.rw".
EDVIDSR.HV = (if !IsZero(EDPCSRhi) then '1' else bit IMPLEMENTATION_DEFINED "0 or 1");
else
sample = Ones(32);
if update then
EDPCSRhi = bits(32) UNKNOWN;
EDCIDSR = bits(32) UNKNOWN;
EDVIDSR = bits(32) UNKNOWN;
return sample;
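As a non-normative illustration, a debugger-side sampling sequence might look as below; reading EDPCSRlo first is what latches EDPCSRhi, EDCIDSR and EDVIDSR as a coherent set, and Ones(32) indicates that no valid sample is available (accessor names are assumptions of this sketch):
#include <stdint.h>
extern uint32_t read_edpcsrlo(void), read_edpcsrhi(void);
extern uint32_t read_edcidsr(void), read_edvidsr(void);
typedef struct { uint64_t pc; uint32_t cid, vid; } PcSampleRead;
static int sample_pc(PcSampleRead *out)
{
    uint32_t lo = read_edpcsrlo();    /* must be read first */
    if (lo == 0xFFFFFFFFu) return -1; /* Ones(32): no valid sample */
    out->pc  = ((uint64_t)read_edpcsrhi() << 32) | lo;
    out->cid = read_edcidsr();
    out->vid = read_edvidsr();
    return 0;
}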
type PCSample is (
boolean valid,
bits(64) pc,
bits(2) el,
bit rw,
bit ns,
boolean has_el2,
bits(32) contextidr,
bits(32) contextidr_el2,
boolean el0h,
bits(16) vmid
)
PCSample pc_sample;
// PMPCSR[] (read)
// ===============
bits(32) PMPCSR[boolean memory_mapped]
if EDPRSR<6:5,0> != '001' then // Check DLK, OSLK and PU bits
IMPLEMENTATION_DEFINED "signal slave-generated error";
return bits(32) UNKNOWN;
// The Software lock is OPTIONAL.
update = !memory_mapped || PMLSR.SLK == '0'; // Software locked: no side-effects
if pc_sample.valid then
sample = pc_sample.pc<31:0>;
if update then
PMPCSR<55:32> = (if pc_sample.rw == '0' then Zeros(24) else pc_sample.pc<55:32>);
PMPCSR.EL = pc_sample.el;
PMPCSR.NS = pc_sample.ns;
PMCID1SR = pc_sample.contextidr;
PMCID2SR = if pc_sample.has_el2 then pc_sample.contextidr_el2 else bits(32) UNKNOWN;
PMVIDSR.VMID = (if pc_sample.has_el2 && pc_sample.el IN {EL1,EL0} && !pc_sample.el0h
then pc_sample.vmid else bits(16) UNKNOWN);
else
sample = Ones(32);
if update then
PMPCSR<55:32> = bits(24) UNKNOWN;
PMPCSR.EL = bits(2) UNKNOWN;
PMPCSR.NS = bit UNKNOWN;
PMCID1SR = bits(32) UNKNOWN;
PMCID2SR = bits(32) UNKNOWN;
PMVIDSR.VMID = bits(16) UNKNOWN;
return sample;
// CheckSoftwareStep()
// ===================
// Take a Software Step exception if in the active-pending state
CheckSoftwareStep()
// Other self-hosted debug functions will call AArch32.GenerateDebugExceptions() if called from
// AArch32 state. However, because Software Step is only active when the debug target Exception
// level is using AArch64, CheckSoftwareStep only calls AArch64.GenerateDebugExceptions().
if !ELUsingAArch32(DebugTarget()) && AArch64.GenerateDebugExceptions() then
if MDSCR_EL1.SS == '1' && PSTATE.SS == '0' then
AArch64.SoftwareStepException();
// DebugExceptionReturnSS()
// ========================
// Returns value to write to PSTATE.SS on an exception return or Debug state exit.
bit DebugExceptionReturnSS(bits(32) spsr)
assert Halted() || Restarting() || PSTATE.EL != EL0;
SS_bit = '0';
if MDSCR_EL1.SS == '1' then
if Restarting() then
enabled_at_source = FALSE;
elsif UsingAArch32() then
enabled_at_source = AArch32.GenerateDebugExceptions();
else
enabled_at_source = AArch64.GenerateDebugExceptions();
if IllegalExceptionReturn(spsr) then
dest = PSTATE.EL;
else
(valid, dest) = ELFromSPSR(spsr); assert valid;
secure = IsSecureBelowEL3() || dest == EL3;
if ELUsingAArch32(dest) then
enabled_at_dest = AArch32.GenerateDebugExceptionsFrom(dest, secure);
else
mask = spsr<9>;
enabled_at_dest = AArch64.GenerateDebugExceptionsFrom(dest, secure, mask);
ELd = DebugTargetFrom(secure);
if !ELUsingAArch32(ELd) && !enabled_at_source && enabled_at_dest then
SS_bit = spsr<21>;
return SS_bit;
// SSAdvance()
// ===========
// Advance the Software Step state machine.
SSAdvance()
// A simpler implementation of this function just clears PSTATE.SS to zero regardless of the
// current Software Step state machine. However, this check is made to illustrate that the
// processor only needs to consider advancing the state machine from the active-not-pending
// state.
target = DebugTarget();
step_enabled = !ELUsingAArch32(target) && MDSCR_EL1.SS == '1';
active_not_pending = step_enabled && PSTATE.SS == '1';
if active_not_pending then PSTATE.SS = '0';
return;
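A non-normative sketch of the Software Step state machine that SSAdvance() advances: MDSCR_EL1.SS enables stepping, and PSTATE.SS distinguishes active-not-pending ('1') from active-pending ('0'):
#include <stdbool.h>
typedef enum { SS_INACTIVE, SS_ACTIVE_NOT_PENDING, SS_ACTIVE_PENDING } SSState;
static SSState ss_state(bool mdscr_ss, bool pstate_ss)
{
    if (!mdscr_ss) return SS_INACTIVE;
    return pstate_ss ? SS_ACTIVE_NOT_PENDING : SS_ACTIVE_PENDING;
}
/* Retiring one instruction in the active-not-pending state moves the
 * machine to active-pending, which is when CheckSoftwareStep() above
 * takes the Software Step exception. */
static SSState ss_advance(SSState s)
{
    return (s == SS_ACTIVE_NOT_PENDING) ? SS_ACTIVE_PENDING : s;
}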
// Returns TRUE if the previously executed instruction was executed in the inactive state, that is,
// if it was not itself stepped.
boolean SoftwareStep_DidNotStep();
// Returns TRUE if the previously executed instruction was a Load-Exclusive class instruction
// executed in the active-not-pending state.
boolean SoftwareStep_SteppedEX();
// ConditionSyndrome()
// ===================
// Return CV and COND fields of instruction syndrome
bits(5) ConditionSyndrome()
bits(5) syndrome;
if UsingAArch32() then
cond = AArch32.CurrentCond();
if PSTATE.T == '0' then // A32
syndrome<4> = '1';
// A conditional A32 instruction that is known to pass its condition code check
// can be presented either with COND set to 0xE, the value for unconditional, or
// the COND value held in the instruction.
if ConditionHolds(cond) && ConstrainUnpredictableBool(Unpredictable_ESRCONDPASS) then
syndrome<3:0> = '1110';
else
syndrome<3:0> = cond;
else // T32
// When a T32 instruction is trapped, it is IMPLEMENTATION DEFINED whether:
// * CV set to 0 and COND is set to an UNKNOWN value
// * CV set to 1 and COND is set to the condition code for the condition that
// applied to the instruction.
if boolean IMPLEMENTATION_DEFINED "Condition valid for trapped T32" then
syndrome<4> = '1';
syndrome<3:0> = cond;
else
syndrome<4> = '0';
syndrome<3:0> = bits(4) UNKNOWN;
else
syndrome<4> = '1';
syndrome<3:0> = '1110';
return syndrome;
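A non-normative sketch of how the CV and COND fields returned above pack into five bits (bit 4 is CV, bits 3:0 are COND); the IMPLEMENTATION DEFINED T32 choice is fixed to the CV = 1 option here:
#include <stdbool.h>
#include <stdint.h>
static uint8_t condition_syndrome(bool aarch32, bool is_a32, uint8_t cond)
{
    if (!aarch32)
        return 0x10 | 0xE;              /* CV = 1, COND = '1110' for A64 */
    if (is_a32)
        return 0x10 | (cond & 0xF);     /* CV = 1; 0xE may also be reported
                                           if the condition passed (see above) */
    /* T32: IMPLEMENTATION DEFINED; this sketch picks CV = 1 with COND. */
    return 0x10 | (cond & 0xF);
}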
enumeration Exception {Exception_Uncategorized, // Uncategorized or unknown reason
Exception_WFxTrap, // Trapped WFI or WFE instruction
Exception_CP15RTTrap, // Trapped AArch32 MCR or MRC access to CP15
Exception_CP15RRTTrap, // Trapped AArch32 MCRR or MRRC access to CP15
Exception_CP14RTTrap, // Trapped AArch32 MCR or MRC access to CP14
Exception_CP14DTTrap, // Trapped AArch32 LDC or STC access to CP14
Exception_AdvSIMDFPAccessTrap, // HCPTR-trapped access to SIMD or FP
Exception_FPIDTrap, // Trapped access to SIMD or FP ID register
// Trapped BXJ instruction not supported in Armv8
Exception_PACTrap, // Trapped invalid PAC use
Exception_CP14RRTTrap, // Trapped MRRC access to CP14 from AArch32
Exception_IllegalState, // Illegal Execution state
Exception_SupervisorCall, // Supervisor Call
Exception_HypervisorCall, // Hypervisor Call
Exception_MonitorCall, // Monitor Call or Trapped SMC instruction
Exception_SystemRegisterTrap, // Trapped MRS or MSR system register access
Exception_ERetTrap, // Trapped invalid ERET use
Exception_InstructionAbort, // Instruction Abort or Prefetch Abort
Exception_PCAlignment, // PC alignment fault
Exception_DataAbort, // Data Abort
Exception_NV2DataAbort, // Data abort at EL1 reported as being from EL2
Exception_SPAlignment, // SP alignment fault
Exception_FPTrappedException, // IEEE trapped FP exception
Exception_SError, // SError interrupt
Exception_Breakpoint, // (Hardware) Breakpoint
Exception_SoftwareStep, // Software Step
Exception_Watchpoint, // Watchpoint
Exception_NV2Watchpoint, // Watchpoint at EL1 reported as being from EL2
Exception_SoftwareBreakpoint, // Software Breakpoint Instruction
Exception_VectorCatch, // AArch32 Vector Catch
Exception_IRQ, // IRQ interrupt
Exception_SVEAccessTrap, // HCPTR trapped access to SVE
Exception_BranchTarget, // Branch Target Identification
Exception_FIQ}; // FIQ interrupt
type ExceptionRecord is (Exception exceptype, // Exception class
bits(25) syndrome, // Syndrome record
bits(64) vaddress, // Virtual fault address
boolean ipavalid, // Physical fault address for second stage faults is valid
bits(1) NS, // Physical fault address for second stage faults is Non-secure or secure
bits(52) ipaddress) // Physical fault address for second stage faults
// ExceptionSyndrome()
// ===================
// Return a blank exception syndrome record for an exception of the given type.
ExceptionRecord ExceptionSyndrome(Exception exceptype)
ExceptionRecord r;
r.exceptype = exceptype;
// Initialize all other fields
r.syndrome = Zeros();
r.vaddress = Zeros();
r.ipavalid = FALSE;
r.NS = '0';
r.ipaddress = Zeros();
return r;
// ReservedValue()
// ===============
ReservedValue()
if UsingAArch32() && !AArch32.GeneralExceptionsToAArch64() then
AArch32.TakeUndefInstrException();
else
AArch64.UndefinedFault();
// UnallocatedEncoding()
// =====================
UnallocatedEncoding()
if UsingAArch32() && AArch32.ExecutingCP10or11Instr() then
FPEXC.DEX = '0';
if UsingAArch32() && !AArch32.GeneralExceptionsToAArch64() then
AArch32.TakeUndefInstrException();
else
AArch64.UndefinedFault();
// EncodeLDFSC()
// =============
// Function that gives the Long-descriptor FSC code for types of Fault
bits(6) EncodeLDFSC(Fault statuscode, integer level)
bits(6) result;
case statuscode of
when Fault_AddressSize result = '0000':level<1:0>; assert level IN {0,1,2,3};
when Fault_AccessFlag result = '0010':level<1:0>; assert level IN {1,2,3};
when Fault_Permission result = '0011':level<1:0>; assert level IN {1,2,3};
when Fault_Translation result = '0001':level<1:0>; assert level IN {0,1,2,3};
when Fault_SyncExternal result = '010000';
when Fault_SyncExternalOnWalk result = '0101':level<1:0>; assert level IN {0,1,2,3};
when Fault_SyncParity result = '011000';
when Fault_SyncParityOnWalk result = '0111':level<1:0>; assert level IN {0,1,2,3};
when Fault_AsyncParity result = '011001';
when Fault_AsyncExternal result = '010001';
when Fault_Alignment result = '100001';
when Fault_Debug result = '100010';
when Fault_TLBConflict result = '110000';
when Fault_HWUpdateAccessFlag result = '110001';
when Fault_Lockdown result = '110100'; // IMPLEMENTATION DEFINED
when Fault_Exclusive result = '110101'; // IMPLEMENTATION DEFINED
otherwise Unreachable();
return result;
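For readers cross-checking the encoding outside the pseudocode, a minimal C sketch of the level-dependent cases follows; the enum, the function name, and the subset shown are illustrative assumptions, not part of the specification. The low two bits of the 6-bit code carry the translation level.
#include <assert.h>
// Illustrative subset of EncodeLDFSC(): '0000':level etc. as 6-bit values.
enum fault { FAULT_ADDRESSSIZE, FAULT_ACCESSFLAG, FAULT_PERMISSION, FAULT_TRANSLATION };
static unsigned encode_ldfsc(enum fault statuscode, int level)
{
    switch (statuscode) {
    case FAULT_ADDRESSSIZE: assert(level >= 0 && level <= 3); return 0x00 | level; // '0000':level
    case FAULT_ACCESSFLAG:  assert(level >= 1 && level <= 3); return 0x08 | level; // '0010':level
    case FAULT_PERMISSION:  assert(level >= 1 && level <= 3); return 0x0C | level; // '0011':level
    case FAULT_TRANSLATION: assert(level >= 0 && level <= 3); return 0x04 | level; // '0001':level
    }
    return 0; // not reached for the subset shown
}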
// IPAValid()
// ==========
// Return TRUE if the IPA is reported for the abort
boolean IPAValid(FaultRecord fault)
assert fault.statuscode != Fault_None;
if fault.s2fs1walk then
return fault.statuscode IN {Fault_AccessFlag, Fault_Permission, Fault_Translation, Fault_AddressSize};
elsif fault.secondstage then
return fault.statuscode IN {Fault_AccessFlag, Fault_Translation, Fault_AddressSize};
else
return FALSE;
// IsAsyncAbort()
// ==============
// Returns TRUE if the abort currently being processed is an asynchronous abort, and FALSE
// otherwise.
boolean IsAsyncAbort(Fault statuscode)
assert statuscode != Fault_None;
return (statuscode IN {Fault_AsyncExternal, Fault_AsyncParity});
// IsAsyncAbort()
// ==============
boolean IsAsyncAbort(FaultRecord fault)
return IsAsyncAbort(fault.statuscode);
// IsDebugException()
// ==================
boolean IsDebugException(FaultRecord fault)
assert fault.statuscode != Fault_None;
return fault.statuscode == Fault_Debug;
// IsExternalAbort()
// =================
// Returns TRUE if the abort currently being processed is an external abort and FALSE otherwise.
boolean IsExternalAbort(Fault statuscode)
assert statuscode != Fault_None;
return (statuscode IN {Fault_SyncExternal, Fault_SyncParity, Fault_SyncExternalOnWalk, Fault_SyncParityOnWalk,
Fault_AsyncExternal, Fault_AsyncParity});
// IsExternalAbort()
// =================
boolean IsExternalAbort(FaultRecord fault)
return IsExternalAbort(fault.statuscode);
// IsExternalSyncAbort()
// =====================
// Returns TRUE if the abort currently being processed is an external synchronous abort and FALSE otherwise.
boolean IsExternalSyncAbort(Fault statuscode)
assert statuscode != Fault_None;
return (statuscode IN {Fault_SyncExternal, Fault_SyncParity, Fault_SyncExternalOnWalk, Fault_SyncParityOnWalk});
// IsExternalSyncAbort()
// =====================
boolean IsExternalSyncAbort(FaultRecord fault)
return IsExternalSyncAbort(fault.statuscode);
// IsFault()
// =========
// Return TRUE if a fault is associated with an address descriptor
boolean IsFault(AddressDescriptor addrdesc)
return addrdesc.fault.statuscode != Fault_None;
// IsSErrorInterrupt()
// ===================
// Returns TRUE if the abort currently being processed is an SError interrupt, and FALSE
// otherwise.
boolean IsSErrorInterrupt(Fault statuscode)
assert statuscode != Fault_None;
return (statuscode IN {Fault_AsyncExternal, Fault_AsyncParity});
// IsSErrorInterrupt()
// ===================
boolean IsSErrorInterrupt(FaultRecord fault)
return IsSErrorInterrupt(fault.statuscode);
// IsSecondStage()
// ===============
boolean IsSecondStage(FaultRecord fault)
assert fault.statuscode != Fault_None;
return fault.secondstage;
bits(11) LSInstructionSyndrome();
// ASR()
// =====
bits(N) ASR(bits(N) x, integer shift)
assert shift >= 0;
if shift == 0 then
result = x;
else
(result, -) = ASR_C(x, shift);
return result;
// ASR_C()
// =======
(bits(N), bit) ASR_C(bits(N) x, integer shift)
assert shift > 0;
shift = if shift > N then N else shift;
extended_x = SignExtend(x, shift+N);
result = extended_x<shift+N-1:shift>;
carry_out = extended_x<shift-1>;
return (result, carry_out);
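A minimal C sketch of ASR_C for the 32-bit case, assuming shift >= 1 as the pseudocode's assert requires; the fixed width and the name are illustrative, not part of the specification.
#include <stdint.h>
// Illustrative 32-bit analogue of ASR_C: arithmetic shift right that also
// returns the last bit shifted out, mirroring the pseudocode's carry_out.
static uint32_t asr_c32(uint32_t x, unsigned shift, unsigned *carry_out)
{
    if (shift > 32) shift = 32;                  // the pseudocode clamps shift to N
    int64_t extended = (int64_t)(int32_t)x;      // SignExtend(x, shift+N)
    *carry_out = (unsigned)((extended >> (shift - 1)) & 1); // bit shifted out last
    return (uint32_t)(extended >> shift);        // extended_x<shift+N-1:shift>
}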
// Abs()
// =====
integer Abs(integer x)
return if x >= 0 then x else -x;
// Abs()
// =====
real Abs(real x)
return if x >= 0.0 then x else -x;
// Align()
// =======
integer Align(integer x, integer y)
return y * (x DIV y);
// Align()
// =======
bits(N) Align(bits(N) x, integer y)
return Align(UInt(x), y)<N-1:0>;
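A one-line C equivalent of the integer form, rounding down to a multiple of y; the name and fixed width are mine, not the specification's.
#include <stdint.h>
// Illustrative Align(): round x down to a multiple of y (assumes y > 0).
static uint64_t align_down(uint64_t x, uint64_t y)
{
    return y * (x / y);   // equivalently x - (x % y) for unsigned x
}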
// BitCount()
// ==========
integer BitCount(bits(N) x)
integer result = 0;
for i = 0 to N-1
if x<i> == '1' then
result = result + 1;
return result;
// CountLeadingSignBits()
// ======================
integer CountLeadingSignBits(bits(N) x)
return CountLeadingZeroBits(x<N-1:1> EOR x<N-2:0>);
// CountLeadingZeroBits()
// ======================
integer CountLeadingZeroBits(bits(N) x)
return N - (HighestSetBit(x) + 1);
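Illustrative C versions of the bit-counting helpers for 32-bit values; the widths and names are assumptions for the sketch, not part of the specification.
#include <stdint.h>
// Illustrative analogues of BitCount(), HighestSetBit() and CountLeadingZeroBits().
static int bit_count32(uint32_t x)
{
    int result = 0;
    for (int i = 0; i < 32; i++)
        if ((x >> i) & 1) result++;
    return result;
}
static int highest_set_bit32(uint32_t x)
{
    for (int i = 31; i >= 0; i--)
        if ((x >> i) & 1) return i;
    return -1;   // pseudocode returns -1 when no bit is set
}
static int count_leading_zero_bits32(uint32_t x)
{
    return 32 - (highest_set_bit32(x) + 1);
}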
// Elem[] - non-assignment form
// ============================
bits(size) Elem[bits(N) vector, integer e, integer size]
assert e >= 0 && (e+1)*size <= N;
return vector<e*size+size-1 : e*size>;
// Elem[] - non-assignment form
// ============================
bits(size) Elem[bits(N) vector, integer e]
return Elem[vector, e, size];
// Elem[] - assignment form
// ========================
Elem[bits(N) &vector, integer e, integer size] = bits(size) value
assert e >= 0 && (e+1)*size <= N;
vector<(e+1)*size-1:e*size> = value;
return;
// Elem[] - assignment form
// ========================
Elem[bits(N) &vector, integer e] = bits(size) value
Elem[vector, e, size] = value;
return;
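The Elem[] accessor is just a field extract/insert at offset e*size. A minimal C sketch over a 64-bit "vector" follows; the widths and names are illustrative assumptions.
#include <stdint.h>
#include <assert.h>
// Illustrative analogue of Elem[]: read or write element e of width `size`
// bits inside a 64-bit value.
static uint64_t elem_get(uint64_t vector, int e, int size)
{
    assert(e >= 0 && (e + 1) * size <= 64);
    uint64_t mask = (size == 64) ? ~0ULL : ((1ULL << size) - 1);
    return (vector >> (e * size)) & mask;
}
static uint64_t elem_set(uint64_t vector, int e, int size, uint64_t value)
{
    assert(e >= 0 && (e + 1) * size <= 64);
    uint64_t mask = (size == 64) ? ~0ULL : ((1ULL << size) - 1);
    return (vector & ~(mask << (e * size))) | ((value & mask) << (e * size));
}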
// Extend()
// ========
bits(N) Extend(bits(M) x, integer N, boolean unsigned)
return if unsigned then ZeroExtend(x, N) else SignExtend(x, N);
// Extend()
// ========
bits(N) Extend(bits(M) x, boolean unsigned)
return Extend(x, N, unsigned);
// HighestSetBit()
// ===============
integer HighestSetBit(bits(N) x)
for i = N-1 downto 0
if x<i> == '1' then return i;
return -1;
// Int()
// =====
integer Int(bits(N) x, boolean unsigned)
result = if unsigned then UInt(x) else SInt(x);
return result;
// IsOnes()
// ========
boolean IsOnes(bits(N) x)
return x == Ones(N);
// IsZero()
// ========
boolean IsZero(bits(N) x)
return x == Zeros(N);
// IsZeroBit()
// ===========
bit IsZeroBit(bits(N) x)
return if IsZero(x) then '1' else '0';
// LSL()
// =====
bits(N) LSL(bits(N) x, integer shift)
assert shift >= 0;
if shift == 0 then
result = x;
else
(result, -) = LSL_C(x, shift);
return result;
// LSL_C()
// =======
(bits(N), bit) LSL_C(bits(N) x, integer shift)
assert shift > 0;
shift = if shift > N then N else shift;
extended_x = x : Zeros(shift);
result = extended_x<N-1:0>;
carry_out = extended_x<N>;
return (result, carry_out);
// LSR()
// =====
bits(N) LSR(bits(N) x, integer shift)
assert shift >= 0;
if shift == 0 then
result = x;
else
(result, -) = LSR_C(x, shift);
return result;
// LSR_C()
// =======
(bits(N), bit) LSR_C(bits(N) x, integer shift)
assert shift > 0;
shift = if shift > N then N else shift;
extended_x = ZeroExtend(x, shift+N);
result = extended_x<shift+N-1:shift>;
carry_out = extended_x<shift-1>;
return (result, carry_out);
// LowestSetBit()
// ==============
integer LowestSetBit(bits(N) x)
for i = 0 to N-1
if x<i> == '1' then return i;
return N;
// Max()
// =====
integer Max(integer a, integer b)
return if a >= b then a else b;
// Max()
// =====
real Max(real a, real b)
return if a >= b then a else b;
// Min()
// =====
integer Min(integer a, integer b)
return if a <= b then a else b;
// Min()
// =====
real Min(real a, real b)
return if a <= b then a else b;
// Ones()
// ======
bits(N) Ones(integer N)
return Replicate('1',N);
// Ones()
// ======
bits(N) Ones()
return Ones(N);
// ROR()
// =====
bits(N) ROR(bits(N) x, integer shift)
assert shift >= 0;
if shift == 0 then
result = x;
else
(result, -) = ROR_C(x, shift);
return result;
// ROR_C()
// =======
(bits(N), bit) ROR_C(bits(N) x, integer shift)
assert shift != 0;
m = shift MOD N;
result = LSR(x,m) OR LSL(x,N-m);
carry_out = result<N-1>;
return (result, carry_out);
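A minimal C sketch of ROR_C for 32-bit values; the width and names are assumptions for illustration, and the m==0 test avoids the undefined full-width shift that the pseudocode's LSR/LSL composition handles implicitly.
#include <stdint.h>
// Illustrative 32-bit analogue of ROR_C: rotate right by `shift` and report
// the new top bit as carry_out, as the pseudocode does.
static uint32_t ror_c32(uint32_t x, unsigned shift, unsigned *carry_out)
{
    unsigned m = shift % 32;                        // shift MOD N
    uint32_t result = m ? ((x >> m) | (x << (32 - m))) : x;
    *carry_out = result >> 31;                      // result<N-1>
    return result;
}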
// Replicate()
// ===========
bits(N) Replicate(bits(M) x)
assert N MOD M == 0;
return Replicate(x, N DIV M);
bits(M*N) Replicate(bits(M) x, integer N);
integer RoundDown(real x);
// RoundTowardsZero()
// ==================
integer RoundTowardsZero(real x)
return if x == 0.0 then 0 else if x >= 0.0 then RoundDown(x) else RoundUp(x);
integer RoundUp(real x);
// SInt()
// ======
integer SInt(bits(N) x)
result = 0;
for i = 0 to N-1
if x<i> == '1' then result = result + 2^i;
if x<N-1> == '1' then result = result - 2^N;
return result;
// SignExtend()
// ============
bits(N) SignExtend(bits(M) x, integer N)
assert N >= M;
return Replicate(x<M-1>, N-M) : x;
// SignExtend()
// ============
bits(N) SignExtend(bits(M) x)
return SignExtend(x, N);
// UInt()
// ======
integer UInt(bits(N) x)
result = 0;
for i = 0 to N-1
if x<i> == '1' then result = result + 2^i;
return result;
// ZeroExtend()
// ============
bits(N) ZeroExtend(bits(M) x, integer N)
assert N >= M;
return Zeros(N-M) : x;
// ZeroExtend()
// ============
bits(N) ZeroExtend(bits(M) x)
return ZeroExtend(x, N);
// Zeros()
// =======
bits(N) Zeros(integer N)
return Replicate('0',N);
// Zeros()
// =======
bits(N) Zeros()
return Zeros(N);
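UInt() and SInt() read the same bit pattern as an unsigned value or as two's complement; SInt() subtracts 2^N exactly when the top bit is set. A minimal C sketch for an M-bit field held in a uint64_t (field width and names are mine, assuming 1 <= m <= 64):
#include <stdint.h>
// Illustrative analogues of UInt()/SInt() for an m-bit field in the low bits.
static uint64_t uint_field(uint64_t x, int m)          // UInt: zero-extend
{
    return (m == 64) ? x : (x & ((1ULL << m) - 1));
}
static int64_t sint_field(uint64_t x, int m)           // SInt: sign-extend
{
    uint64_t v = uint_field(x, m);
    if (m < 64 && ((v >> (m - 1)) & 1))
        v -= (1ULL << m);                              // subtract 2^M, as in SInt()
    return (int64_t)v;
}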
// BitReverse()
// ============
bits(N) BitReverse(bits(N) data)
bits(N) result;
for i = 0 to N-1
result<N-i-1> = data<i>;
return result;
// HaveCRCExt()
// ============
boolean HaveCRCExt()
return HasArchVersion(ARMv8p1) || boolean IMPLEMENTATION_DEFINED "Have CRC extension";
// Poly32Mod2()
// ============
// Poly32Mod2 on a bitstring does a polynomial Modulus over {0,1} operation
bits(32) Poly32Mod2(bits(N) data, bits(32) poly)
assert N > 32;
for i = N-1 downto 32
if data<i> == '1' then
data<i-1:0> = data<i-1:0> EOR (poly:Zeros(i-32));
return data<31:0>;
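This is the bit-serial GF(2) reduction used by the CRC32 instructions: wherever the current top bit is set, the 32-bit polynomial is XORed in, aligned just below it. A minimal C sketch for a 64-bit dividend (the width and name are illustrative assumptions):
#include <stdint.h>
// Illustrative analogue of Poly32Mod2() for N = 64.
static uint32_t poly32_mod2_64(uint64_t data, uint32_t poly)
{
    for (int i = 63; i >= 32; i--) {
        if ((data >> i) & 1)
            // EOR with poly : Zeros(i-32), i.e. poly aligned under bit i
            data ^= ((uint64_t)poly) << (i - 32);
    }
    return (uint32_t)data;   // data<31:0>
}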
bits(128) AESInvMixColumns(bits (128) op);
bits(128) AESInvShiftRows(bits(128) op);
bits(128) AESInvSubBytes(bits(128) op);
bits(128) AESMixColumns(bits (128) op);
bits(128) AESShiftRows(bits(128) op);
bits(128) AESSubBytes(bits(128) op);
// HaveAESExt()
// ============
// TRUE if AES cryptographic instructions support is implemented,
// FALSE otherwise.
boolean HaveAESExt()
return boolean IMPLEMENTATION_DEFINED "Has AES Crypto instructions";
// HaveBit128PMULLExt()
// ====================
// TRUE if 128 bit form of PMULL instructions support is implemented,
// FALSE otherwise.
boolean HaveBit128PMULLExt()
return boolean IMPLEMENTATION_DEFINED "Has 128-bit form of PMULL instructions";
// HaveSHA1Ext()
// =============
// TRUE if SHA1 cryptographic instructions support is implemented,
// FALSE otherwise.
boolean HaveSHA1Ext()
return boolean IMPLEMENTATION_DEFINED "Has SHA1 Crypto instructions";
// HaveSHA256Ext()
// ===============
// TRUE if SHA256 cryptographic instructions support is implemented,
// FALSE otherwise.
boolean HaveSHA256Ext()
return boolean IMPLEMENTATION_DEFINED "Has SHA256 Crypto instructions";
// HaveSHA3Ext()
// =============
// TRUE if SHA3 cryptographic instructions support is implemented,
// and when SHA1 and SHA2 basic cryptographic instructions support is implemented,
// FALSE otherwise.
boolean HaveSHA3Ext()
if !HasArchVersion(ARMv8p2) || !(HaveSHA1Ext() && HaveSHA256Ext()) then
return FALSE;
return boolean IMPLEMENTATION_DEFINED "Has SHA3 Crypto instructions";
// HaveSHA512Ext()
// ===============
// TRUE if SHA512 cryptographic instructions support is implemented,
// and when SHA1 and SHA2 basic cryptographic instructions support is implemented,
// FALSE otherwise.
boolean HaveSHA512Ext()
if !HasArchVersion(ARMv8p2) || !(HaveSHA1Ext() && HaveSHA256Ext()) then
return FALSE;
return boolean IMPLEMENTATION_DEFINED "Has SHA512 Crypto instructions";
// HaveSM3Ext()
// ============
// TRUE if SM3 cryptographic instructions support is implemented,
// FALSE otherwise.
boolean HaveSM3Ext()
if !HasArchVersion(ARMv8p2) then
return FALSE;
return boolean IMPLEMENTATION_DEFINED "Has SM3 Crypto instructions";
// HaveSM4Ext()
// ============
// TRUE if SM4 cryptographic instructions support is implemented,
// FALSE otherwise.
boolean HaveSM4Ext()
if !HasArchVersion(ARMv8p2) then
return FALSE;
return boolean IMPLEMENTATION_DEFINED "Has SM4 Crypto instructions";
// ROL()
// =====
bits(N) ROL(bits(N) x, integer shift)
assert shift >= 0 && shift <= N;
if (shift == 0) then
return x;
return ROR(x, N-shift);
// SHA256hash()
// ============
bits(128) SHA256hash(bits (128) X, bits(128) Y, bits(128) W, boolean part1)
bits(32) chs, maj, t;
for e = 0 to 3
chs = SHAchoose(Y<31:0>, Y<63:32>, Y<95:64>);
maj = SHAmajority(X<31:0>, X<63:32>, X<95:64>);
t = Y<127:96> + SHAhashSIGMA1(Y<31:0>) + chs + Elem[W, e, 32];
X<127:96> = t + X<127:96>;
Y<127:96> = t + SHAhashSIGMA0(X<31:0>) + maj;
<Y, X> = ROL(Y : X, 32);
return (if part1 then X else Y);
// SHAchoose()
// ===========
bits(32) SHAchoose(bits(32) x, bits(32) y, bits(32) z)
return (((y EOR z) AND x) EOR z);
// SHAhashSIGMA0()
// ===============
bits(32) SHAhashSIGMA0(bits(32) x)
return ROR(x, 2) EOR ROR(x, 13) EOR ROR(x, 22);
// SHAhashSIGMA1()
// ===============
bits(32) SHAhashSIGMA1(bits(32) x)
return ROR(x, 6) EOR ROR(x, 11) EOR ROR(x, 25);
// SHAmajority()
// =============
bits(32) SHAmajority(bits(32) x, bits(32) y, bits(32) z)
return ((x AND y) OR ((x OR y) AND z));
// SHAparity()
// ===========
bits(32) SHAparity(bits(32) x, bits(32) y, bits(32) z)
return (x EOR y EOR z);
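These helpers are the standard SHA-1/SHA-256 logical functions. Illustrative C one-liners, with rotr32 standing in for the pseudocode ROR on 32-bit values (names are mine, and rotr32 assumes 0 < n < 32 as used here):
#include <stdint.h>
static uint32_t rotr32(uint32_t x, unsigned n) { return (x >> n) | (x << (32 - n)); }
static uint32_t sha_choose(uint32_t x, uint32_t y, uint32_t z)   { return ((y ^ z) & x) ^ z; }
static uint32_t sha_majority(uint32_t x, uint32_t y, uint32_t z) { return (x & y) | ((x | y) & z); }
static uint32_t sha_parity(uint32_t x, uint32_t y, uint32_t z)   { return x ^ y ^ z; }
static uint32_t sha_sigma0(uint32_t x) { return rotr32(x, 2) ^ rotr32(x, 13) ^ rotr32(x, 22); }
static uint32_t sha_sigma1(uint32_t x) { return rotr32(x, 6) ^ rotr32(x, 11) ^ rotr32(x, 25); }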
// Sbox()
// ======
// Used in SM4E crypto instruction
bits(8) Sbox(bits(8) sboxin)
bits(8) sboxout;
bits(2048) sboxstring = 0xd690e9fecce13db716b614c228fb2c052b679a762abe04c3aa441326498606999c4250f491ef987a33540b43edcfac62e4b31ca9c908e89580df94fa758f3fa64707a7fcf37317ba83593c19e6854fa8686b81b27164da8bf8eb0f4b70569d351e240e5e6358d1a225227c3b01217887d40046579fd327524c3602e7a0c4c89eeabf8ad240c738b5a3f7f2cef96115a1e0ae5da49b341a55ad933230f58cb1e31df6e22e8266ca60c02923ab0d534e6fd5db3745defd8e2f03ff6a726d6c5b518d1baf92bbddbc7f11d95c411f105ad80ac13188a5cd7bbd2d74d012b8e5b4b08969974a0c96777e65b9f109c56ec68418f07dec3adc4d2079ee5f3ed7cb3948<2047:0>;
sboxout = sboxstring<(255-UInt(sboxin))*8+7:(255-UInt(sboxin))*8>;
return sboxout;
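The 2048-bit constant packs the 256-entry SM4 S-box with entry 0 in the most significant byte, which is why the slice index is 255-UInt(sboxin). In C the same lookup is an ordinary table index; the sketch below shows only the first four entries of the table and elides the rest.
#include <stdint.h>
// Illustrative lookup: sm4_sbox[0] holds the most significant byte of the
// pseudocode's sboxstring (0xd6), i.e. the entry for input 0.
static const uint8_t sm4_sbox[256] = { 0xd6, 0x90, 0xe9, 0xfe, /* ...remaining 252 entries elided... */ };
static uint8_t sbox(uint8_t in)
{
    return sm4_sbox[in];   // matches sboxstring<(255-UInt(in))*8+7 : (255-UInt(in))*8>
}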
// Clear the global Exclusives monitors for all PEs EXCEPT processorid if they
// record any part of the physical address region of size bytes starting at paddress.
// It is IMPLEMENTATION DEFINED whether the global Exclusives monitor for processorid
// is also cleared if it records any part of the address region.
ClearExclusiveByAddress(FullAddress paddress, integer processorid, integer size);
// Clear the local Exclusives monitor for the specified processorid.
ClearExclusiveLocal(integer processorid);
// ClearExclusiveMonitors()
// ========================
// Clear the local Exclusives monitor for the executing PE.
ClearExclusiveMonitors()
ClearExclusiveLocal(ProcessorID());
// Returns '0' to indicate success if the last memory write by this PE was to
// the same physical address region endorsed by ExclusiveMonitorsPass().
// Returns '1' to indicate failure if address translation resulted in a different
// physical address.
bit ExclusiveMonitorsStatus();
// Return TRUE if the global Exclusives monitor for processorid includes all of
// the physical address region of size bytes starting at paddress.
boolean IsExclusiveGlobal(FullAddress paddress, integer processorid, integer size);
// Return TRUE if the local Exclusives monitor for processorid includes all of
// the physical address region of size bytes starting at paddress.
boolean IsExclusiveLocal(FullAddress paddress, integer processorid, integer size);
// Record the physical address region of size bytes starting at paddress in
// the global Exclusives monitor for processorid.
MarkExclusiveGlobal(FullAddress paddress, integer processorid, integer size);
// Record the physical address region of size bytes starting at paddress in
// the local Exclusives monitor for processorid.
MarkExclusiveLocal(FullAddress paddress, integer processorid, integer size);
// Return the ID of the currently executing PE.
integer ProcessorID();
// AArch32.HaveHPDExt()
// ====================
boolean AArch32.HaveHPDExt()
return HasArchVersion(ARMv8p2);
// AArch64.HaveHPDExt()
// ====================
boolean AArch64.HaveHPDExt()
return HasArchVersion(ARMv8p1);
// Have52BitPAExt()
// ================
boolean Have52BitPAExt()
return HasArchVersion(ARMv8p2);
// Have52BitVAExt()
// ================
boolean Have52BitVAExt()
return HasArchVersion(ARMv8p2);
// HaveAtomicExt()
// ===============
boolean HaveAtomicExt()
return HasArchVersion(ARMv8p1);
// HaveBTIExt()
// ============
// Returns TRUE if support for Branch Target Identification is implemented.
boolean HaveBTIExt()
return HasArchVersion(ARMv8p5);
// HaveBlockBBM()
// ==============
// Returns TRUE if support for changing block size without requiring break-before-make is implemented.
boolean HaveBlockBBM()
return HasArchVersion(ARMv8p4);
// HaveCommonNotPrivateTransExt()
// ==============================
boolean HaveCommonNotPrivateTransExt()
return HasArchVersion(ARMv8p2);
// HaveDITExt()
// ============
boolean HaveDITExt()
return HasArchVersion(ARMv8p4);
// HaveDOTPExt()
// =============
// Returns TRUE if Dot Product feature support is implemented, and FALSE otherwise.
boolean HaveDOTPExt()
return HasArchVersion(ARMv8p4) || (HasArchVersion(ARMv8p2) && boolean IMPLEMENTATION_DEFINED "Has Dot Product extension");
// HaveDoubleFaultExt()
// ====================
boolean HaveDoubleFaultExt()
return (HasArchVersion(ARMv8p4) && HaveEL(EL3) && !ELUsingAArch32(EL3) && HaveIESB());
// HaveDoubleLock()
// ================
// Returns TRUE if support for the OS Double Lock is implemented.
boolean HaveDoubleLock()
return !HasArchVersion(ARMv8p4) || boolean IMPLEMENTATION_DEFINED "OS Double Lock is implemented";
// HaveE0PDExt()
// =============
// Returns TRUE if support for constant fault times for unprivileged accesses
// to the memory map is implemented.
boolean HaveE0PDExt()
return HasArchVersion(ARMv8p5);
// HaveDoPD()
// ==========
// Returns TRUE if Debug Over Power Down extension
// support is implemented and FALSE otherwise.
boolean HaveDoPD()
return HasArchVersion(ARMv8p2) && boolean IMPLEMENTATION_DEFINED "Has DoPD extension";
// HaveExtendedCacheSets()
// =======================
boolean HaveExtendedCacheSets()
return HasArchVersion(ARMv8p3);
// HaveExtendedECDebugEvents()
// ===========================
boolean HaveExtendedECDebugEvents()
return HasArchVersion(ARMv8p2);
// HaveExtendedExecuteNeverExt()
// =============================
boolean HaveExtendedExecuteNeverExt()
return HasArchVersion(ARMv8p2);
// HaveFCADDExt()
// ==============
boolean HaveFCADDExt()
return HasArchVersion(ARMv8p3);
// HaveFJCVTZSExt()
// ================
boolean HaveFJCVTZSExt()
return HasArchVersion(ARMv8p3);
// HaveFP16MulNoRoundingToFP32Ext()
// ================================
// Returns TRUE if has FP16 multiply with no intermediate rounding accumulate to FP32 instructions,
// and FALSE otherwise
boolean HaveFP16MulNoRoundingToFP32Ext()
if !HaveFP16Ext() then return FALSE;
if HasArchVersion(ARMv8p4) then return TRUE;
return (HasArchVersion(ARMv8p2) &&
boolean IMPLEMENTATION_DEFINED "Has accumulate FP16 product into FP32 extension");
// HaveFlagFormatExt()
// ===================
// Returns TRUE if flag format conversion instructions implemented.
boolean HaveFlagFormatExt()
return HasArchVersion(ARMv8p5);
// HaveFlagManipulateExt()
// =======================
// Returns TRUE if flag manipulate instructions are implemented.
boolean HaveFlagManipulateExt()
return HasArchVersion(ARMv8p4);
// HaveFrintExt()
// ==============
// Returns TRUE if FRINT instructions are implemented.
boolean HaveFrintExt()
return HasArchVersion(ARMv8p5);
// HaveHPMDExt()
// =============
boolean HaveHPMDExt()
return HasArchVersion(ARMv8p1);
// HaveIDSExt()
// ============
// Returns TRUE if ID register handling feature is implemented.
boolean HaveIDSExt()
return HasArchVersion(ARMv8p4);
// HaveIESB()
// ==========
boolean HaveIESB()
return (HaveRASExt() &&
boolean IMPLEMENTATION_DEFINED "Has Implicit Error Synchronization Barrier");
// HaveMPAMExt()
// =============
// Returns TRUE if MPAM is implemented, and FALSE otherwise.
boolean HaveMPAMExt()
return (HasArchVersion(ARMv8p2) &&
boolean IMPLEMENTATION_DEFINED "Has MPAM extension");
// HaveMTEExt()
// ============
// Returns TRUE if MTE implemented, and FALSE otherwise.
boolean HaveMTEExt()
if !HasArchVersion(ARMv8p5) then
return FALSE;
return boolean IMPLEMENTATION_DEFINED "Has MTE extension";
// HaveNV2Ext()
// ============
// Returns TRUE if Enhanced Nested Virtualization is implemented.
boolean HaveNV2Ext()
return (HasArchVersion(ARMv8p4) && HaveNVExt()
&& boolean IMPLEMENTATION_DEFINED "Has support for Enhanced Nested Virtualization");
// HaveNVExt()
// ===========
// Returns TRUE if Nested Virtualization is implemented.
boolean HaveNVExt()
return HasArchVersion(ARMv8p3) && boolean IMPLEMENTATION_DEFINED "Has Nested Virtualization";
// HaveNoSecurePMUDisableOverride()
// ================================
boolean HaveNoSecurePMUDisableOverride()
return HasArchVersion(ARMv8p2);
// HaveNoninvasiveDebugAuth()
// ==========================
// Returns TRUE if the Non-invasive debug controls are implemented.
boolean HaveNoninvasiveDebugAuth()
return !HasArchVersion(ARMv8p4);
// HavePANExt()
// ============
boolean HavePANExt()
return HasArchVersion(ARMv8p1);
// HavePageBasedHardwareAttributes()
// =================================
boolean HavePageBasedHardwareAttributes()
return HasArchVersion(ARMv8p2);
// HavePrivATExt()
// ===============
boolean HavePrivATExt()
return HasArchVersion(ARMv8p2);
// HaveQRDMLAHExt()
// ================
boolean HaveQRDMLAHExt()
return HasArchVersion(ARMv8p1);
boolean HaveAccessFlagUpdateExt()
return HasArchVersion(ARMv8p1);
boolean HaveDirtyBitModifierExt()
return HasArchVersion(ARMv8p1);
// HaveRASExt()
// ============
boolean HaveRASExt()
return (HasArchVersion(ARMv8p2) ||
boolean IMPLEMENTATION_DEFINED "Has RAS extension");
// HaveSBExt()
// ===========
// Returns TRUE if support for SB is implemented, and FALSE otherwise.
boolean HaveSBExt()
return HasArchVersion(ARMv8p5) || boolean IMPLEMENTATION_DEFINED "Has SB extension";
// HaveSSBSExt()
// =============
// Returns TRUE if support for SSBS is implemented, and FALSE otherwise.
boolean HaveSSBSExt()
return HasArchVersion(ARMv8p5) || boolean IMPLEMENTATION_DEFINED "Has SSBS extension";
// HaveSecureEL2Ext()
// ==================
// Returns TRUE if Secure EL2 is implemented.
boolean HaveSecureEL2Ext()
return HasArchVersion(ARMv8p4);
// HaveSecureExtDebugView()
// ========================
// Returns TRUE if support for Secure and Non-secure views of debug peripherals is implemented.
boolean HaveSecureExtDebugView()
return HasArchVersion(ARMv8p4);
// HaveSelfHostedTrace()
// =====================
boolean HaveSelfHostedTrace()
return HasArchVersion(ARMv8p4);
// HaveSmallPageTblExt()
// =====================
// Returns TRUE if Small Page Table Support is implemented.
boolean HaveSmallPageTblExt()
return HasArchVersion(ARMv8p4) && boolean IMPLEMENTATION_DEFINED "Has Small Page Table extension";
// HaveStage2MemAttrControl()
// ==========================
// Returns TRUE if support for Stage2 control of memory types and cacheability attributes is implemented.
boolean HaveStage2MemAttrControl()
return HasArchVersion(ARMv8p4);
// HaveStatisticalProfiling()
// ==========================
boolean HaveStatisticalProfiling()
return HasArchVersion(ARMv8p2);
// HaveTraceExt()
// ==============
// Returns TRUE if Trace functionality as described by the Trace Architecture
// is implemented.
boolean HaveTraceExt()
return boolean IMPLEMENTATION_DEFINED "Has Trace Architecture functionality";
// HaveTrapLoadStoreMultipleDeviceExt()
// ====================================
boolean HaveTrapLoadStoreMultipleDeviceExt()
return HasArchVersion(ARMv8p2);
// HaveUA16Ext()
// =============
// Returns TRUE if extended unaligned memory access support is implemented, and FALSE otherwise.
boolean HaveUA16Ext()
return HasArchVersion(ARMv8p4);
// HaveUAOExt()
// ============
boolean HaveUAOExt()
return HasArchVersion(ARMv8p2);
// HaveVirtHostExt()
// =================
boolean HaveVirtHostExt()
return HasArchVersion(ARMv8p1);
// If SCTLR_ELx.IESB is 1 when an exception is generated to ELx, any pending Unrecoverable
// SError interrupt must be taken before executing any instructions in the exception handler.
// However, this can be before the branch to the exception handler is made.
boolean InsertIESBBeforeException(bits(2) el);
// FixedToFP()
// ===========
// Convert M-bit fixed point OP with FBITS fractional bits to
// N-bit precision floating point, controlled by UNSIGNED and ROUNDING.
bits(N) FixedToFP(bits(M) op, integer fbits, boolean unsigned, FPCRType fpcr, FPRounding rounding)
assert N IN {16,32,64};
assert M IN {16,32,64};
bits(N) result;
assert fbits >= 0;
assert rounding != FPRounding_ODD;
// Correct signed-ness
int_operand = Int(op, unsigned);
// Scale by fractional bits and generate a real value
real_operand = Real(int_operand) / 2.0^fbits;
if real_operand == 0.0 then
result = FPZero('0');
else
result = FPRound(real_operand, fpcr, rounding);
return result;
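The conversion is just op / 2^fbits evaluated exactly and then rounded to the target precision. A minimal C sketch for a signed 32-bit input converted to double; unlike the pseudocode, rounding here is whatever the C environment provides rather than being FPCR-controlled, and the name is mine.
#include <math.h>
#include <stdint.h>
// Illustrative analogue of FixedToFP(): scale by 2^-fbits exactly via ldexp.
static double fixed_to_fp(int32_t op, int fbits)
{
    return ldexp((double)op, -fbits);   // op / 2^fbits
}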
// FPAbs()
// =======
bits(N) FPAbs(bits(N) op)
assert N IN {16,32,64};
return '0' : op<N-2:0>;
// FPAdd()
// =======
bits(N) FPAdd(bits(N) op1, bits(N) op2, FPCRType fpcr)
assert N IN {16,32,64};
rounding = FPRoundingMode(fpcr);
(type1,sign1,value1) = FPUnpack(op1, fpcr);
(type2,sign2,value2) = FPUnpack(op2, fpcr);
(done,result) = FPProcessNaNs(type1, type2, op1, op2, fpcr);
if !done then
inf1 = (type1 == FPType_Infinity); inf2 = (type2 == FPType_Infinity);
zero1 = (type1 == FPType_Zero); zero2 = (type2 == FPType_Zero);
if inf1 && inf2 && sign1 == NOT(sign2) then
result = FPDefaultNaN();
FPProcessException(FPExc_InvalidOp, fpcr);
elsif (inf1 && sign1 == '0') || (inf2 && sign2 == '0') then
result = FPInfinity('0');
elsif (inf1 && sign1 == '1') || (inf2 && sign2 == '1') then
result = FPInfinity('1');
elsif zero1 && zero2 && sign1 == sign2 then
result = FPZero(sign1);
else
result_value = value1 + value2;
if result_value == 0.0 then // Sign of exact zero result depends on rounding mode
result_sign = if rounding == FPRounding_NEGINF then '1' else '0';
result = FPZero(result_sign);
else
result = FPRound(result_value, fpcr, rounding);
return result;
// FPCompare()
// ===========
bits(4) FPCompare(bits(N) op1, bits(N) op2, boolean signal_nans, FPCRType fpcr)
assert N IN {16,32,64};
(type1,sign1,value1) = FPUnpack(op1, fpcr);
(type2,sign2,value2) = FPUnpack(op2, fpcr);
if type1==FPType_SNaN || type1==FPType_QNaN || type2==FPType_SNaN || type2==FPType_QNaN then
result = '0011';
if type1==FPType_SNaN || type2==FPType_SNaN || signal_nans then
FPProcessException(FPExc_InvalidOp, fpcr);
else
// All non-NaN cases can be evaluated on the values produced by FPUnpack()
if value1 == value2 then
result = '0110';
elsif value1 < value2 then
result = '1000';
else // value1 > value2
result = '0010';
return result;
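The 4-bit result is the NZCV flag set: '0110' for equal, '1000' for less-than, '0010' for greater-than, and '0011' for unordered. A minimal C sketch of the same mapping for doubles (the name and encoding comments are mine):
#include <math.h>
// Illustrative NZCV nibble as returned by FPCompare: bit3=N, bit2=Z, bit1=C, bit0=V.
static unsigned fp_compare_nzcv(double a, double b)
{
    if (isnan(a) || isnan(b)) return 0x3; // '0011' unordered
    if (a == b)               return 0x6; // '0110' equal
    if (a < b)                return 0x8; // '1000' less than
    return 0x2;                           // '0010' greater than
}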
// FPCompareEQ()
// =============
boolean FPCompareEQ(bits(N) op1, bits(N) op2, FPCRType fpcr)
assert N IN {16,32,64};
(type1,sign1,value1) = FPUnpack(op1, fpcr);
(type2,sign2,value2) = FPUnpack(op2, fpcr);
if type1==FPType_SNaN || type1==FPType_QNaN || type2==FPType_SNaN || type2==FPType_QNaN then
result = FALSE;
if type1==FPType_SNaN || type2==FPType_SNaN then
FPProcessException(FPExc_InvalidOp, fpcr);
else
// All non-NaN cases can be evaluated on the values produced by FPUnpack()
result = (value1 == value2);
return result;
// FPCompareGE()
// =============
boolean FPCompareGE(bits(N) op1, bits(N) op2, FPCRType fpcr)
assert N IN {16,32,64};
(type1,sign1,value1) = FPUnpack(op1, fpcr);
(type2,sign2,value2) = FPUnpack(op2, fpcr);
if type1==FPType_SNaN || type1==FPType_QNaN || type2==FPType_SNaN || type2==FPType_QNaN then
result = FALSE;
FPProcessException(FPExc_InvalidOp, fpcr);
else
// All non-NaN cases can be evaluated on the values produced by FPUnpack()
result = (value1 >= value2);
return result;
// FPCompareGT()
// =============
boolean FPCompareGT(bits(N) op1, bits(N) op2, FPCRType fpcr)
assert N IN {16,32,64};
(type1,sign1,value1) = FPUnpack(op1, fpcr);
(type2,sign2,value2) = FPUnpack(op2, fpcr);
if type1==FPType_SNaN || type1==FPType_QNaN || type2==FPType_SNaN || type2==FPType_QNaN then
result = FALSE;
FPProcessException(FPExc_InvalidOp, fpcr);
else
// All non-NaN cases can be evaluated on the values produced by FPUnpack()
result = (value1 > value2);
return result;
// FPConvert()
// ===========
// Convert floating point OP with N-bit precision to M-bit precision,
// with rounding controlled by ROUNDING.
// This is used by the FP-to-FP conversion instructions and so for
// half-precision data ignores FZ16, but observes AHP.
bits(M) FPConvert(bits(N) op, FPCRType fpcr, FPRounding rounding)
assert M IN {16,32,64};
assert N IN {16,32,64};
bits(M) result;
// Unpack floating-point operand optionally with flush-to-zero.
(fptype,sign,value) = FPUnpackCV(op, fpcr);
alt_hp = (M == 16) && (fpcr.AHP == '1');
if fptype == FPType_SNaN || fptype == FPType_QNaN then
if alt_hp then
result = FPZero(sign);
elsif fpcr.DN == '1' then
result = FPDefaultNaN();
else
result = FPConvertNaN(op);
if fptype == FPType_SNaN || alt_hp then
FPProcessException(FPExc_InvalidOp, fpcr);
elsif fptype == FPType_Infinity then
if alt_hp then
result = sign:Ones(M-1);
FPProcessException(FPExc_InvalidOp, fpcr);
else
result = FPInfinity(sign);
elsif fptype == FPType_Zero then
result = FPZero(sign);
else
result = FPRoundCV(value, fpcr, rounding);
return result;
// FPConvert()
// ===========
bits(M) FPConvert(bits(N) op, FPCRType fpcr)
return FPConvert(op, fpcr, FPRoundingMode(fpcr));
// FPConvertNaN()
// ==============
// Converts a NaN of one floating-point type to another
bits(M) FPConvertNaN(bits(N) op)
assert N IN {16,32,64};
assert M IN {16,32,64};
bits(M) result;
bits(51) frac;
sign = op<N-1>;
// Unpack payload from input NaN
case N of
when 64 frac = op<50:0>;
when 32 frac = op<21:0>:Zeros(29);
when 16 frac = op<8:0>:Zeros(42);
// Repack payload into output NaN, while
// converting an SNaN to a QNaN.
case M of
when 64 result = sign:Ones(M-52):frac;
when 32 result = sign:Ones(M-23):frac<50:29>;
when 16 result = sign:Ones(M-10):frac<50:42>;
return result;
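Not part of the specification: an informal Python sketch of the 64-bit to 32-bit case, assuming IEEE 754 bit layouts; the helper name fp_convert_nan_64_to_32 is illustrative only.

# Informal sketch of FPConvertNaN for the 64-bit -> 32-bit case:
# keep the sign, force the exponent and the quiet bit to ones
# (quieting any SNaN), and keep the top 22 payload bits.
def fp_convert_nan_64_to_32(bits64: int) -> int:
    sign = (bits64 >> 63) & 1
    frac51 = bits64 & ((1 << 51) - 1)            # payload below the quiet bit
    top22 = frac51 >> 29                         # frac<50:29> survives
    return (sign << 31) | (0x1FF << 22) | top22  # sign : Ones(9) : frac

# 0x7FF4... is a signalling double NaN; the result is a quiet single NaN
# carrying the surviving payload bits.
assert fp_convert_nan_64_to_32(0x7FF4000000000000) == 0x7FE00000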
type FPCRType;
// FPDecodeRM()
// ============
// Decode most common AArch32 floating-point rounding encoding.
FPRounding FPDecodeRM(bits(2) rm)
case rm of
when '00' return FPRounding_TIEAWAY; // A
when '01' return FPRounding_TIEEVEN; // N
when '10' return FPRounding_POSINF; // P
when '11' return FPRounding_NEGINF; // M
// FPDecodeRounding()
// ==================
// Decode floating-point rounding mode and common AArch64 encoding.
FPRounding FPDecodeRounding(bits(2) rmode)
case rmode of
when '00' return FPRounding_TIEEVEN; // N
when '01' return FPRounding_POSINF; // P
when '10' return FPRounding_NEGINF; // M
when '11' return FPRounding_ZERO; // Z
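Not part of the specification: an informal Python sketch contrasting the two decode tables above, for illustration only.

# Informal sketch of the two 2-bit rounding-mode decoders.
# Note the AArch32 FPDecodeRM order (A,N,P,M) differs from the
# order (N,P,M,Z) decoded by FPDecodeRounding.
FP_DECODE_RM = {0b00: "TIEAWAY", 0b01: "TIEEVEN",
                0b10: "POSINF",  0b11: "NEGINF"}
FP_DECODE_ROUNDING = {0b00: "TIEEVEN", 0b01: "POSINF",
                      0b10: "NEGINF",  0b11: "ZERO"}

assert FP_DECODE_RM[0b00] == "TIEAWAY"      # 'A' encoding
assert FP_DECODE_ROUNDING[0b11] == "ZERO"   # 'Z' encoding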
// FPDefaultNaN()
// ==============
bits(N) FPDefaultNaN()
assert N IN {16,32,64};
constant integer E = (if N == 16 then 5 elsif N == 32 then 8 else 11);
constant integer F = N - (E + 1);
sign = '0';
exp = Ones(E);
frac = '1':Zeros(F-1);
return sign : exp : frac;
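Not part of the specification: an informal Python sketch that computes the default NaN encodings for the three widths.

# Informal sketch: the default NaN is positive, with an all-ones exponent
# and only the top fraction bit set.
def fp_default_nan(n: int) -> int:
    e = {16: 5, 32: 8, 64: 11}[n]
    f = n - (e + 1)
    return (((1 << e) - 1) << f) | (1 << (f - 1))  # '0' : Ones(E) : '1' : Zeros(F-1)

assert fp_default_nan(16) == 0x7E00
assert fp_default_nan(32) == 0x7FC00000
assert fp_default_nan(64) == 0x7FF8000000000000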
// FPDiv()
// =======
bits(N) FPDiv(bits(N) op1, bits(N) op2, FPCRType fpcr)
assert N IN {16,32,64};
(type1,sign1,value1) = FPUnpack(op1, fpcr);
(type2,sign2,value2) = FPUnpack(op2, fpcr);
(done,result) = FPProcessNaNs(type1, type2, op1, op2, fpcr);
if !done then
inf1 = (type1 == FPType_Infinity);
inf2 = (type2 == FPType_Infinity);
zero1 = (type1 == FPType_Zero);
zero2 = (type2 == FPType_Zero);
if (inf1 && inf2) || (zero1 && zero2) then
result = FPDefaultNaN();
FPProcessException(FPExc_InvalidOp, fpcr);
elsif inf1 || zero2 then
result = FPInfinity(sign1 EOR sign2);
if !inf1 then FPProcessException(FPExc_DivideByZero, fpcr);
elsif zero1 || inf2 then
result = FPZero(sign1 EOR sign2);
else
result = FPRound(value1/value2, fpcr);
return result;
enumeration FPExc {FPExc_InvalidOp, FPExc_DivideByZero, FPExc_Overflow,
FPExc_Underflow, FPExc_Inexact, FPExc_InputDenorm};
// FPInfinity()
// ============
bits(N) FPInfinity(bit sign)
assert N IN {16,32,64};
constant integer E = (if N == 16 then 5 elsif N == 32 then 8 else 11);
constant integer F = N - (E + 1);
exp = Ones(E);
frac = Zeros(F);
return sign : exp : frac;
// FPMax()
// =======
bits(N) FPMax(bits(N) op1, bits(N) op2, FPCRType fpcr)
assert N IN {16,32,64};
(type1,sign1,value1) = FPUnpack(op1, fpcr);
(type2,sign2,value2) = FPUnpack(op2, fpcr);
(done,result) = FPProcessNaNs(type1, type2, op1, op2, fpcr);
if !done then
if value1 > value2 then
(fptype,sign,value) = (type1,sign1,value1);
else
(fptype,sign,value) = (type2,sign2,value2);
if fptype == FPType_Infinity then
result = FPInfinity(sign);
elsif fptype == FPType_Zero then
sign = sign1 AND sign2; // Use most positive sign
result = FPZero(sign);
else
// The use of FPRound() covers the case where there is a trapped underflow exception
// for a denormalized number even though the result is exact.
result = FPRound(value, fpcr);
return result;
// FPMaxNormal()
// =============
bits(N) FPMaxNormal(bit sign)
assert N IN {16,32,64};
constant integer E = (if N == 16 then 5 elsif N == 32 then 8 else 11);
constant integer F = N - (E + 1);
exp = Ones(E-1):'0';
frac = Ones(F);
return sign : exp : frac;
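Not part of the specification: an informal Python sketch of the resulting encodings, for illustration only.

# Informal sketch: the largest finite value has exponent Ones(E-1):'0'
# and an all-ones fraction.
def fp_max_normal(n: int, sign: int = 0) -> int:
    e = {16: 5, 32: 8, 64: 11}[n]
    f = n - (e + 1)
    exp = ((1 << (e - 1)) - 1) << 1        # Ones(E-1):'0'
    return (sign << (n - 1)) | (exp << f) | ((1 << f) - 1)

assert fp_max_normal(16) == 0x7BFF              # 65504.0 in IEEE half
assert fp_max_normal(32) == 0x7F7FFFFF          # FLT_MAX
assert fp_max_normal(64) == 0x7FEFFFFFFFFFFFFF  # DBL_MAX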
// FPMaxNum()
// ==========
bits(N) FPMaxNum(bits(N) op1, bits(N) op2, FPCRType fpcr)
assert N IN {16,32,64};
(type1,-,-) = FPUnpack(op1, fpcr);
(type2,-,-) = FPUnpack(op2, fpcr);
// Treat a single quiet-NaN as -Infinity
if type1 == FPType_QNaN && type2 != FPType_QNaN then
op1 = FPInfinity('1');
elsif type1 != FPType_QNaN && type2 == FPType_QNaN then
op2 = FPInfinity('1');
return FPMax(op1, op2, fpcr);
// FPMin()
// =======
bits(N) FPMin(bits(N) op1, bits(N) op2, FPCRType fpcr)
assert N IN {16,32,64};
(type1,sign1,value1) = FPUnpack(op1, fpcr);
(type2,sign2,value2) = FPUnpack(op2, fpcr);
(done,result) = FPProcessNaNs(type1, type2, op1, op2, fpcr);
if !done then
if value1 < value2 then
(fptype,sign,value) = (type1,sign1,value1);
else
(fptype,sign,value) = (type2,sign2,value2);
if fptype == FPType_Infinity then
result = FPInfinity(sign);
elsif fptype == FPType_Zero then
sign = sign1 OR sign2; // Use most negative sign
result = FPZero(sign);
else
// The use of FPRound() covers the case where there is a trapped underflow exception
// for a denormalized number even though the result is exact.
result = FPRound(value, fpcr);
return result;
// FPMinNum()
// ==========
bits(N) FPMinNum(bits(N) op1, bits(N) op2, FPCRType fpcr)
assert N IN {16,32,64};
(type1,-,-) = FPUnpack(op1, fpcr);
(type2,-,-) = FPUnpack(op2, fpcr);
// Treat a single quiet-NaN as +Infinity
if type1 == FPType_QNaN && type2 != FPType_QNaN then
op1 = FPInfinity('0');
elsif type1 != FPType_QNaN && type2 == FPType_QNaN then
op2 = FPInfinity('0');
return FPMin(op1, op2, fpcr);
// FPMul()
// =======
bits(N) FPMul(bits(N) op1, bits(N) op2, FPCRType fpcr)
assert N IN {16,32,64};
(type1,sign1,value1) = FPUnpack(op1, fpcr);
(type2,sign2,value2) = FPUnpack(op2, fpcr);
(done,result) = FPProcessNaNs(type1, type2, op1, op2, fpcr);
if !done then
inf1 = (type1 == FPType_Infinity);
inf2 = (type2 == FPType_Infinity);
zero1 = (type1 == FPType_Zero);
zero2 = (type2 == FPType_Zero);
if (inf1 && zero2) || (zero1 && inf2) then
result = FPDefaultNaN();
FPProcessException(FPExc_InvalidOp, fpcr);
elsif inf1 || inf2 then
result = FPInfinity(sign1 EOR sign2);
elsif zero1 || zero2 then
result = FPZero(sign1 EOR sign2);
else
result = FPRound(value1*value2, fpcr);
return result;
// FPMulAdd()
// ==========
//
// Calculates addend + op1*op2 with a single rounding.
bits(N) FPMulAdd(bits(N) addend, bits(N) op1, bits(N) op2, FPCRType fpcr)
assert N IN {16,32,64};
rounding = FPRoundingMode(fpcr);
(typeA,signA,valueA) = FPUnpack(addend, fpcr);
(type1,sign1,value1) = FPUnpack(op1, fpcr);
(type2,sign2,value2) = FPUnpack(op2, fpcr);
inf1 = (type1 == FPType_Infinity); zero1 = (type1 == FPType_Zero);
inf2 = (type2 == FPType_Infinity); zero2 = (type2 == FPType_Zero);
(done,result) = FPProcessNaNs3(typeA, type1, type2, addend, op1, op2, fpcr);
if typeA == FPType_QNaN && ((inf1 && zero2) || (zero1 && inf2)) then
result = FPDefaultNaN();
FPProcessException(FPExc_InvalidOp, fpcr);
if !done then
infA = (typeA == FPType_Infinity); zeroA = (typeA == FPType_Zero);
// Determine sign and type product will have if it does not cause an Invalid
// Operation.
signP = sign1 EOR sign2;
infP = inf1 || inf2;
zeroP = zero1 || zero2;
// Non SNaN-generated Invalid Operation cases are multiplies of zero by infinity and
// additions of opposite-signed infinities.
if (inf1 && zero2) || (zero1 && inf2) || (infA && infP && signA != signP) then
result = FPDefaultNaN();
FPProcessException(FPExc_InvalidOp, fpcr);
// Other cases involving infinities produce an infinity of the same sign.
elsif (infA && signA == '0') || (infP && signP == '0') then
result = FPInfinity('0');
elsif (infA && signA == '1') || (infP && signP == '1') then
result = FPInfinity('1');
// Cases where the result is exactly zero and its sign is not determined by the
// rounding mode are additions of same-signed zeros.
elsif zeroA && zeroP && signA == signP then
result = FPZero(signA);
// Otherwise calculate numerical result and round it.
else
result_value = valueA + (value1 * value2);
if result_value == 0.0 then // Sign of exact zero result depends on rounding mode
result_sign = if rounding == FPRounding_NEGINF then '1' else '0';
result = FPZero(result_sign);
else
result = FPRound(result_value, fpcr);
return result;
// FPMulAddH()
// ===========
bits(N) FPMulAddH(bits(N) addend, bits(N DIV 2) op1, bits(N DIV 2) op2, FPCRType fpcr)
assert N IN {32,64};
rounding = FPRoundingMode(fpcr);
(typeA,signA,valueA) = FPUnpack(addend, fpcr);
(type1,sign1,value1) = FPUnpack(op1, fpcr);
(type2,sign2,value2) = FPUnpack(op2, fpcr);
inf1 = (type1 == FPType_Infinity); zero1 = (type1 == FPType_Zero);
inf2 = (type2 == FPType_Infinity); zero2 = (type2 == FPType_Zero);
(done,result) = FPProcessNaNs3H(typeA, type1, type2, addend, op1, op2, fpcr);
if typeA == FPType_QNaN && ((inf1 && zero2) || (zero1 && inf2)) then
result = FPDefaultNaN();
FPProcessException(FPExc_InvalidOp, fpcr);
if !done then
infA = (typeA == FPType_Infinity); zeroA = (typeA == FPType_Zero);
// Determine sign and type product will have if it does not cause an Invalid
// Operation.
signP = sign1 EOR sign2;
infP = inf1 || inf2;
zeroP = zero1 || zero2;
// Non SNaN-generated Invalid Operation cases are multiplies of zero by infinity and
// additions of opposite-signed infinities.
if (inf1 && zero2) || (zero1 && inf2) || (infA && infP && signA != signP) then
result = FPDefaultNaN();
FPProcessException(FPExc_InvalidOp, fpcr);
// Other cases involving infinities produce an infinity of the same sign.
elsif (infA && signA == '0') || (infP && signP == '0') then
result = FPInfinity('0');
elsif (infA && signA == '1') || (infP && signP == '1') then
result = FPInfinity('1');
// Cases where the result is exactly zero and its sign is not determined by the
// rounding mode are additions of same-signed zeros.
elsif zeroA && zeroP && signA == signP then
result = FPZero(signA);
// Otherwise calculate numerical result and round it.
else
result_value = valueA + (value1 * value2);
if result_value == 0.0 then // Sign of exact zero result depends on rounding mode
result_sign = if rounding == FPRounding_NEGINF then '1' else '0';
result = FPZero(result_sign);
else
result = FPRound(result_value, fpcr);
return result;
// FPProcessNaNs3H()
// =================
(boolean, bits(N)) FPProcessNaNs3H(FPType type1, FPType type2, FPType type3,
bits(N) op1, bits(N DIV 2) op2, bits(N DIV 2) op3,
FPCRType fpcr)
assert N IN {32,64};
bits(N) result;
if type1 == FPType_SNaN then
done = TRUE; result = FPProcessNaN(type1, op1, fpcr);
elsif type2 == FPType_SNaN then
done = TRUE; result = FPConvertNaN(FPProcessNaN(type2, op2, fpcr));
elsif type3 == FPType_SNaN then
done = TRUE; result = FPConvertNaN(FPProcessNaN(type3, op3, fpcr));
elsif type1 == FPType_QNaN then
done = TRUE; result = FPProcessNaN(type1, op1, fpcr);
elsif type2 == FPType_QNaN then
done = TRUE; result = FPConvertNaN(FPProcessNaN(type2, op2, fpcr));
elsif type3 == FPType_QNaN then
done = TRUE; result = FPConvertNaN(FPProcessNaN(type3, op3, fpcr));
else
done = FALSE; result = Zeros(); // 'Don't care' result
return (done, result);
// FPMulX()
// ========
bits(N) FPMulX(bits(N) op1, bits(N) op2, FPCRType fpcr)
assert N IN {16,32,64};
bits(N) result;
(type1,sign1,value1) = FPUnpack(op1, fpcr);
(type2,sign2,value2) = FPUnpack(op2, fpcr);
(done,result) = FPProcessNaNs(type1, type2, op1, op2, fpcr);
if !done then
inf1 = (type1 == FPType_Infinity);
inf2 = (type2 == FPType_Infinity);
zero1 = (type1 == FPType_Zero);
zero2 = (type2 == FPType_Zero);
if (inf1 && zero2) || (zero1 && inf2) then
result = FPTwo(sign1 EOR sign2);
elsif inf1 || inf2 then
result = FPInfinity(sign1 EOR sign2);
elsif zero1 || zero2 then
result = FPZero(sign1 EOR sign2);
else
result = FPRound(value1*value2, fpcr);
return result;
// FPNeg()
// =======
bits(N) FPNeg(bits(N) op)
assert N IN {16,32,64};
return NOT(op<N-1>) : op<N-2:0>;
// FPOnePointFive()
// ================
bits(N) FPOnePointFive(bit sign)
assert N IN {16,32,64};
constant integer E = (if N == 16 then 5 elsif N == 32 then 8 else 11);
constant integer F = N - (E + 1);
exp = '0':Ones(E-1);
frac = '1':Zeros(F-1);
return sign : exp : frac;
// FPProcessException()
// ====================
//
// The 'fpcr' argument supplies FPCR control bits. Status information is
// updated directly in the FPSR where appropriate.
FPProcessException(FPExc exception, FPCRType fpcr)
// Determine the cumulative exception bit number
case exception of
when FPExc_InvalidOp cumul = 0;
when FPExc_DivideByZero cumul = 1;
when FPExc_Overflow cumul = 2;
when FPExc_Underflow cumul = 3;
when FPExc_Inexact cumul = 4;
when FPExc_InputDenorm cumul = 7;
enable = cumul + 8;
if fpcr<enable> == '1' then
// Trapping of the exception enabled.
// It is IMPLEMENTATION DEFINED whether the enable bit may be set at all, and
// if so then how exceptions may be accumulated before calling FPTrappedException()
IMPLEMENTATION_DEFINED "floating-point trap handling";
elsif UsingAArch32() then
// Set the cumulative exception bit
FPSCR<cumul> = '1';
else
// Set the cumulative exception bit
FPSR<cumul> = '1';
return;
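Not part of the specification: an informal Python sketch of the cumulative-flag and trap-enable bit mapping used above, with trap handling left abstract.

# Informal sketch: the FPSR/FPSCR cumulative flag is bit 'cumul', and the
# corresponding trap-enable bit in FPCR/FPSCR is 'cumul + 8'.
CUMUL_BIT = {"InvalidOp": 0, "DivideByZero": 1, "Overflow": 2,
             "Underflow": 3, "Inexact": 4, "InputDenorm": 7}

def process_exception(exc: str, fpcr: int, fpsr: int) -> int:
    """Returns the updated FPSR; trapping is left abstract here."""
    cumul = CUMUL_BIT[exc]
    if (fpcr >> (cumul + 8)) & 1:
        raise NotImplementedError("trap handling is IMPLEMENTATION DEFINED")
    return fpsr | (1 << cumul)

# Inexact (IXC) with traps disabled just sets FPSR bit 4.
assert process_exception("Inexact", fpcr=0, fpsr=0) == 0b10000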
// FPProcessNaN()
// ==============
bits(N) FPProcessNaN(FPType fptype, bits(N) op, FPCRType fpcr)
assert N IN {16,32,64};
assert fptype IN {FPType_QNaN, FPType_SNaN};
case N of
when 16 topfrac = 9;
when 32 topfrac = 22;
when 64 topfrac = 51;
result = op;
if fptype == FPType_SNaN then
result<topfrac> = '1';
FPProcessException(FPExc_InvalidOp, fpcr);
if fpcr.DN == '1' then // DefaultNaN requested
result = FPDefaultNaN();
return result;
// FPProcessNaNs()
// ===============
//
// The boolean part of the return value says whether a NaN has been found and
// processed. The bits(N) part is only relevant if it has and supplies the
// result of the operation.
//
// The 'fpcr' argument supplies FPCR control bits. Status information is
// updated directly in the FPSR where appropriate.
(boolean, bits(N)) FPProcessNaNs(FPType type1, FPType type2,
bits(N) op1, bits(N) op2,
FPCRType fpcr)
assert N IN {16,32,64};
if type1 == FPType_SNaN then
done = TRUE; result = FPProcessNaN(type1, op1, fpcr);
elsif type2 == FPType_SNaN then
done = TRUE; result = FPProcessNaN(type2, op2, fpcr);
elsif type1 == FPType_QNaN then
done = TRUE; result = FPProcessNaN(type1, op1, fpcr);
elsif type2 == FPType_QNaN then
done = TRUE; result = FPProcessNaN(type2, op2, fpcr);
else
done = FALSE; result = Zeros(); // 'Don't care' result
return (done, result);
// FPProcessNaNs3()
// ================
//
// The boolean part of the return value says whether a NaN has been found and
// processed. The bits(N) part is only relevant if it has and supplies the
// result of the operation.
//
// The 'fpcr' argument supplies FPCR control bits. Status information is
// updated directly in the FPSR where appropriate.
(boolean, bits(N)) FPProcessNaNs3(FPType type1, FPType type2, FPType type3,
bits(N) op1, bits(N) op2, bits(N) op3,
FPCRType fpcr)
assert N IN {16,32,64};
if type1 == FPType_SNaN then
done = TRUE; result = FPProcessNaN(type1, op1, fpcr);
elsif type2 == FPType_SNaN then
done = TRUE; result = FPProcessNaN(type2, op2, fpcr);
elsif type3 == FPType_SNaN then
done = TRUE; result = FPProcessNaN(type3, op3, fpcr);
elsif type1 == FPType_QNaN then
done = TRUE; result = FPProcessNaN(type1, op1, fpcr);
elsif type2 == FPType_QNaN then
done = TRUE; result = FPProcessNaN(type2, op2, fpcr);
elsif type3 == FPType_QNaN then
done = TRUE; result = FPProcessNaN(type3, op3, fpcr);
else
done = FALSE; result = Zeros(); // 'Don't care' result
return (done, result);
// FPRecipEstimate()
// =================
bits(N) FPRecipEstimate(bits(N) operand, FPCRType fpcr)
assert N IN {16,32,64};
(fptype,sign,value) = FPUnpack(operand, fpcr);
if fptype == FPType_SNaN || fptype == FPType_QNaN then
result = FPProcessNaN(fptype, operand, fpcr);
elsif fptype == FPType_Infinity then
result = FPZero(sign);
elsif fptype == FPType_Zero then
result = FPInfinity(sign);
FPProcessException(FPExc_DivideByZero, fpcr);
elsif (
(N == 16 && Abs(value) < 2.0^-16) ||
(N == 32 && Abs(value) < 2.0^-128) ||
(N == 64 && Abs(value) < 2.0^-1024)
) then
case FPRoundingMode(fpcr) of
when FPRounding_TIEEVEN
overflow_to_inf = TRUE;
when FPRounding_POSINF
overflow_to_inf = (sign == '0');
when FPRounding_NEGINF
overflow_to_inf = (sign == '1');
when FPRounding_ZERO
overflow_to_inf = FALSE;
result = if overflow_to_inf then FPInfinity(sign) else FPMaxNormal(sign);
FPProcessException(FPExc_Overflow, fpcr);
FPProcessException(FPExc_Inexact, fpcr);
elsif ((fpcr.FZ == '1' && N != 16) || (fpcr.FZ16 == '1' && N == 16))
&& (
(N == 16 && Abs(value) >= 2.0^14) ||
(N == 32 && Abs(value) >= 2.0^126) ||
(N == 64 && Abs(value) >= 2.0^1022)
) then
// Result flushed to zero of correct sign
result = FPZero(sign);
if UsingAArch32() then
FPSCR.UFC = '1';
else
FPSR.UFC = '1';
else
// Scale to a fixed point value in the range 0.5 <= x < 1.0 in steps of 1/512, and
// calculate result exponent. Scaled value has copied sign bit,
// exponent = 1022 = double-precision biased version of -1,
// fraction = original fraction
case N of
when 16
fraction = operand<9:0> : Zeros(42);
exp = UInt(operand<14:10>);
when 32
fraction = operand<22:0> : Zeros(29);
exp = UInt(operand<30:23>);
when 64
fraction = operand<51:0>;
exp = UInt(operand<62:52>);
if exp == 0 then
if fraction<51> == '0' then
exp = -1;
fraction = fraction<49:0>:'00';
else
fraction = fraction<50:0>:'0';
integer scaled = UInt('1':fraction<51:44>);
case N of
when 16 result_exp = 29 - exp; // In range 29-30 = -1 to 29+1 = 30
when 32 result_exp = 253 - exp; // In range 253-254 = -1 to 253+1 = 254
when 64 result_exp = 2045 - exp; // In range 2045-2046 = -1 to 2045+1 = 2046
// scaled is in range 256..511 representing a fixed-point number in range [0.5..1.0)
estimate = RecipEstimate(scaled);
// estimate is in the range 256..511 representing a fixed point result in the range [1.0..2.0)
// Convert to scaled floating point result with copied sign bit,
// high-order bits from estimate, and exponent calculated above.
fraction = estimate<7:0> : Zeros(44);
if result_exp == 0 then
fraction = '1' : fraction<51:1>;
elsif result_exp == -1 then
fraction = '01' : fraction<51:2>;
result_exp = 0;
case N of
when 16 result = sign : result_exp<N-12:0> : fraction<51:42>;
when 32 result = sign : result_exp<N-25:0> : fraction<51:29>;
when 64 result = sign : result_exp<N-54:0> : fraction<51:0>;
return result;
// Compute estimate of reciprocal of 9-bit fixed-point number
//
// a is in range 256 .. 511 representing a number in the range 0.5 <= x < 1.0.
// result is in the range 256 .. 511 representing a number in the range 1.0 to 511/256.
integer RecipEstimate(integer a)
assert 256 <= a && a < 512;
a = a*2+1; // round to nearest
integer b = (2 ^ 19) DIV a;
r = (b+1) DIV 2; // round to nearest
assert 256 <= r && r < 512;
return r;
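Not part of the specification: an informal Python transliteration of RecipEstimate with two endpoint checks.

# Informal sketch: a 9-bit input in [256,512) stands for x in [0.5,1.0);
# the result in [256,512) stands for an estimate of 1/x in (1.0, 2.0].
def recip_estimate(a: int) -> int:
    assert 256 <= a < 512
    a = a * 2 + 1              # midpoint of the input bucket, in units of 1/1024
    b = (1 << 19) // a
    r = (b + 1) // 2           # round to nearest
    assert 256 <= r < 512
    return r

# x ~= 0.5 (a=256) estimates ~2.0 (r=511); x ~= 1.0 (a=511) estimates ~1.0 (r=256).
assert recip_estimate(256) == 511
assert recip_estimate(511) == 256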
// FPRecpX()
// =========
bits(N) FPRecpX(bits(N) op, FPCRType fpcr)
assert N IN {16,32,64};
case N of
when 16 esize = 5;
when 32 esize = 8;
when 64 esize = 11;
bits(N) result;
bits(esize) exp;
bits(esize) max_exp;
bits(N-(esize+1)) frac = Zeros();
case N of
when 16 exp = op<10+esize-1:10>;
when 32 exp = op<23+esize-1:23>;
when 64 exp = op<52+esize-1:52>;
max_exp = Ones(esize) - 1;
(fptype,sign,value) = FPUnpack(op, fpcr);
if fptype == FPType_SNaN || fptype == FPType_QNaN then
result = FPProcessNaN(fptype, op, fpcr);
else
if IsZero(exp) then // Zero and denormals
result = sign:max_exp:frac;
else // Infinities and normals
result = sign:NOT(exp):frac;
return result;
// FPRound()
// =========
// Used by data processing and int/fixed <-> FP conversion instructions.
// For half-precision data it ignores AHP, and observes FZ16.
bits(N) FPRound(real op, FPCRType fpcr, FPRounding rounding)
fpcr.AHP = '0';
return FPRoundBase(op, fpcr, rounding);
// Convert a real number OP into an N-bit floating-point value using the
// supplied rounding mode RMODE.
bits(N) FPRoundBase(real op, FPCRType fpcr, FPRounding rounding)
assert N IN {16,32,64};
assert op != 0.0;
assert rounding != FPRounding_TIEAWAY;
bits(N) result;
// Obtain format parameters - minimum exponent, numbers of exponent and fraction bits.
if N == 16 then
minimum_exp = -14; E = 5; F = 10;
elsif N == 32 then
minimum_exp = -126; E = 8; F = 23;
else // N == 64
minimum_exp = -1022; E = 11; F = 52;
// Split value into sign, unrounded mantissa and exponent.
if op < 0.0 then
sign = '1'; mantissa = -op;
else
sign = '0'; mantissa = op;
exponent = 0;
while mantissa < 1.0 do
mantissa = mantissa * 2.0; exponent = exponent - 1;
while mantissa >= 2.0 do
mantissa = mantissa / 2.0; exponent = exponent + 1;
// Deal with flush-to-zero.
if ((fpcr.FZ == '1' && N != 16) || (fpcr.FZ16 == '1' && N == 16)) && exponent < minimum_exp then
// Flush-to-zero never generates a trapped exception
if UsingAArch32() then
FPSCR.UFC = '1';
else
FPSR.UFC = '1';
return FPZero(sign);
// Start creating the exponent value for the result. Start by biasing the actual exponent
// so that the minimum exponent becomes 1, lower values 0 (indicating possible underflow).
biased_exp = Max(exponent - minimum_exp + 1, 0);
if biased_exp == 0 then mantissa = mantissa / 2.0^(minimum_exp - exponent);
// Get the unrounded mantissa as an integer, and the "units in last place" rounding error.
int_mant = RoundDown(mantissa * 2.0^F); // < 2.0^F if biased_exp == 0, >= 2.0^F if not
error = mantissa * 2.0^F - Real(int_mant);
// Underflow occurs if exponent is too small before rounding, and result is inexact or
// the Underflow exception is trapped.
if biased_exp == 0 && (error != 0.0 || fpcr.UFE == '1') then
FPProcessException(FPExc_Underflow, fpcr);
// Round result according to rounding mode.
case rounding of
when FPRounding_TIEEVEN
round_up = (error > 0.5 || (error == 0.5 && int_mant<0> == '1'));
overflow_to_inf = TRUE;
when FPRounding_POSINF
round_up = (error != 0.0 && sign == '0');
overflow_to_inf = (sign == '0');
when FPRounding_NEGINF
round_up = (error != 0.0 && sign == '1');
overflow_to_inf = (sign == '1');
when FPRounding_ZERO, FPRounding_ODD
round_up = FALSE;
overflow_to_inf = FALSE;
if round_up then
int_mant = int_mant + 1;
if int_mant == 2^F then // Rounded up from denormalized to normalized
biased_exp = 1;
if int_mant == 2^(F+1) then // Rounded up to next exponent
biased_exp = biased_exp + 1; int_mant = int_mant DIV 2;
// Handle rounding to odd aka Von Neumann rounding
if error != 0.0 && rounding == FPRounding_ODD then
int_mant<0> = '1';
// Deal with overflow and generate result.
if N != 16 || fpcr.AHP == '0' then // Single, double or IEEE half precision
if biased_exp >= 2^E - 1 then
result = if overflow_to_inf then FPInfinity(sign) else FPMaxNormal(sign);
FPProcessException(FPExc_Overflow, fpcr);
error = 1.0; // Ensure that an Inexact exception occurs
else
result = sign : biased_exp<N-F-2:0> : int_mant<F-1:0>;
else // Alternative half precision
if biased_exp >= 2^E then
result = sign : Ones(N-1);
FPProcessException(FPExc_InvalidOp, fpcr);
error = 0.0; // Ensure that an Inexact exception does not occur
else
result = sign : biased_exp<N-F-2:0> : int_mant<F-1:0>;
// Deal with Inexact exception.
if error != 0.0 then
FPProcessException(FPExc_Inexact, fpcr);
return result;
// FPRound()
// =========
bits(N) FPRound(real op, FPCRType fpcr)
return FPRound(op, fpcr, FPRoundingMode(fpcr));
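Not part of the specification: an informal Python sketch of the FPRoundBase scheme, restricted to positive values at half precision with round-to-nearest-even and no flush-to-zero; the helper name fp_round16_rne is illustrative only.

# Informal sketch: decompose op into sign/mantissa/exponent, bias the
# exponent, take the integer mantissa and its rounding error, then round.
from fractions import Fraction

def fp_round16_rne(op: Fraction) -> int:
    assert op > 0
    E, F, minimum_exp = 5, 10, -14
    mantissa, exponent = op, 0
    while mantissa < 1:  mantissa *= 2; exponent -= 1
    while mantissa >= 2: mantissa /= 2; exponent += 1
    biased_exp = max(exponent - minimum_exp + 1, 0)
    if biased_exp == 0:                     # subnormal range
        mantissa /= Fraction(2) ** (minimum_exp - exponent)
    int_mant = int(mantissa * 2**F)         # RoundDown
    error = mantissa * 2**F - int_mant
    if error > Fraction(1, 2) or (error == Fraction(1, 2) and int_mant & 1):
        int_mant += 1
        if int_mant == 2**F: biased_exp = 1
        if int_mant == 2**(F + 1): biased_exp += 1; int_mant //= 2
    assert biased_exp < 2**E - 1            # overflow not handled in this sketch
    return (biased_exp << F) | (int_mant & (2**F - 1))

assert fp_round16_rne(Fraction(1, 10)) == 0x2E66   # 0.1 -> nearest half-precision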
// FPRoundCV()
// ===========
// Used for FP <-> FP conversion instructions.
// For half-precision data ignores FZ16 and observes AHP.
bits(N) FPRoundCV(real op, FPCRType fpcr, FPRounding rounding)
fpcr.FZ16 = '0';
return FPRoundBase(op, fpcr, rounding);
enumeration FPRounding {FPRounding_TIEEVEN, FPRounding_POSINF,
FPRounding_NEGINF, FPRounding_ZERO,
FPRounding_TIEAWAY, FPRounding_ODD};
// FPRoundingMode()
// ================
// Return the current floating-point rounding mode.
FPRounding FPRoundingMode(FPCRType fpcr)
return FPDecodeRounding(fpcr.RMode);
// FPRoundInt()
// ============
// Round OP to nearest integral floating point value using rounding mode ROUNDING.
// If EXACT is TRUE, set FPSR.IXC if result is not numerically equal to OP.
bits(N) FPRoundInt(bits(N) op, FPCRType fpcr, FPRounding rounding, boolean exact)
assert rounding != FPRounding_ODD;
assert N IN {16,32,64};
// Unpack using FPCR to determine if subnormals are flushed-to-zero
(fptype,sign,value) = FPUnpack(op, fpcr);
if fptype == FPType_SNaN || fptype == FPType_QNaN then
result = FPProcessNaN(fptype, op, fpcr);
elsif fptype == FPType_Infinity then
result = FPInfinity(sign);
elsif fptype == FPType_Zero then
result = FPZero(sign);
else
// Extract integer component
int_result = RoundDown(value);
error = value - Real(int_result);
// Determine whether supplied rounding mode requires an increment
case rounding of
when FPRounding_TIEEVEN
round_up = (error > 0.5 || (error == 0.5 && int_result<0> == '1'));
when FPRounding_POSINF
round_up = (error != 0.0);
when FPRounding_NEGINF
round_up = FALSE;
when FPRounding_ZERO
round_up = (error != 0.0 && int_result < 0);
when FPRounding_TIEAWAY
round_up = (error > 0.5 || (error == 0.5 && int_result >= 0));
if round_up then int_result = int_result + 1;
// Convert integer value into an equivalent real value
real_result = Real(int_result);
// Re-encode as a floating-point value, result is always exact
if real_result == 0.0 then
result = FPZero(sign);
else
result = FPRound(real_result, fpcr, FPRounding_ZERO);
// Generate inexact exceptions
if error != 0.0 && exact then
FPProcessException(FPExc_Inexact, fpcr);
return result;
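Not part of the specification: an informal Python sketch of the per-mode increment decision above, using -2.5 as a worked example.

# Informal sketch: a finite value is decomposed as int_result + error,
# with int_result = RoundDown(value) and 0 <= error < 1.
from fractions import Fraction

def round_up(mode: str, int_result: int, error: Fraction) -> bool:
    half = Fraction(1, 2)
    if mode == "TIEEVEN": return error > half or (error == half and int_result & 1 == 1)
    if mode == "POSINF":  return error != 0
    if mode == "NEGINF":  return False
    if mode == "ZERO":    return error != 0 and int_result < 0
    if mode == "TIEAWAY": return error > half or (error == half and int_result >= 0)
    raise ValueError(mode)

# -2.5 = RoundDown(-2.5) + error = -3 + 0.5
assert round_up("TIEEVEN", -3, Fraction(1, 2)) == True    # -> -2 (even)
assert round_up("TIEAWAY", -3, Fraction(1, 2)) == False   # -> -3 (away from zero)
assert round_up("ZERO",    -3, Fraction(1, 2)) == True    # -> -2 (toward zero)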
// FPRoundIntN()
// =============
bits(N) FPRoundIntN(bits(N) op, FPCRType fpcr, FPRounding rounding, integer intsize)
assert rounding != FPRounding_ODD;
assert N IN {32,64};
assert intsize IN {32, 64};
integer exp;
constant integer E = (if N == 32 then 8 else 11);
constant integer F = N - (E + 1);
// Unpack using FPCR to determine if subnormals are flushed-to-zero
(fptype,sign,value) = FPUnpack(op, fpcr);
if fptype IN {FPType_SNaN, FPType_QNaN, FPType_Infinity} then
if N == 32 then
exp = 126 + intsize;
result = '1':exp<(E-1):0>:Zeros(F);
else
exp = 1022+intsize;
result = '1':exp<(E-1):0>:Zeros(F);
FPProcessException(FPExc_InvalidOp, fpcr);
elsif fptype == FPType_Zero then
result = FPZero(sign);
else
// Extract integer component
int_result = RoundDown(value);
error = value - Real(int_result);
// Determine whether supplied rounding mode requires an increment
case rounding of
when FPRounding_TIEEVEN
round_up = error > 0.5 || (error == 0.5 && int_result<0> == '1');
when FPRounding_POSINF
round_up = error != 0.0;
when FPRounding_NEGINF
round_up = FALSE;
when FPRounding_ZERO
round_up = error != 0.0 && int_result < 0;
when FPRounding_TIEAWAY
round_up = error > 0.5 || (error == 0.5 && int_result >= 0);
if round_up then int_result = int_result + 1;
if int_result > 2^(intsize-1)-1 || int_result < -1*2^(intsize-1) then
if N == 32 then
exp = 126 + intsize;
result = '1':exp<(E-1):0>:Zeros(F);
else
exp = 1022 + intsize;
result = '1':exp<(E-1):0>:Zeros(F);
FPProcessException(FPExc_InvalidOp, fpcr);
// this case shouldn't set Inexact
error = 0.0;
else
// Convert integer value into an equivalent real value
real_result = Real(int_result);
// Re-encode as a floating-point value, result is always exact
if real_result == 0.0 then
result = FPZero(sign);
else
result = FPRound(real_result, fpcr, FPRounding_ZERO);
// Generate inexact exceptions
if error != 0.0 then
FPProcessException(FPExc_Inexact, fpcr);
return result;
enumeration// FPRSqrtEstimate()
// =================
bits(N) FPRounding {FPRSqrtEstimate(bits(N) operand,FPRounding_TIEEVEN,fpcr)
assert N IN {16,32,64};
(fptype,sign,value) = FPRounding_POSINF,(operand, fpcr);
if fptype ==
FPRounding_NEGINF,|| fptype == FPRounding_ZERO,then
result =
FPRounding_TIEAWAY,(fptype, operand, fpcr);
elsif fptype == then
result = FPInfinity(sign);
FPProcessException(FPExc_DivideByZero, fpcr);
elsif sign == '1' then
result = FPDefaultNaN();
FPProcessException(FPExc_InvalidOp, fpcr);
elsif fptype == FPType_Infinity then
result = FPZero('0');
else
// Scale to a fixed-point value in the range 0.25 <= x < 1.0 in steps of 512, with the
// evenness or oddness of the exponent unchanged, and calculate result exponent.
// Scaled value has copied sign bit, exponent = 1022 or 1021 = double-precision
// biased version of -1 or -2, fraction = original fraction extended with zeros.
case N of
when 16
fraction = operand<9:0> : Zeros(42);
exp = UInt(operand<14:10>);
when 32
fraction = operand<22:0> : Zeros(29);
exp = UInt(operand<30:23>);
when 64
fraction = operand<51:0>;
exp = UInt(operand<62:52>);
if exp == 0 then
while fraction<51> == '0' do
fraction = fraction<50:0> : '0';
exp = exp - 1;
fraction = fraction<50:0> : '0';
if exp<0> == '0' then
scaled = UInt('1':fraction<51:44>);
else
scaled = UInt('01':fraction<51:45>);
case N of
when 16 result_exp = ( 44 - exp) DIV 2;
when 32 result_exp = ( 380 - exp) DIV 2;
when 64 result_exp = (3068 - exp) DIV 2;
estimate = RecipSqrtEstimate(scaled);
// estimate is in the range 256..511 representing a fixed point result in the range [1.0..2.0)
// Convert to scaled floating point result with copied sign bit and high-order
// fraction bits, and exponent calculated above.
case N of
when 16 result = '0' : result_exp<N-12:0> : estimate<7:0>:Zeros( 2);
when 32 result = '0' : result_exp<N-25:0> : estimate<7:0>:Zeros(15);
when 64 result = '0' : result_exp<N-54:0> : estimate<7:0>:Zeros(44);
return result;
// Compute estimate of reciprocal square root of 9-bit fixed-point number
//
// a is in range 128 .. 511 representing a number in the range 0.25 <= x < 1.0.
// result is in the range 256 .. 511 representing a number in the range 1.0 to 511/256.
integer RecipSqrtEstimate(integer a)
assert 128 <= a && a < 512;
if a < 256 then // 0.25 .. 0.5
a = a*2+1; // a in units of 1/512 rounded to nearest
else // 0.5 .. 1.0
a = (a >> 1) << 1; // discard bottom bit
a = (a+1)*2; // a in units of 1/256 rounded to nearest
integer b = 512;
while a*(b+1)*(b+1) < 2^28 do
b = b+1;
// b = largest b such that b < 2^14 / sqrt(a)
r = (b+1) DIV 2; // round to nearest
assert 256 <= r && r < 512;
return r;
// FPRoundingMode()
// ================
// Return the current floating-point rounding mode.
FPRounding FPRoundingMode(FPCRType fpcr)
return FPDecodeRounding(fpcr.RMode);
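For experimentation, the following non-normative C sketch combines RecipSqrtEstimate with the N == 32 arm of FPRSqrtEstimate above. It handles only normal, positive inputs (the zero, NaN, infinity, negative and subnormal cases covered by the pseudocode are omitted), and the names recip_sqrt_estimate and frsqrte32 are invented for this sketch:

#include <assert.h>
#include <stdint.h>

static int recip_sqrt_estimate(int a)         // a in 128..511
{
    assert(128 <= a && a < 512);
    if (a < 256) {                            // 0.25 .. 0.5
        a = a*2 + 1;                          // a in units of 1/512 rounded to nearest
    } else {                                  // 0.5 .. 1.0
        a = (a >> 1) << 1;                    // discard bottom bit
        a = (a + 1)*2;                        // a in units of 1/256 rounded to nearest
    }
    int b = 512;
    while ((int64_t)a*(b+1)*(b+1) < (1LL << 28)) b++;
    int r = (b + 1) / 2;                      // round to nearest
    assert(256 <= r && r < 512);
    return r;                                 // represents 1.0 .. 511/256
}

uint32_t frsqrte32(uint32_t operand)          // normal, positive operand assumed
{
    uint64_t fraction = (uint64_t)(operand & 0x7FFFFF) << 29; // frac : Zeros(29)
    int exp = (int)((operand >> 23) & 0xFF);  // exp != 0 assumed (no subnormal step)
    int scaled = (exp & 1) == 0 ? (int)(0x100 | (fraction >> 44))  // '1':fraction<51:44>
                                : (int)(0x080 | (fraction >> 45)); // '01':fraction<51:45>
    int result_exp = (380 - exp) / 2;
    int estimate = recip_sqrt_estimate(scaled);
    // '0' : result_exp<7:0> : estimate<7:0> : Zeros(15)
    return ((uint32_t)(result_exp & 0xFF) << 23) | ((uint32_t)(estimate & 0xFF) << 15);
}

As a spot check, frsqrte32(0x3F800000) (i.e. 1.0) yields 0x3F7F8000, about 0.99805, matching the table-based estimate the pseudocode describes.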
// FPRoundInt()
// ============
// Round OP to nearest integral floating point value using rounding mode ROUNDING.
// If EXACT is TRUE, set FPSR.IXC if result is not numerically equal to OP.
bits(N) FPRoundInt(bits(N) op, FPCRType fpcr, FPRounding rounding, boolean exact)
assert rounding != FPRounding_ODD;
assert N IN {16,32,64};
// Unpack using FPCR to determine if subnormals are flushed-to-zero
(fptype,sign,value) = FPUnpack(op, fpcr);
if fptype == FPType_SNaN || fptype == FPType_QNaN then
result = FPProcessNaN(fptype, op, fpcr);
elsif fptype == FPType_Infinity then
result = FPInfinity(sign);
elsif fptype == FPType_Zero then
result = FPZero(sign);
else
// extract integer component
int_result = RoundDown(value);
error = value - Real(int_result);
// Determine whether supplied rounding mode requires an increment
case rounding of
when FPRounding_TIEEVEN
round_up = (error > 0.5 || (error == 0.5 && int_result<0> == '1'));
when FPRounding_POSINF
round_up = (error != 0.0);
when FPRounding_NEGINF
round_up = FALSE;
when FPRounding_ZERO
round_up = (error != 0.0 && int_result < 0);
when FPRounding_TIEAWAY
round_up = (error > 0.5 || (error == 0.5 && int_result >= 0));
if round_up then int_result = int_result + 1;
// Convert integer value into an equivalent real value
real_result = Real(int_result);
// Re-encode as a floating-point value, result is always exact
if real_result == 0.0 then
result = FPZero(sign);
else
result = FPRound(real_result, fpcr, FPRounding_ZERO);
// Generate inexact exceptions
if error != 0.0 && exact then
FPProcessException(FPExc_Inexact, fpcr);
return result;
// FPSqrt()
// ========
bits(N) FPSqrt(bits(N) op, FPCRType fpcr)
assert N IN {16,32,64};
(fptype,sign,value) = FPUnpack(op, fpcr);
if fptype == FPType_SNaN || fptype == FPType_QNaN then
result = FPProcessNaN(fptype, op, fpcr);
elsif fptype == FPType_Zero then
result = FPZero(sign);
elsif fptype == FPType_Infinity && sign == '0' then
result = FPInfinity(sign);
elsif sign == '1' then
result = FPDefaultNaN();
FPProcessException(FPExc_InvalidOp, fpcr);
else
result = FPRound(Sqrt(value), fpcr);
return result;
// FPSub()
// =======
bits(N) FPSub(bits(N) op1, bits(N) op2, FPCRType fpcr)
assert N IN {16,32,64};
rounding = FPRoundingMode(fpcr);
(type1,sign1,value1) = FPUnpack(op1, fpcr);
(type2,sign2,value2) = FPUnpack(op2, fpcr);
(done,result) = FPProcessNaNs(type1, type2, op1, op2, fpcr);
if !done then
inf1 = (type1 == FPType_Infinity);
inf2 = (type2 == FPType_Infinity);
zero1 = (type1 == FPType_Zero);
zero2 = (type2 == FPType_Zero);
if inf1 && inf2 && sign1 == sign2 then
result = FPDefaultNaN();
FPProcessException(FPExc_InvalidOp, fpcr);
elsif (inf1 && sign1 == '0') || (inf2 && sign2 == '1') then
result = FPInfinity('0');
elsif (inf1 && sign1 == '1') || (inf2 && sign2 == '0') then
result = FPInfinity('1');
elsif zero1 && zero2 && sign1 == NOT(sign2) then
result = FPZero(sign1);
else
result_value = value1 - value2;
if result_value == 0.0 then // Sign of exact zero result depends on rounding mode
result_sign = if rounding == FPRounding_NEGINF then '1' else '0';
result = FPZero(result_sign);
else
result = FPRound(result_value, fpcr, rounding);
return result;
// FPThree()
// =========
bits(N) FPThree(bit sign)
assert N IN {16,32,64};
constant integer E = (if N == 16 then 5 elsif N == 32 then 8 else 11);
constant integer F = N - (E + 1);
exp = '1':Zeros(E-1);
frac = '1':Zeros(F-1);
return sign : exp : frac;
// FPToFixed()
// ===========
// Convert N-bit precision floating point OP to M-bit fixed point with
// FBITS fractional bits, controlled by UNSIGNED and ROUNDING.
bits(M) FPToFixed(bits(N) op, integer fbits, boolean unsigned, FPCRType fpcr, FPRounding rounding)
assert N IN {16,32,64};
assert M IN {16,32,64};
assert fbits >= 0;
assert rounding != FPRounding_ODD;
// Unpack using fpcr to determine if subnormals are flushed-to-zero
(fptype,sign,value) = FPUnpack(op, fpcr);
// If NaN, set cumulative flag or take exception
if fptype == FPType_SNaN || fptype == FPType_QNaN then
FPProcessException(FPExc_InvalidOp, fpcr);
// Scale by fractional bits and produce integer rounded towards minus-infinity
value = value * 2.0^fbits;
int_result = RoundDown(value);
error = value - Real(int_result);
// Determine whether supplied rounding mode requires an increment
case rounding of
when FPRounding_TIEEVEN
round_up = (error > 0.5 || (error == 0.5 && int_result<0> == '1'));
when FPRounding_POSINF
round_up = (error != 0.0);
when FPRounding_NEGINF
round_up = FALSE;
when FPRounding_ZERO
round_up = (error != 0.0 && int_result < 0);
when FPRounding_TIEAWAY
round_up = (error > 0.5 || (error == 0.5 && int_result >= 0));
if round_up then int_result = int_result + 1;
// Generate saturated result and exceptions
(result, overflow) = SatQ(int_result, M, unsigned);
if overflow then
FPProcessException(FPExc_InvalidOp, fpcr);
elsif error != 0.0 then
FPProcessException(FPExc_Inexact, fpcr);
return result;
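The five-way case statement selecting round_up is shared by FPRoundInt, FPRoundIntN and FPToFixed above. A compact, illustrative C rendering of that decision table (round_to_int is an invented name, not a function from the source):

#include <math.h>
#include <stdbool.h>
#include <stdint.h>

typedef enum { TIEEVEN, POSINF, NEGINF, ZERO, TIEAWAY } Rounding;

int64_t round_to_int(double x, Rounding mode)
{
    int64_t int_result = (int64_t)floor(x);   // RoundDown
    double error = x - (double)int_result;
    bool round_up = false;
    switch (mode) {
    case TIEEVEN: round_up = error > 0.5 || (error == 0.5 && (int_result & 1)); break;
    case POSINF:  round_up = error != 0.0; break;
    case NEGINF:  round_up = false; break;    // floor already rounded down
    case ZERO:    round_up = error != 0.0 && int_result < 0; break;
    case TIEAWAY: round_up = error > 0.5 || (error == 0.5 && int_result >= 0); break;
    }
    return round_up ? int_result + 1 : int_result;
}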
// FPToFixedJS()
// =============
// Converts a double precision floating point input value
// to a signed integer, with rounding to zero.
bits(N) FPToFixedJS(bits(M) op, FPCRType fpcr, boolean Is64)
assert M == 64 && N == 32;
// Unpack using fpcr to determine if subnormals are flushed-to-zero
(fptype,sign,value) = FPUnpack(op, fpcr);
Z = '1';
// If NaN, set cumulative flag or take exception
if fptype == FPType_SNaN || fptype == FPType_QNaN then
FPProcessException(FPExc_InvalidOp, fpcr);
Z = '0';
int_result = RoundDown(value);
error = value - Real(int_result);
// Determine whether supplied rounding mode requires an increment
round_it_up = (error != 0.0 && int_result < 0);
if round_it_up then int_result = int_result + 1;
if int_result < 0 then
result = int_result - 2^32*RoundUp(Real(int_result)/Real(2^32));
else
result = int_result - 2^32*RoundDown(Real(int_result)/Real(2^32));
// Generate exceptions
if int_result < -(2^31) || int_result > (2^31)-1 then
FPProcessException(FPExc_InvalidOp, fpcr);
Z = '0';
elsif error != 0.0 then
FPProcessException(FPExc_Inexact, fpcr);
Z = '0';
elsif sign == '1' && value == 0.0 then
Z = '0';
if fptype == FPType_Infinity then result = 0;
if Is64 then
PSTATE.<N,Z,C,V> = '0':Z:'00';
else
FPSCR<31:28> = '0':Z:'00';
return result<N-1:0>;
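Stripped of the flag and exception bookkeeping, the arithmetic above is "round towards zero, then keep the low 32 bits of the integer modulo 2^32", i.e. the JavaScript ToInt32 conversion implemented by FJCVTZS. A hedged C sketch of just that arithmetic (to_fixed_js is an invented name; NaN and infinity inputs are left to the pseudocode's special cases):

#include <math.h>
#include <stdint.h>

uint32_t to_fixed_js(double value)
{
    double t = trunc(value);           // round towards zero
    double m = fmod(t, 4294967296.0);  // reduce modulo 2^32 (fmod is exact)
    return (uint32_t)(int64_t)m;       // keep the low 32 bits
}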
// FPTwo()
// =======
bits(N) FPTwo(bit sign)
assert N IN {16,32,64};
constant integer E = (if N == 16 then 5 elsif N == 32 then 8 else 11);
constant integer F = N - (E + 1);
exp = '1':Zeros(E-1);
frac = Zeros(F);
return sign : exp : frac;
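FPTwo and FPThree (above) both use the biased exponent '1':Zeros(E-1), i.e. an unbiased exponent of +1; FPThree additionally sets the top fraction bit. For N == 32 this pins down the familiar encodings 0x40000000 (2.0) and 0x40400000 (3.0), sketched here in C with invented helper names:

#include <stdint.h>

uint32_t fp_two32(int sign)    // sign : '1':Zeros(7) : Zeros(23)
{
    return ((uint32_t)sign << 31) | 0x40000000u;
}

uint32_t fp_three32(int sign)  // sign : '1':Zeros(7) : '1':Zeros(22)
{
    return ((uint32_t)sign << 31) | 0x40000000u | (1u << 22);
}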
enumeration FPType {FPType_Nonzero, FPType_Zero, FPType_Infinity,
FPType_QNaN, FPType_SNaN};
// FPUnpack()
// ==========
//
// Used by data processing and int/fixed <-> FP conversion instructions.
// For half-precision data it ignores AHP, and observes FZ16.
(FPType, bit, real) FPUnpack(bits(N) fpval, FPCRType fpcr)
fpcr.AHP = '0';
(fp_type, sign, value) = FPUnpackBase(fpval, fpcr);
return (fp_type, sign, value);
// FPUnpackBase()
// ==============
//
// Unpack a floating-point number into its type, sign bit and the real number
// that it represents. The real number result has the correct sign for numbers
// and infinities, is very large in magnitude for infinities, and is 0.0 for
// NaNs. (These values are chosen to simplify the description of comparisons
// and conversions.)
//
// The 'fpcr' argument supplies FPCR control bits. Status information is
// updated directly in the FPSR where appropriate.
(FPType, bit, real) FPUnpackBase(bits(N) fpval, FPCRType fpcr)
assert N IN {16,32,64};
if N == 16 then
sign = fpval<15>;
exp16 = fpval<14:10>;
frac16 = fpval<9:0>;
if IsZero(exp16) then
// Produce zero if value is zero or flush-to-zero is selected
if IsZero(frac16) || fpcr.FZ16 == '1' then
fptype = FPType_Zero; value = 0.0;
else
fptype = FPType_Nonzero; value = 2.0^-14 * (Real(UInt(frac16)) * 2.0^-10);
elsif IsOnes(exp16) && fpcr.AHP == '0' then // Infinity or NaN in IEEE format
if IsZero(frac16) then
fptype = FPType_Infinity; value = 2.0^1000000;
else
fptype = if frac16<9> == '1' then FPType_QNaN else FPType_SNaN;
value = 0.0;
else
fptype = FPType_Nonzero;
value = 2.0^(UInt(exp16)-15) * (1.0 + Real(UInt(frac16)) * 2.0^-10);
elsif N == 32 then
sign = fpval<31>;
exp32 = fpval<30:23>;
frac32 = fpval<22:0>;
if IsZero(exp32) then
// Produce zero if value is zero or flush-to-zero is selected.
if IsZero(frac32) || fpcr.FZ == '1' then
fptype = FPType_Zero; value = 0.0;
if !IsZero(frac32) then // Denormalized input flushed to zero
FPProcessException(FPExc_InputDenorm, fpcr);
else
fptype = FPType_Nonzero; value = 2.0^-126 * (Real(UInt(frac32)) * 2.0^-23);
elsif IsOnes(exp32) then
if IsZero(frac32) then
fptype = FPType_Infinity; value = 2.0^1000000;
else
fptype = if frac32<22> == '1' then FPType_QNaN else FPType_SNaN;
value = 0.0;
else
fptype = FPType_Nonzero;
value = 2.0^(UInt(exp32)-127) * (1.0 + Real(UInt(frac32)) * 2.0^-23);
else // N == 64
sign = fpval<63>;
exp64 = fpval<62:52>;
frac64 = fpval<51:0>;
if IsZero(exp64) then
// Produce zero if value is zero or flush-to-zero is selected.
if IsZero(frac64) || fpcr.FZ == '1' then
fptype = FPType_Zero; value = 0.0;
if !IsZero(frac64) then // Denormalized input flushed to zero
FPProcessException(FPExc_InputDenorm, fpcr);
else
fptype = FPType_Nonzero; value = 2.0^-1022 * (Real(UInt(frac64)) * 2.0^-52);
elsif IsOnes(exp64) then
if IsZero(frac64) then
fptype = FPType_Infinity; value = 2.0^1000000;
else
fptype = if frac64<51> == '1' then FPType_QNaN else FPType_SNaN;
value = 0.0;
else
fptype = FPType_Nonzero;
value = 2.0^(UInt(exp64)-1023) * (1.0 + Real(UInt(frac64)) * 2.0^-52);
if sign == '1' then value = -value;
return (fptype, sign, value);
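A non-normative C translation of the N == 32 arm of FPUnpackBase, with flush-to-zero forced off for brevity and the pseudocode's "very large" stand-in value 2.0^1000000 replaced by the host INFINITY (unpack32 and FpType are names local to this sketch):

#include <math.h>
#include <stdint.h>

typedef enum { FP_NONZERO, FP_ZERO, FP_INF, FP_QNAN, FP_SNAN } FpType;

FpType unpack32(uint32_t fpval, int *sign, double *value)
{
    *sign = fpval >> 31;
    uint32_t exp32  = (fpval >> 23) & 0xFF;
    uint32_t frac32 = fpval & 0x7FFFFF;
    FpType fptype;
    if (exp32 == 0) {
        if (frac32 == 0) { fptype = FP_ZERO; *value = 0.0; }
        else { fptype = FP_NONZERO; *value = ldexp((double)frac32, -126 - 23); }
    } else if (exp32 == 0xFF) {
        if (frac32 == 0) { fptype = FP_INF; *value = INFINITY; }
        else { fptype = (frac32 >> 22) ? FP_QNAN : FP_SNAN; *value = 0.0; }
    } else {
        fptype = FP_NONZERO;
        *value = ldexp(1.0 + (double)frac32 * 0x1p-23, (int)exp32 - 127);
    }
    if (*sign) *value = -*value;
    return fptype;
}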
// FPUnpackCV()
// ============
//
// Used for FP <-> FP conversion instructions.
// For half-precision data ignores FZ16 and observes AHP.
(FPType, bit, real) FPUnpackCV(bits(N) fpval, FPCRType fpcr)
fpcr.FZ16 = '0';
(fp_type, sign, value) = FPUnpackBase(fpval, fpcr);
return (fp_type, sign, value);
// FPZero()
// ========
bits(N) FPZero(bit sign)
assert N IN {16,32,64};
constant integer E = (if N == 16 then 5 elsif N == 32 then 8 else 11);
constant integer F = N - (E + 1);
exp = Zeros(E);
frac = Zeros(F);
return sign : exp : frac;
// VFPExpandImm()
// ==============
bits(N) VFPExpandImm(bits(8) imm8)
assert N IN {16,32,64};
constant integer E = (if N == 16 then 5 elsif N == 32 then 8 else 11);
constant integer F = N - E - 1;
sign = imm8<7>;
exp = NOT(imm8<6>):Replicate(imm8<6>,E-3):imm8<5:4>;
frac = imm8<3:0>:Zeros(F-4);
return sign : exp : frac;
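For N == 32 (E == 8, F == 23) VFPExpandImm reduces to a handful of shifts: imm8 = 0x70, for instance, expands to 0x3F800000, the encoding of 1.0. An illustrative C version (vfp_expand_imm32 is an invented name):

#include <stdint.h>

uint32_t vfp_expand_imm32(uint8_t imm8)
{
    uint32_t sign = (imm8 >> 7) & 1;
    uint32_t b6   = (imm8 >> 6) & 1;
    uint32_t exp  = ((b6 ^ 1) << 7)           // NOT(imm8<6>)
                  | ((b6 ? 0x1Fu : 0u) << 2)  // Replicate(imm8<6>, E-3)
                  | ((imm8 >> 4) & 3);        // imm8<5:4>
    uint32_t frac = (uint32_t)(imm8 & 0xF) << 19; // imm8<3:0> : Zeros(F-4)
    return (sign << 31) | (exp << 23) | frac;
}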
// AddWithCarry()
// ==============
// Integer addition with carry input, returning result and NZCV flags
(bits(N), bits(4)) AddWithCarry(bits(N) x, bits(N) y, bit carry_in)
integer unsigned_sum = UInt(x) + UInt(y) + UInt(carry_in);
integer signed_sum = SInt(x) + SInt(y) + UInt(carry_in);
bits(N) result = unsigned_sum<N-1:0>; // same value as signed_sum<N-1:0>
bit n = result<N-1>;
bit z = if IsZero(result) then '1' else '0';
bit c = if UInt(result) == unsigned_sum then '0' else '1';
bit v = if SInt(result) == signed_sum then '0' else '1';
return (result, n:z:c:v);
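A direct C model of AddWithCarry for N == 32, computing the unsigned and signed sums in wider types exactly as above. Subtraction reuses it as AddWithCarry(x, NOT(y), '1'), which is why C reads as "no borrow" for A32/A64 subtracts. The struct and function names are invented for this sketch:

#include <stdbool.h>
#include <stdint.h>

typedef struct { uint32_t result; bool n, z, c, v; } Flags32;

Flags32 add_with_carry32(uint32_t x, uint32_t y, unsigned carry_in)
{
    uint64_t unsigned_sum = (uint64_t)x + (uint64_t)y + carry_in;
    int64_t  signed_sum   = (int64_t)(int32_t)x + (int64_t)(int32_t)y + carry_in;
    uint32_t result = (uint32_t)unsigned_sum;       // same value as signed_sum<31:0>
    Flags32 f;
    f.result = result;
    f.n = (result >> 31) != 0;
    f.z = (result == 0);
    f.c = ((uint64_t)result != unsigned_sum);       // carry out (unsigned overflow)
    f.v = ((int64_t)(int32_t)result != signed_sum); // signed overflow
    return f;
}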
// AArch64.BranchAddr()
// ====================
// Return the virtual address with tag bits removed for storing to the program counter.
bits(64) AArch64.BranchAddr(bits(64) vaddress)
assert !UsingAArch32();
msbit = AddrTop(vaddress, TRUE, PSTATE.EL);
if msbit == 63 then
return vaddress;
elsif (PSTATE.EL IN {EL0, EL1} || IsInHost()) && vaddress<msbit> == '1' then
return SignExtend(vaddress<msbit:0>);
else
return ZeroExtend(vaddress<msbit:0>);
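The effect is to replace the tag bits above msbit with copies of bit msbit (EL0/EL1 addresses, which may be sign-extended) or with zeros (higher ELs). A hedged C sketch of that extension step (strip_tag is not a name from the source; msbit is 55 when TBI applies):

#include <stdint.h>

uint64_t strip_tag(uint64_t vaddress, unsigned msbit, int sign_extend)
{
    if (msbit == 63) return vaddress;           // no tag bits to remove
    uint64_t mask = (1ULL << (msbit + 1)) - 1;  // keep vaddress<msbit:0>
    uint64_t low  = vaddress & mask;
    if (sign_extend && ((vaddress >> msbit) & 1))
        return low | ~mask;                     // SignExtend(vaddress<msbit:0>)
    return low;                                 // ZeroExtend(vaddress<msbit:0>)
}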
enumeration AccType {
frac =AccType_NORMAL, AccType_VEC, // Normal loads and stores
AccType_STREAM, AccType_VECSTREAM, // Streaming loads and stores
AccType_ATOMIC, AccType_ATOMICRW, // Atomic loads and stores
AccType_ORDERED, AccType_ORDEREDRW, // Load-Acquire and Store-Release
AccType_ORDEREDATOMIC, // Load-Acquire and Store-Release with atomic access
AccType_ORDEREDATOMICRW,
AccType_LIMITEDORDERED, // Load-LOAcquire and Store-LORelease
AccType_UNPRIV, // Load and store unprivileged
AccType_IFETCH, // Instruction fetch
AccType_PTW, // Page table walk
AccType_NONFAULT, // Non-faulting loads
AccType_CNOTFIRST, // Contiguous FF load, not first element
AccType_NV2REGISTER, // MRS/MSR instruction used at EL1 and which is converted
// to a memory access that uses the EL2 translation regime
// Other operations
AccType_DC, // Data cache maintenance
AccType_DC_UNPRIV, // Data cache maintenance instruction used at EL0
AccType_IC, // Instruction cache maintenance
AccType_DCZVA, // DC ZVA instructions
AccType_AT}; // Address translation
type AccessDescriptor is (
AccType acctype,
MPAMinfo mpam,
boolean page_table_walk,
boolean secondstage,
boolean s2fs1walk,
integer level
)
// AddrTop()
// =========
// Return the MSB number of a virtual address in the stage 1 translation regime for "el".
// If EL1 is using AArch64 then addresses from EL0 using AArch32 are zero-extended to 64 bits.
integer AddrTop(bits(64) address, boolean IsInstr, bits(2) el)
assert HaveEL(el);
regime = S1TranslationRegime(el);
if ELUsingAArch32(regime) then
// AArch32 translation regime.
return 31;
else
// AArch64 translation regime.
case regime of
when EL1
tbi = (if address<55> == '1' then TCR_EL1.TBI1 else TCR_EL1.TBI0);
if HavePACExt() then
tbid = if address<55> == '1' then TCR_EL1.TBID1 else TCR_EL1.TBID0;
when EL2
if HaveVirtHostExt() && ELIsInHost(el) then
tbi = (if address<55> == '1' then TCR_EL2.TBI1 else TCR_EL2.TBI0);
if HavePACExt() then
tbid = if address<55> == '1' then TCR_EL2.TBID1 else TCR_EL2.TBID0;
else
tbi = TCR_EL2.TBI;
if HavePACExt() then tbid = TCR_EL2.TBID;
when EL3
tbi = TCR_EL3.TBI;
if HavePACExt() then tbid = TCR_EL3.TBID;
return (if tbi == '1' && (!HavePACExt() || tbid == '0' || !IsInstr) then 55 else 63);
type AddressDescriptor is (
FaultRecord fault, // fault.statuscode indicates whether the address is valid
MemoryAttributes memattrs,
FullAddress paddress,
bits(64) vaddress
)
constant bits(2) MemHint_No = '00'; // No Read-Allocate, No Write-Allocate
constant bits(2) MemHint_WA = '01'; // No Read-Allocate, Write-Allocate
constant bits(2) MemHint_RA = '10'; // Read-Allocate, No Write-Allocate
constant bits(2) MemHint_RWA = '11'; // Read-Allocate, Write-Allocate
// BigEndian()
// ===========
boolean BigEndian()
boolean bigend;
if UsingAArch32() then
bigend = (PSTATE.E != '0');
elsif PSTATE.EL == EL0 then
bigend = (SCTLR[].E0E != '0');
else
bigend = (SCTLR[].EE != '0');
return bigend;
// BigEndianReverse()
// ==================
bits(width) BigEndianReverse (bits(width) value)
assert width IN {8, 16, 32, 64, 128};
integer half = width DIV 2;
if width == 8 then return value;
return BigEndianReverse(value<half-1:0>) : BigEndianReverse(value<width-1:half>);
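The recursion reverses each half and swaps them, bottoming out at single bytes, so for these widths it is a plain byte reverse. An equivalent C sketch for widths up to 64 bits (big_endian_reverse64 is an invented name):

#include <stdint.h>

uint64_t big_endian_reverse64(uint64_t value, unsigned width)
{
    if (width == 8) return value;
    unsigned half = width / 2;          // width IN {8,16,32,64}
    uint64_t mask = (1ULL << half) - 1; // half < 64 here
    uint64_t lo = big_endian_reverse64(value & mask, half);
    uint64_t hi = big_endian_reverse64(value >> half, half);
    return (lo << half) | hi;           // reversed low half : reversed high half
}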
constant bits(2) MemAttr_NC = '00'; // Non-cacheable
constant bits(2) MemAttr_WT = '10'; // Write-through
constant bits(2) MemAttr_WB = '11'; // Write-back
// CreateAccessDescriptor()
// ========================
AccessDescriptor CreateAccessDescriptor(AccType acctype)
AccessDescriptor accdesc;
accdesc.acctype = acctype;
accdesc.mpam = GenMPAMcurEL(acctype IN {AccType_IFETCH, AccType_IC});
accdesc.page_table_walk = FALSE;
return accdesc;
// CreateAccessDescriptorPTW()
// ===========================
AccessDescriptor CreateAccessDescriptorPTW(AccType acctype, boolean secondstage,
boolean s2fs1walk, integer level)
AccessDescriptor accdesc;
accdesc.acctype = acctype;
accdesc.mpam = GenMPAMcurEL(acctype IN {AccType_IFETCH, AccType_IC});
accdesc.page_table_walk = TRUE;
accdesc.s2fs1walk = s2fs1walk;
accdesc.secondstage = secondstage;
accdesc.level = level;
return accdesc;
DataMemoryBarrier(MBReqDomain domain, MBReqTypes types);
DataSynchronizationBarrier(MBReqDomain domain, MBReqTypes types);
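The (MBReqDomain, MBReqTypes) arguments correspond to the option field of the A64 DMB/DSB instructions; InnerShareable/Writes, for example, is the ISHST option. An illustrative mapping using GCC-style inline assembly (these wrappers are an assumption for illustration, not part of the source):

static inline void dmb_ish(void)   { __asm__ volatile("dmb ish"   ::: "memory"); } // InnerShareable, All
static inline void dmb_ishst(void) { __asm__ volatile("dmb ishst" ::: "memory"); } // InnerShareable, Writes
static inline void dmb_ishld(void) { __asm__ volatile("dmb ishld" ::: "memory"); } // InnerShareable, Reads
static inline void dsb_sy(void)    { __asm__ volatile("dsb sy"    ::: "memory"); } // FullSystem, All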
type DescriptorUpdate is (
boolean AF, // AF needs to be set
boolean AP, // AP[2] / S2AP[2] will be modified
AddressDescriptor descaddr // Descriptor to be updated
)
enumeration DeviceType {DeviceType_GRE, DeviceType_nGRE, DeviceType_nGnRE, DeviceType_nGnRnE};
// EffectiveTBI()
// ==============
// Returns the effective TBI in the AArch64 stage 1 translation regime for "el".
bit EffectiveTBI(bits(64) address, boolean IsInstr, bits(2) el)
assert HaveEL(el);
regime = S1TranslationRegime(el);
assert(!ELUsingAArch32(regime));
case regime of
when EL1
tbi = if address<55> == '1' then TCR_EL1.TBI1 else TCR_EL1.TBI0;
if HavePACExt() then
tbid = if address<55> == '1' then TCR_EL1.TBID1 else TCR_EL1.TBID0;
when EL2
if HaveVirtHostExt() && ELIsInHost(el) then
tbi = if address<55> == '1' then TCR_EL2.TBI1 else TCR_EL2.TBI0;
if HavePACExt() then
tbid = if address<55> == '1' then TCR_EL2.TBID1 else TCR_EL2.TBID0;
else
tbi = TCR_EL2.TBI;
if HavePACExt() then tbid = TCR_EL2.TBID;
when EL3
tbi = TCR_EL3.TBI;
if HavePACExt() then tbid = TCR_EL3.TBID;
return (if tbi == '1' && (!HavePACExt() || tbid == '0' || !IsInstr) then '1' else '0');
// EffectiveTCMA()
// ===============
// Returns the effective TCMA of a virtual address in the stage 1 translation regime for "el".
bit EffectiveTCMA(bits(64) address, bits(2) el)
assert HaveEL(el);
regime = S1TranslationRegime(el);
assert(!ELUsingAArch32(regime));
case regime of
when EL1
tcma = if address<55> == '1' then TCR_EL1.TCMA1 else TCR_EL1.TCMA0;
when EL2
if HaveVirtHostExt() && ELIsInHost(el) then
tcma = if address<55> == '1' then TCR_EL2.TCMA1 else TCR_EL2.TCMA0;
else
tcma = TCR_EL2.TCMA;
when EL3
tcma = TCR_EL3.TCMA;
return tcma;
enumeration Fault {
Fault_None,
Fault_AccessFlag,
Fault_Alignment,
Fault_Background,
Fault_Domain,
Fault_Permission,
Fault_Translation,
Fault_AddressSize,
Fault_SyncExternal,
Fault_SyncExternalOnWalk,
Fault_SyncParity,
Fault_SyncParityOnWalk,
Fault_AsyncParity,
Fault_AsyncExternal,
Fault_Debug,
Fault_TLBConflict,
Fault_BranchTarget,
Fault_HWUpdateAccessFlag,
Fault_Lockdown,
Fault_Exclusive,
Fault_ICacheMaint};
type FaultRecord is (
Fault statuscode, // Fault Status
AccType acctype, // Type of access that faulted
FullAddress ipaddress, // Intermediate physical address
boolean s2fs1walk, // Is on a Stage 1 page table walk
boolean write, // TRUE for a write, FALSE for a read
integer level, // For translation, access flag and permission faults
bit extflag, // IMPLEMENTATION DEFINED syndrome for external aborts
boolean secondstage, // Is a Stage 2 abort
bits(4) domain, // Domain number, AArch32 only
bits(2) errortype, // [Armv8.2 RAS] AArch32 AET or AArch64 SET
bits(4) debugmoe) // Debug method of entry, from AArch32 only
type PARTIDtype = bits(16);
type PMGtype = bits(8);
type MPAMinfo is (
bit mpam_ns,
PARTIDtype partid,
PMGtype pmg
)
type FullAddress is (
bits(52) address,
bit NS // '0' = Secure, '1' = Non-secure
)
bit// Signals the memory system that memory accesses of type HINT to or from the specified address are
// likely in the near future. The memory system may take some action to speed up the memory
// accesses when they do occur, such as pre-loading the specified address into one or more
// caches as indicated by the innermost cache level target (0=L1, 1=L2, etc) and non-temporal hint
// stream. Any or all prefetch hints may be treated as a NOP. A prefetch hint must not cause a
// synchronous abort due to Alignment or Translation faults and the like. Its only effect on
// software-visible state should be on caches and TLBs associated with address, which must be
// accessible by reads, writes or execution, as defined in the translation regime of the current
// Exception level. It is guaranteed not to access Device memory.
// A Prefetch_EXEC hint must not result in an access that could not be performed by a speculative
// instruction fetch, therefore if all associated MMUs are disabled, then it cannot access any
// memory location that cannot be accessed by instruction fetches.
Hint_Prefetch(bits(64) address, PrefetchHint hint, integer target, boolean stream);
enumeration MBReqDomain {MBReqDomain_Nonshareable, MBReqDomain_InnerShareable,
MBReqDomain_OuterShareable, MBReqDomain_FullSystem};
enumeration MBReqTypes {MBReqTypes_Reads, MBReqTypes_Writes, MBReqTypes_All};
type MemAttrHints is (
bits(2) attrs, // See MemAttr_*, Cacheability attributes
bits(2) hints, // See MemHint_*, Allocation hints
boolean transient
)
enumeration MemType {MemType_Normal, MemType_Device};
type MemoryAttributes is (
MemType memtype,
DeviceType device, // For Device memory types
MemAttrHints inner, // Inner hints and attributes
MemAttrHints outer, // Outer hints and attributes
boolean tagged, // Tagged access
boolean shareable,
boolean outershareable
)
type Permissions is (
bits(3) ap, // Access permission bits
bit xn, // Execute-never bit
bit xxn, // [Armv8.2] Extended execute-never bit for stage 2
bit pxn // Privileged execute-never bit
)
enumeration PrefetchHint {Prefetch_READ, Prefetch_WRITE, Prefetch_EXEC};
SpeculativeStoreBypassBarrierToPA();
SpeculativeStoreBypassBarrierToVA();
type TLBRecord is (
Permissions perms,
bit nG, // '0' = Global, '1' = not Global
bits(4) domain, // AArch32 only
bit GP, // Guarded Page
boolean contiguous, // Contiguous bit from page table
integer level, // AArch32 Short-descriptor format: Indicates Section/Page
integer blocksize, // Describes size of memory translated in KBytes
DescriptorUpdate descupdate, // [Armv8.1] Context for h/w update of table descriptor
bit CnP, // [Armv8.2] TLB entry can be shared between different PEs
AddressDescriptor addrdesc
)
// These two _Mem[] accessors are the hardware operations which perform single-copy atomic,
// aligned, little-endian memory accesses of size bytes from/to the underlying physical
// memory array of bytes.
//
// The functions address the array using desc.paddress which supplies:
// * A 52-bit physical address
// * A single NS bit to select between Secure and Non-secure parts of the array.
//
// The accdesc descriptor describes the access type: normal, exclusive, ordered, streaming,
// etc and other parameters required to access the physical memory or for setting syndrome
// register in the event of an external abort.
bits(8*size) _Mem[AddressDescriptor desc, integer size, AccessDescriptor accdesc];
_Mem[AddressDescriptor desc, integer size, AccessDescriptor accdesc] = bits(8*size) value;
// DefaultMPAMinfo
// ===============
// Returns default MPAM info. If secure is TRUE return default Secure
// MPAMinfo, otherwise return default Non-secure MPAMinfo.
MPAMinfo DefaultMPAMinfo(boolean secure)
MPAMinfo DefaultInfo;
DefaultInfo.mpam_ns = if secure then '0' else '1';
DefaultInfo.partid = DefaultPARTID;
DefaultInfo.pmg = DefaultPMG;
return DefaultInfo;
constant PARTIDtype DefaultPARTID = 0<15:0>;
constant PMGtype DefaultPMG = 0<7:0>;
// GenMPAMcurEL
// ============
// Returns MPAMinfo for the current EL and security state.
// InD is TRUE for instruction access and FALSE otherwise.
// May be called if MPAM is not implemented (but in a version that supports
// MPAM), if MPAM is disabled, or in AArch32. In AArch32, convert the mode to
// an EL if possible and use that to drive MPAM information generation. If the mode
// cannot be converted, MPAM is not implemented, or MPAM is disabled return
// default MPAM information for the current security state.
MPAMinfo GenMPAMcurEL(boolean InD)
bits(2) mpamel;
boolean validEL;
boolean secure = IsSecure();
if HaveMPAMExt() && MPAMisEnabled() then
if UsingAArch32() then
(validEL, mpamel) = ELFromM32(PSTATE.M);
else
validEL = TRUE;
mpamel = PSTATE.EL;
if validEL then
return genMPAM(UInt(mpamel), InD, secure);
return DefaultMPAMinfo(secure);
// MAP_vPARTID
// ===========
// Performs conversion of virtual PARTID into physical PARTID
// Contains all of the error checking and implementation
// choices for the conversion.
(PARTIDtype, boolean) MAP_vPARTID(PARTIDtype vpartid)
// should not ever be called if EL2 is not implemented
// or is implemented but not enabled in the current
// security state.
PARTIDtype ret;
boolean err;
integer virt = UInt( vpartid );
integer vmprmax = UInt( MPAMIDR_EL1.VPMR_MAX );
// vpartid_max is largest vpartid supported
integer vpartid_max = 4 * vmprmax + 3;
// One of many ways to reduce vpartid to value less than vpartid_max.
if virt > vpartid_max then
virt = virt MOD (vpartid_max+1);
// Check for valid mapping entry.
if MPAMVPMV_EL2<virt> == '1' then
// vpartid has a valid mapping so access the map.
ret = mapvpmw(virt);
err = FALSE;
// Is the default virtual PARTID valid?
elsif MPAMVPMV_EL2<0> == '1' then
// Yes, so use default mapping for vpartid == 0.
ret = MPAMVPM0_EL2<0 +: 16>;
err = FALSE;
// Neither is valid so use default physical PARTID.
else
ret = DefaultPARTID;
err = TRUE;
// Check that the physical PARTID is in-range.
// This physical PARTID came from a virtual mapping entry.
integer partid_max = UInt( MPAMIDR_EL1.PARTID_MAX );
if UInt(ret) > partid_max then
// Out of range, so return default physical PARTID
ret = DefaultPARTID;
err = TRUE;
return (ret, err);
// MPAMisEnabled
// =============
// Returns TRUE if MPAMisEnabled.
boolean MPAMisEnabled()
el = HighestEL();
case el of
when EL3 return MPAM3_EL3.MPAMEN == '1';
when EL2 return MPAM2_EL2.MPAMEN == '1';
when EL1 return MPAM1_EL1.MPAMEN == '1';
// MPAMisVirtual
// =============
// Returns TRUE if MPAM is configured to be virtual at EL.
boolean MPAMisVirtual(integer el)
return (MPAMIDR_EL1.HAS_HCR == '1' && EL2Enabled() &&
(( el == 0 && MPAMHCR_EL2.EL0_VPMEN == '1' ) ||
( el == 1 && MPAMHCR_EL2.EL1_VPMEN == '1')));
// genMPAM
// =======
// Returns MPAMinfo for exception level el.
// If InD is TRUE returns MPAM information using PARTID_I and PMG_I fields
// of MPAMel_ELx register and otherwise using PARTID_D and PMG_D fields.
// Produces a Secure PARTID if Secure is TRUE and a Non-secure PARTID otherwise.
MPAMinfo genMPAM(integer el, boolean InD, boolean secure)
MPAMinfo returnInfo;
PARTIDtype partidel;
boolean perr;
boolean gstplk = (el == 0 && EL2Enabled() &&
MPAMHCR_EL2.GSTAPP_PLK == '1' && HCR_EL2.TGE == '0');
integer eff_el = if gstplk then 1 else el;
(partidel, perr) = genPARTID(eff_el, InD);
PMGtype groupel = genPMG(eff_el, InD, perr);
returnInfo.mpam_ns = if secure then '0' else '1';
returnInfo.partid = partidel;
returnInfo.pmg = groupel;
return returnInfo;
// genMPAMel
// =========
// Returns MPAMinfo for specified EL in the current security state.
// InD is TRUE for instruction access and FALSE otherwise.
MPAMinfo genMPAMel(bits(2) el, boolean InD)
boolean secure = IsSecure();
if HaveMPAMExt() && MPAMisEnabled() then
return genMPAM(UInt(el), InD, secure);
return DefaultMPAMinfo(secure);
// genPARTID
// =========
// Returns physical PARTID and error boolean for exception level el.
// If InD is TRUE then PARTID is from MPAMel_ELx.PARTID_I and
// otherwise from MPAMel_ELx.PARTID_D.
(PARTIDtype, boolean) genPARTID(integer el, boolean InD)
PARTIDtype partidel = getMPAM_PARTID(el, InD);
integer partid_max = UInt(MPAMIDR_EL1.PARTID_MAX);
if UInt(partidel) > partid_max then
return (DefaultPARTID, TRUE);
if MPAMisVirtual(el) then
return MAP_vPARTID(partidel);
else
return (partidel, FALSE);
// genPMG
// ======
// Returns PMG for exception level el and I- or D-side (InD).
// If PARTID generation (genPARTID) encountered an error, genPMG() should be
// called with partid_err as TRUE.
PMGtype genPMG(integer el, boolean InD, boolean partid_err)
integer pmg_max = UInt(MPAMIDR_EL1.PMG_MAX);
// It is CONSTRAINED UNPREDICTABLE whether partid_err forces PMG to
// use the default or if it uses the PMG from getMPAM_PMG.
if partid_err then
return DefaultPMG;
PMGtype groupel = getMPAM_PMG(el, InD);
if UInt(groupel) <= pmg_max then
return groupel;
return DefaultPMG;
// getMPAM_PARTID
// ==============
// Returns a PARTID from one of the MPAMn_ELx registers.
// MPAMn selects the MPAMn_ELx register used.
// If InD is TRUE, selects the PARTID_I field of that
// register. Otherwise, selects the PARTID_D field.
PARTIDtype getMPAM_PARTID(integer MPAMn, boolean InD)
PARTIDtype partid;
boolean el2avail = EL2Enabled();
if InD then
case MPAMn of
when 3 partid = MPAM3_EL3.PARTID_I;
when 2 partid = if el2avail then MPAM2_EL2.PARTID_I else Zeros();
when 1 partid = MPAM1_EL1.PARTID_I;
when 0 partid = MPAM0_EL1.PARTID_I;
otherwise partid = PARTIDtype UNKNOWN;
else
case MPAMn of
when 3 partid = MPAM3_EL3.PARTID_D;
when 2 partid = if el2avail then MPAM2_EL2.PARTID_D else
PMGtypeZeros groupel = genPMG(eff_el, InD, perr);
returnInfo.mpam_ns = if secure then '0' else '1';
returnInfo.partid = partidel;
returnInfo.pmg = groupel;
return returnInfo;();
when 1 partid = MPAM1_EL1.PARTID_D;
when 0 partid = MPAM0_EL1.PARTID_D;
otherwise partid = PARTIDtype UNKNOWN;
return partid;
// genMPAMel
// =========
// Returns MPAMinfo for specified EL in the current security state.
// InD is TRUE for instruction access and FALSE otherwise.

MPAMinfo genMPAMel(bits(2) el, boolean InD)
    boolean secure = IsSecure();
    if HaveMPAMExt() && MPAMisEnabled() then
        return genMPAM(UInt(el), InD, secure);
    return DefaultMPAMinfo(secure);

// getMPAM_PMG
// ===========
// Returns a PMG from one of the MPAMn_ELx registers.
// MPAMn selects the MPAMn_ELx register used.
// If InD is TRUE, selects the PMG_I field of that
// register. Otherwise, selects the PMG_D field.

PMGtype getMPAM_PMG(integer MPAMn, boolean InD)
    PMGtype pmg;
    boolean el2avail = EL2Enabled();
    if InD then
        case MPAMn of
            when 3 pmg = MPAM3_EL3.PMG_I;
            when 2 pmg = if el2avail then MPAM2_EL2.PMG_I else Zeros();
            when 1 pmg = MPAM1_EL1.PMG_I;
            when 0 pmg = MPAM0_EL1.PMG_I;
            otherwise pmg = PMGtype UNKNOWN;
    else
        case MPAMn of
            when 3 pmg = MPAM3_EL3.PMG_D;
            when 2 pmg = if el2avail then MPAM2_EL2.PMG_D else Zeros();
            when 1 pmg = MPAM1_EL1.PMG_D;
            when 0 pmg = MPAM0_EL1.PMG_D;
            otherwise pmg = PMGtype UNKNOWN;
    return pmg;
// mapvpmw
// =======
// Map a virtual PARTID into a physical PARTID using
// the MPAMVPMn_EL2 registers.
// vpartid is now assumed in-range and valid (checked by caller)
// returns physical PARTID from mapping entry.

PARTIDtype mapvpmw(integer vpartid)
    bits(64) vpmw;
    integer wd = vpartid DIV 4;
    case wd of
        when 0 vpmw = MPAMVPM0_EL2;
        when 1 vpmw = MPAMVPM1_EL2;
        when 2 vpmw = MPAMVPM2_EL2;
        when 3 vpmw = MPAMVPM3_EL2;
        when 4 vpmw = MPAMVPM4_EL2;
        when 5 vpmw = MPAMVPM5_EL2;
        when 6 vpmw = MPAMVPM6_EL2;
        when 7 vpmw = MPAMVPM7_EL2;
        otherwise vpmw = Zeros(64);
    // vpme_lsb selects LSB of field within register
    integer vpme_lsb = (vpartid REM 4) * 16;
    return vpmw<vpme_lsb +: 16>;
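// Note (informative): the DIV/REM arithmetic in mapvpmw amounts to indexing
// an array of 64-bit words and extracting one 16-bit lane. A minimal C
// sketch, assuming a hypothetical mpamvpm[] snapshot of the MPAMVPMn_EL2
// registers and a vpartid already validated by the caller (0..31):

#include <stdint.h>

/* Hypothetical snapshot of the eight MPAMVPMn_EL2 mapping registers;
 * each 64-bit register holds four 16-bit physical-PARTID entries. */
static uint64_t mpamvpm[8];

/* Mirrors mapvpmw: entry 'vpartid' lives in register vpartid/4,
 * at bit offset (vpartid%4)*16, and is 16 bits wide. */
static uint16_t map_vpartid(unsigned vpartid)
{
    unsigned wd       = vpartid / 4;        /* which MPAMVPMn_EL2      */
    unsigned vpme_lsb = (vpartid % 4) * 16; /* LSB of the 16-bit field */
    return (uint16_t)(mpamvpm[wd] >> vpme_lsb);
}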
// BranchTo()
// ==========
// Set program counter to a new address, with a branch type
// In AArch64 state the address might include a tag in the top eight bits.

BranchTo(bits(N) target, BranchType branch_type)
    Hint_Branch(branch_type);
    if N == 32 then
        assert UsingAArch32();
        _PC = ZeroExtend(target);
    else
        assert N == 64 && !UsingAArch32();
        _PC = AArch64.BranchAddr(target<63:0>);
    return;
// BranchToAddr()
// ==============
// Set program counter to a new address, with a branch type
// In AArch64 state the address does not include a tag in the top eight bits.

BranchToAddr(bits(N) target, BranchType branch_type)
    Hint_Branch(branch_type);
    if N == 32 then
        assert UsingAArch32();
        _PC = ZeroExtend(target);
    else
        assert N == 64 && !UsingAArch32();
        _PC = target<63:0>;
    return;
enumeration BranchType {
    BranchType_DIRCALL,   // Direct Branch with link
    BranchType_INDCALL,   // Indirect Branch with link
    BranchType_ERET,      // Exception return (indirect)
    BranchType_DBGEXIT,   // Exit from Debug state
    BranchType_RET,       // Indirect branch with function return hint
    BranchType_DIR,       // Direct branch
    BranchType_INDIR,     // Indirect branch
    BranchType_EXCEPTION, // Exception entry
    BranchType_RESET,     // Reset
    BranchType_UNKNOWN};  // Other
// Report the hint passed to BranchTo() and BranchToAddr(), for consideration when processing
// the next instruction.
Hint_Branch(BranchType hint);
// Return address of the sequentially next instruction.
bits(N) NextInstrAddr();
// Reset the External Debug registers in the Core power domain.
ResetExternalDebugRegisters(boolean cold_reset);
// ThisInstrAddr()
// ===============
// Return address of the current instruction.

bits(N) ThisInstrAddr()
    assert N == 64 || (N == 32 && UsingAArch32());
    return _PC<N-1:0>;

bits(64) _PC;

array bits(64) _R[0..30];
// SPSR[] - non-assignment form
// ============================

bits(32) SPSR[]
bits(32) result;
if UsingAArch32() then
case PSTATE.M of
when M32_FIQ result = SPSR_fiq;
when M32_IRQ result = SPSR_irq;
when M32_Svc result = SPSR_svc;
when M32_Monitor result = SPSR_mon;
when M32_Abort result = SPSR_abt;
when M32_Hyp result = SPSR_hyp;
when M32_Undef result = SPSR_und;
otherwise Unreachable();
else
case PSTATE.EL of
when EL1 result = SPSR_EL1;
when EL2 result = SPSR_EL2;
when EL3 result = SPSR_EL3;
otherwise Unreachable();
return result;
// SPSR[] - assignment form
// ========================
SPSR[] = bits(32) value
if UsingAArch32() then
case PSTATE.M of
when M32_FIQ SPSR_fiq = value;
when M32_IRQ SPSR_irq = value;
when M32_Svc SPSR_svc = value;
when M32_Monitor SPSR_mon = value;
when M32_Abort SPSR_abt = value;
when M32_Hyp SPSR_hyp = value;
when M32_Undef SPSR_und = value;
otherwise Unreachable();
else
case PSTATE.EL of
when EL1 SPSR_EL1 = value;
when EL2 SPSR_EL2 = value;
when EL3 SPSR_EL3 = value;
otherwise Unreachable();
return;
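// Note (informative): the SPSR[] accessor above is a banked-register lookup
// keyed by the current mode. A minimal C sketch of the same selection, using
// hypothetical storage and the M32_* mode encodings defined later on this page:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical storage for the banked AArch32 SPSRs. */
static uint32_t spsr_fiq, spsr_irq, spsr_svc, spsr_mon, spsr_abt, spsr_hyp, spsr_und;

/* Mirrors the non-assignment form of SPSR[]: PSTATE.M selects which banked
 * copy is visible. The case values are the M32_* constants. */
static uint32_t *spsr_for_mode(uint8_t m)
{
    switch (m) {
    case 0x11: return &spsr_fiq; /* M32_FIQ     */
    case 0x12: return &spsr_irq; /* M32_IRQ     */
    case 0x13: return &spsr_svc; /* M32_Svc     */
    case 0x16: return &spsr_mon; /* M32_Monitor */
    case 0x17: return &spsr_abt; /* M32_Abort   */
    case 0x1A: return &spsr_hyp; /* M32_Hyp     */
    case 0x1B: return &spsr_und; /* M32_Undef   */
    default:   return NULL;      /* unreachable for valid modes */
    }
}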
enumeration ArchVersion {
    ARMv8p0
    , ARMv8p1
    , ARMv8p2
    , ARMv8p3
    , ARMv8p4
    , ARMv8p5
};
// BranchTargetCheck()
// ===================
// This function checks if the current instruction is a valid target for a branch
// taken into, or inside, a guarded page. It is executed on every cycle once the current
// instruction has been decoded and the values of InGuardedPage and BTypeCompatible have been
// determined for the current instruction.

BranchTargetCheck()
    assert HaveBTIExt() && !UsingAArch32();
    // The branch target check considers two state variables:
    // * InGuardedPage, which is evaluated during instruction fetch.
    // * BTypeCompatible, which is evaluated during instruction decode.
    if InGuardedPage && PSTATE.BTYPE != '00' && !BTypeCompatible && !Halted() then
        bits(64) pc = ThisInstrAddr();
        AArch64.BranchTargetException(pc<51:0>);
    boolean branch_instr = AArch64.ExecutingBROrBLROrRetInstr();
    boolean bti_instr = AArch64.ExecutingBTIInstr();
    // PSTATE.BTYPE defaults to 00 for instructions that do not explicitly set BTYPE.
    if !(branch_instr || bti_instr) then
        BTypeNext = '00';
// ClearEventRegister()
// ====================
// Clear the Event Register of this PE

ClearEventRegister()
    EventRegister = '0';
    return;
// Clear a pending physical SError interrupt
ClearPendingPhysicalSError();
// Clear a pending virtual SError interrupt
ClearPendingVirtualSError();
// ConditionHolds()
// ================
// Return TRUE iff COND currently holds

boolean ConditionHolds(bits(4) cond)
    // Evaluate base condition.
    case cond<3:1> of
        when '000' result = (PSTATE.Z == '1');                          // EQ or NE
        when '001' result = (PSTATE.C == '1');                          // CS or CC
        when '010' result = (PSTATE.N == '1');                          // MI or PL
        when '011' result = (PSTATE.V == '1');                          // VS or VC
        when '100' result = (PSTATE.C == '1' && PSTATE.Z == '0');       // HI or LS
        when '101' result = (PSTATE.N == PSTATE.V);                     // GE or LT
        when '110' result = (PSTATE.N == PSTATE.V && PSTATE.Z == '0');  // GT or LE
        when '111' result = TRUE;                                       // AL
    // Condition flag values in the set '111x' indicate always true
    // Otherwise, invert condition if necessary.
    if cond<0> == '1' && cond != '1111' then
        result = !result;
    return result;
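// Note (informative): ConditionHolds() is the standard condition-code test.
// The following self-contained C sketch (the condition_holds helper is
// hypothetical) mirrors the base-condition table and the bit[0] inversion
// rule, including the '1111' exception:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical flag values standing in for PSTATE.<N,Z,C,V>. */
struct flags { bool n, z, c, v; };

/* Bits [3:1] of cond select the base test; bit [0] inverts it,
 * except for the '1111' encoding, which always means "true". */
static bool condition_holds(uint8_t cond, struct flags f)
{
    bool result;
    switch ((cond >> 1) & 7) {
    case 0:  result = f.z;                  break; /* EQ or NE */
    case 1:  result = f.c;                  break; /* CS or CC */
    case 2:  result = f.n;                  break; /* MI or PL */
    case 3:  result = f.v;                  break; /* VS or VC */
    case 4:  result = f.c && !f.z;          break; /* HI or LS */
    case 5:  result = f.n == f.v;           break; /* GE or LT */
    case 6:  result = (f.n == f.v) && !f.z; break; /* GT or LE */
    default: result = true;                 break; /* AL       */
    }
    if ((cond & 1) && cond != 0xF)
        result = !result;
    return result;
}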
ConsumptionOfSpeculativeDataBarrier();
// CurrentInstrSet()
// =================

InstrSet CurrentInstrSet()
    if UsingAArch32() then
        result = if PSTATE.T == '0' then InstrSet_A32 else InstrSet_T32;
        // PSTATE.J is RES0. Implementation of T32EE or Jazelle state not permitted.
    else
        result = InstrSet_A64;
    return result;
// CurrentPL()
// ===========

PrivilegeLevel CurrentPL()
    return PLOfEL(PSTATE.EL);
constant bits(2) EL3 = '11';
constant bits(2) EL2 = '10';
constant bits(2) EL1 = '01';
constant bits(2) EL0 = '00';
// EL2Enabled()
// ============
// Returns TRUE if EL2 is present and executing
// - with SCR_EL3.NS==1 when Non-secure EL2 is implemented, or
// - with SCR_EL3.NS==0 when Secure EL2 is implemented and enabled, or
// - when EL3 is not implemented.
boolean EL2Enabled()
return HaveEL(EL2) && (!HaveEL(EL3) || SCR_EL3.NS == '1' || IsSecureEL2Enabled());
// ELFromM32()
// ===========

(boolean,bits(2)) ELFromM32(bits(5) mode)
    // Convert an AArch32 mode encoding to an Exception level.
    // Returns (valid,EL):
    //   'valid' is TRUE if 'mode<4:0>' encodes a mode that is both valid for this implementation
    //           and the current value of SCR.NS/SCR_EL3.NS.
    //   'EL'    is the Exception level decoded from 'mode'.
    bits(2) el;
    boolean valid = !BadMode(mode);  // Check for modes that are not valid for this implementation
    case mode of
        when M32_Monitor
            el = EL3;
        when M32_Hyp
            el = EL2;
            valid = valid && (!HaveEL(EL3) || SCR_GEN[].NS == '1');
        when M32_FIQ, M32_IRQ, M32_Svc, M32_Abort, M32_Undef, M32_System
            // If EL3 is implemented and using AArch32, then these modes are EL3 modes in Secure
            // state, and EL1 modes in Non-secure state. If EL3 is not implemented or is using
            // AArch64, then these modes are EL1 modes.
            el = (if HaveEL(EL3) && HighestELUsingAArch32() && SCR.NS == '0' then EL3 else EL1);
        when M32_User
            el = EL0;
        otherwise
            valid = FALSE;  // Passed an illegal mode value
    if !valid then el = bits(2) UNKNOWN;
    return (valid, el);
// ELFromSPSR()
// ============
// Convert an SPSR value encoding to an Exception level.
// Returns (valid,EL):
//   'valid' is TRUE if 'spsr<4:0>' encodes a valid mode for the current state.
//   'EL'    is the Exception level decoded from 'spsr'.

(boolean,bits(2)) ELFromSPSR(bits(32) spsr)
    if spsr<4> == '0' then                      // AArch64 state
        el = spsr<3:2>;
        if HighestELUsingAArch32() then         // No AArch64 support
            valid = FALSE;
        elsif !HaveEL(el) then                  // Exception level not implemented
            valid = FALSE;
        elsif spsr<1> == '1' then               // M[1] must be 0
            valid = FALSE;
        elsif el == EL0 && spsr<0> == '1' then  // for EL0, M[0] must be 0
            valid = FALSE;
        elsif el == EL2 && HaveEL(EL3) && !IsSecureEL2Enabled() && SCR_EL3.NS == '0' then
            valid = FALSE;  // Unless Secure EL2 is enabled, EL2 only valid in Non-secure state
        else
            valid = TRUE;
    elsif HaveAnyAArch32() then                 // AArch32 state
        (valid, el) = ELFromM32(spsr<4:0>);
    else
        valid = FALSE;
    if !valid then el = bits(2) UNKNOWN;
    return (valid,el);
// ELIsInHost()
// ============

boolean ELIsInHost(bits(2) el)
    return ((IsSecureEL2Enabled() || !IsSecureBelowEL3()) && HaveVirtHostExt() &&
            !ELUsingAArch32(EL2) && HCR_EL2.E2H == '1' &&
            (el == EL2 || (el == EL0 && HCR_EL2.TGE == '1')));
// ELStateUsingAArch32()
// =====================

boolean ELStateUsingAArch32(bits(2) el, boolean secure)
    // See ELStateUsingAArch32K() for description. Must only be called in circumstances where
    // result is valid (typically, that means 'el IN {EL1,EL2,EL3}').
    (known, aarch32) = ELStateUsingAArch32K(el, secure);
    assert known;
    return aarch32;
// ELStateUsingAArch32K()
// ======================

(boolean,boolean) ELStateUsingAArch32K(bits(2) el, boolean secure)
    // Returns (known, aarch32):
    //   'known'   is FALSE for EL0 if the current Exception level is not EL0 and EL1 is
    //             using AArch64, since it cannot determine the state of EL0; TRUE otherwise.
    //   'aarch32' is TRUE if the specified Exception level is using AArch32; FALSE otherwise.
    if !HaveAArch32EL(el) then
        return (TRUE, FALSE);  // Exception level is using AArch64
    elsif secure && el == EL2 then
        return (TRUE, FALSE);  // Secure EL2 is using AArch64
    elsif HighestELUsingAArch32() then
        return (TRUE, TRUE);   // Highest Exception level, and therefore all levels are using AArch32
    elsif el == HighestEL() then
        return (TRUE, FALSE);  // This is highest Exception level, so is using AArch64
    // Remainder of function deals with the interprocessing cases when highest Exception level is using AArch64
    boolean aarch32 = boolean UNKNOWN;
    boolean known = TRUE;
    aarch32_below_el3 = HaveEL(EL3) && SCR_EL3.RW == '0' &&
                        (!secure || !HaveSecureEL2Ext() || SCR_EL3.EEL2 == '0');
    aarch32_at_el1 = (aarch32_below_el3 || (HaveEL(EL2) &&
                      ((HaveSecureEL2Ext() && SCR_EL3.EEL2 == '1') || !secure) && HCR_EL2.RW == '0' &&
                      !(HCR_EL2.E2H == '1' && HCR_EL2.TGE == '1' && HaveVirtHostExt())));
    if el == EL0 && !aarch32_at_el1 then  // Only know if EL0 using AArch32 from PSTATE
        if PSTATE.EL == EL0 then
            aarch32 = PSTATE.nRW == '1';  // EL0 controlled by PSTATE
        else
            known = FALSE;                // EL0 state is UNKNOWN
    else
        aarch32 = (aarch32_below_el3 && el != EL3) || (aarch32_at_el1 && el IN {EL1,EL0});
    if !known then aarch32 = boolean UNKNOWN;
    return (known, aarch32);
// ELUsingAArch32()
// ================

boolean ELUsingAArch32(bits(2) el)
    return ELStateUsingAArch32(el, IsSecureBelowEL3());
// ELUsingAArch32K()
// =================

(boolean,boolean) ELUsingAArch32K(bits(2) el)
    return ELStateUsingAArch32K(el, IsSecureBelowEL3());
// Terminate processing of the current instruction.
EndOfInstruction();
// PE enters a low-power state
EnterLowPowerState();
bits(1) EventRegister;
// GetPSRFromPSTATE()
// ==================
// Return a PSR value which represents the current PSTATE

bits(32) GetPSRFromPSTATE()
    bits(32) spsr = Zeros();
    spsr<31:28> = PSTATE.<N,Z,C,V>;
    if HavePANExt() then spsr<22> = PSTATE.PAN;
    spsr<20> = PSTATE.IL;
    if PSTATE.nRW == '1' then  // AArch32 state
        spsr<27> = PSTATE.Q;
        spsr<26:25> = PSTATE.IT<1:0>;
        if HaveSSBSExt() then spsr<23> = PSTATE.SSBS;
        if HaveDITExt() then spsr<21> = PSTATE.DIT;
        spsr<19:16> = PSTATE.GE;
        spsr<15:10> = PSTATE.IT<7:2>;
        spsr<9> = PSTATE.E;
        spsr<8:6> = PSTATE.<A,I,F>;  // No PSTATE.D in AArch32 state
        spsr<5> = PSTATE.T;
        assert PSTATE.M<4> == PSTATE.nRW;  // bit [4] is the discriminator
        spsr<4:0> = PSTATE.M;
    else  // AArch64 state
        if HaveMTEExt() then spsr<25> = PSTATE.TCO;
        if HaveDITExt() then spsr<24> = PSTATE.DIT;
        if HaveUAOExt() then spsr<23> = PSTATE.UAO;
        spsr<21> = PSTATE.SS;
        if HaveSSBSExt() then spsr<12> = PSTATE.SSBS;
        if HaveBTIExt() then spsr<11:10> = PSTATE.BTYPE;
        spsr<9:6> = PSTATE.<D,A,I,F>;
        spsr<4> = PSTATE.nRW;
        spsr<3:2> = PSTATE.EL;
        spsr<0> = PSTATE.SP;
    return spsr;
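// Note (informative): a C sketch of the field packing performed by the
// AArch64 arm of GetPSRFromPSTATE(), reduced to a hypothetical subset of
// PSTATE fields (NZCV, IL, EL, SP) to show the bit positions:

#include <stdint.h>
#include <stdbool.h>

/* Hypothetical subset of PSTATE, enough to show the packing. */
struct pstate { bool n, z, c, v, il; uint8_t el; bool sp; };

/* NZCV land in bits [31:28], IL in bit 20, EL in bits [3:2], SP in bit 0. */
static uint32_t psr_from_pstate(const struct pstate *ps)
{
    uint32_t spsr = 0;
    spsr |= (uint32_t)ps->n  << 31;
    spsr |= (uint32_t)ps->z  << 30;
    spsr |= (uint32_t)ps->c  << 29;
    spsr |= (uint32_t)ps->v  << 28;
    spsr |= (uint32_t)ps->il << 20;
    spsr |= (uint32_t)(ps->el & 3) << 2;
    spsr |= (uint32_t)ps->sp;
    return spsr;
}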
// HasArchVersion()
// ================
// Return TRUE if the implemented architecture includes the extensions defined in the specified
// architecture version.

boolean HasArchVersion(ArchVersion version)
    return version == ARMv8p0 || boolean IMPLEMENTATION_DEFINED;
// HaveAArch32EL()
// ===============

boolean HaveAArch32EL(bits(2) el)
// Return TRUE if Exception level 'el' supports AArch32 in this implementation
if !HaveEL(el) then
return FALSE; // The Exception level is not implemented
elsif !HaveAnyAArch32() then
return FALSE; // No Exception level can use AArch32
elsif HighestELUsingAArch32() then
return TRUE; // All Exception levels are using AArch32
elsif el == HighestEL() then
return FALSE; // The highest Exception level is using AArch64
elsif el == EL0 then
return TRUE; // EL0 must support using AArch32 if any AArch32
return boolean IMPLEMENTATION_DEFINED;
// HaveAnyAArch32()
// ================
// Return TRUE if AArch32 state is supported at any Exception level

boolean HaveAnyAArch32()
    return boolean IMPLEMENTATION_DEFINED;
// HaveAnyAArch64()
// ================
// Return TRUE if AArch64 state is supported at any Exception level

boolean HaveAnyAArch64()
    return !HighestELUsingAArch32();
// HaveEL()
// ========
// Return TRUE if Exception level 'el' is supported

boolean HaveEL(bits(2) el)
    if el IN {EL1,EL0} then
        return TRUE;  // EL1 and EL0 must exist
    return boolean IMPLEMENTATION_DEFINED;
// HaveFP16Ext()
// =============
// Return TRUE if FP16 extension is supported

boolean HaveFP16Ext()
    return boolean IMPLEMENTATION_DEFINED;
// HighestEL()
// ===========
// Returns the highest implemented Exception level.

bits(2) HighestEL()
if HaveEL(EL3) then
return EL3;
elsif HaveEL(EL2) then
return EL2;
else
return EL1;
// HighestELUsingAArch32()
// =======================
// Return TRUE if configured to boot into AArch32 operation

boolean HighestELUsingAArch32()
    if !HaveAnyAArch32() then return FALSE;
    return boolean IMPLEMENTATION_DEFINED;  // e.g. CFG32SIGNAL == HIGH
Hint_Yield();
// HaveELUsingSecurityState()
// ==========================
// Returns TRUE if Exception level 'el' with Security state 'secure' is supported,
// FALSE otherwise.

boolean HaveELUsingSecurityState(bits(2) el, boolean secure)
    case el of
        when EL3
            assert secure;
            return HaveEL(EL3);
        when EL2
            if secure then
                return HaveEL(EL2) && HaveSecureEL2Ext();
            else
                return HaveEL(EL2);
        otherwise
            return (HaveEL(EL3) ||
                    (secure == boolean IMPLEMENTATION_DEFINED "Secure-only implementation"));

// IllegalExceptionReturn()
// ========================

boolean IllegalExceptionReturn(bits(32) spsr)
    // Check for illegal return:
    //   * To an unimplemented Exception level.
    //   * To EL2 in Secure state, when SecureEL2 is not enabled.
    //   * To EL0 using AArch64 state, with SPSR.M[0]==1.
    //   * To AArch64 state with SPSR.M[1]==1.
    //   * To AArch32 state with an illegal value of SPSR.M.
    (valid, target) = ELFromSPSR(spsr);
    if !valid then return TRUE;
    // Check for return to higher Exception level
    if UInt(target) > UInt(PSTATE.EL) then return TRUE;
    spsr_mode_is_aarch32 = (spsr<4> == '1');
    // Check for illegal return:
    //   * To EL1, EL2 or EL3 with register width specified in the SPSR different from the
    //     Execution state used in the Exception level being returned to, as determined by
    //     the SCR_EL3.RW or HCR_EL2.RW bits, or as configured from reset.
    //   * To EL0 using AArch64 state when EL1 is using AArch32 state as determined by the
    //     SCR_EL3.RW or HCR_EL2.RW bits or as configured from reset.
    //   * To AArch64 state from AArch32 state (should be caught by above)
    (known, target_el_is_aarch32) = ELUsingAArch32K(target);
    assert known || (target == EL0 && !ELUsingAArch32(EL1));
    if known && spsr_mode_is_aarch32 != target_el_is_aarch32 then return TRUE;
    // Check for illegal return from AArch32 to AArch64
    if UsingAArch32() && !spsr_mode_is_aarch32 then return TRUE;
    // Check for illegal return to EL1 when HCR.TGE is set and when either of
    //   * SecureEL2 is enabled.
    //   * SecureEL2 is not enabled and EL1 is in Non-secure state.
    if HaveEL(EL2) && target == EL1 && HCR_EL2.TGE == '1' then
        if (!IsSecureBelowEL3() || IsSecureEL2Enabled()) then return TRUE;
    return FALSE;
enumeration InstrSet {InstrSet_A64, InstrSet_A32, InstrSet_T32};
InstructionSynchronizationBarrier();
// InterruptPending()
// ==================
// Return TRUE if there are any pending physical or virtual interrupts, and FALSE otherwise

boolean InterruptPending()
    return IsPhysicalSErrorPending() || IsVirtualSErrorPending();
// IsEventRegisterSet()
// ====================
// Return TRUE if the Event Register of this PE is set, and FALSE otherwise
boolean IsEventRegisterSet()
return EventRegister == '1';
// IsHighestEL()
// =============
// Returns TRUE if given exception level is the highest exception level implemented

boolean IsHighestEL(bits(2) el)
    return HighestEL() == el;
// IsInHost()
// ==========

boolean IsInHost()
    return ELIsInHost(PSTATE.EL);
// Return TRUE if a physical SError interrupt is pending
boolean IsPhysicalSErrorPending();
// IsSecure()
// ==========
// Returns TRUE if current Exception level is in Secure state.

boolean IsSecure()
    if HaveEL(EL3) && !UsingAArch32() && PSTATE.EL == EL3 then
        return TRUE;
    elsif HaveEL(EL3) && UsingAArch32() && PSTATE.M == M32_Monitor then
        return TRUE;
    return IsSecureBelowEL3();
// IsSecureBelowEL3()
// ==================
// Return TRUE if an Exception level below EL3 is in Secure state
// or would be following an exception return to that level.
//
// Differs from IsSecure in that it ignores the current EL or Mode
// in considering security state.
// That is, if at AArch64 EL3 or in AArch32 Monitor mode, whether an
// exception return would pass to Secure or Non-secure state.

boolean IsSecureBelowEL3()
if HaveEL(EL3) then
return SCR_GEN[].NS == '0';
elsif HaveEL(EL2) && (!HaveSecureEL2Ext() || HighestELUsingAArch32()) then
// If Secure EL2 is not an architecture option then we must be Non-secure.
return FALSE;
else
// TRUE if processor is Secure or FALSE if Non-secure.
return boolean IMPLEMENTATION_DEFINED "Secure-only implementation";
// IsSecureEL2Enabled()
// ====================
// Returns TRUE if Secure EL2 is enabled, FALSE otherwise.

boolean IsSecureEL2Enabled()
    if HaveEL(EL2) && HaveSecureEL2Ext() then
        if HaveEL(EL3) then
            if !ELUsingAArch32(EL3) && SCR_EL3.EEL2 == '1' then
                return TRUE;
            else
                return FALSE;
        else
            return IsSecure();
    else
        return FALSE;
// Return TRUE if a virtual SError interrupt is pending
boolean IsVirtualSErrorPending();
constant bits(5) M32_User = '10000';
constant bits(5) M32_FIQ = '10001';
constant bits(5) M32_IRQ = '10010';
constant bits(5) M32_Svc = '10011';
constant bits(5) M32_Monitor = '10110';
constant bits(5) M32_Abort = '10111';
constant bits(5) M32_Hyp = '11010';
constant bits(5) M32_Undef = '11011';
constant bits(5) M32_System = '11111';
// PLOfEL()
// ========

PrivilegeLevel PLOfEL(bits(2) el)
    case el of
        when EL3 return if HighestELUsingAArch32() then PL1 else PL3;
        when EL2 return PL2;
        when EL1 return PL1;
        when EL0 return PL0;
ProcState PSTATE;
enumeration PrivilegeLevel {PL3, PL2, PL1, PL0};
type ProcState is (
bits (1) N, // Negative condition flag
bits (1) Z, // Zero condition flag
bits (1) C, // Carry condition flag
bits (1) V, // oVerflow condition flag
bits (1) D, // Debug mask bit [AArch64 only]
bits (1) A, // SError interrupt mask bit
bits (1) I, // IRQ mask bit
bits (1) F, // FIQ mask bit
bits (1) PAN, // Privileged Access Never Bit [v8.1]
bits (1) UAO, // User Access Override [v8.2]
bits (1) DIT, // Data Independent Timing [v8.4]
bits (1) TCO, // Tag Check Override [v8.5, AArch64 only]
bits (2) BTYPE, // Branch Type [v8.5]
bits (1) SS, // Software step bit
bits (1) IL, // Illegal Execution state bit
bits (2) EL, // Exception Level
bits (1) nRW, // not Register Width: 0=64, 1=32
bits (1) SP, // Stack pointer select: 0=SP0, 1=SPx [AArch64 only]
bits (1) Q, // Cumulative saturation flag [AArch32 only]
bits (4) GE, // Greater than or Equal flags [AArch32 only]
bits (1) SSBS, // Speculative Store Bypass Safe
bits (8) IT, // If-then bits, RES0 in CPSR [AArch32 only]
bits (1) J, // J bit, RES0 [AArch32 only, RES0 in SPSR and CPSR]
bits (1) T, // T32 bit, RES0 in CPSR [AArch32 only]
bits (1) E, // Endianness bit [AArch32 only]
bits (5) M // Mode field [AArch32 only]
)
// RestoredITBits()
// ================
// Get the value of PSTATE.IT to be restored on this exception return.

bits(8) RestoredITBits(bits(32) spsr)
    it = spsr<15:10,26:25>;
    // When PSTATE.IL is set, it is CONSTRAINED UNPREDICTABLE whether the IT bits are each set
    // to zero or copied from the SPSR.
    if PSTATE.IL == '1' then
        if ConstrainUnpredictableBool(Unpredictable_ILZEROIT) then return '00000000';
        else return it;
    // The IT bits are forced to zero when they are set to a reserved value.
    if !IsZero(it<7:4>) && IsZero(it<3:0>) then
        return '00000000';
    // The IT bits are forced to zero when returning to A32 state, or when returning to an EL
    // with the ITD bit set to 1, and the IT bits are describing a multi-instruction block.
    itd = if PSTATE.EL == EL2 then HSCTLR.ITD else SCTLR.ITD;
    if (spsr<5> == '0' && !IsZero(it)) || (itd == '1' && !IsZero(it<2:0>)) then
        return '00000000';
    else
        return it;
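// Note (informative): the reserved-value rule in RestoredITBits() (non-zero
// IT<7:4> with zero IT<3:0>) can be expressed compactly in C; sanitize_it_bits
// below is a hypothetical helper illustrating only that one check:

#include <stdint.h>
#include <stdbool.h>

/* IT<7:4> encodes the base condition and IT<3:0> the block mask; a non-zero
 * condition with a zero mask is a reserved combination, so the IT bits are
 * forced to zero. */
static uint8_t sanitize_it_bits(uint8_t it)
{
    bool cond_nonzero = (it & 0xF0) != 0;
    bool mask_zero    = (it & 0x0F) == 0;
    return (cond_nonzero && mask_zero) ? 0x00 : it;
}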
type SCRType;
// SCR_GEN[]
// =========
SCRType SCR_GEN[]
// AArch32 secure & AArch64 EL3 registers are not architecturally mapped
assert HaveEL(EL3);
bits(64) r;
if HighestELUsingAArch32() then
r = ZeroExtend(SCR);
else
r = SCR_EL3;
return r;
// Signal an event to all PEs in a multiprocessor system to set their Event Registers.
// When a PE executes the SEV instruction, it causes this function to be executed
SendEvent();
// SendEventLocal()
// ================
// Set the local Event Register of this PE.
// When a PE executes the SEVL instruction, it causes this function to be executed

SendEventLocal()
    EventRegister = '1';
    return;
// SetPSTATEFromPSR()
// ==================
// Set PSTATE based on a PSR value

SetPSTATEFromPSR(bits(32) spsr)
    PSTATE.SS = DebugExceptionReturnSS(spsr);
    if IllegalExceptionReturn(spsr) then
        PSTATE.IL = '1';
        if HaveSSBSExt() then PSTATE.SSBS = bit UNKNOWN;
    else
        // State that is reinstated only on a legal exception return
        PSTATE.IL = spsr<20>;
        if spsr<4> == '1' then             // AArch32 state
            AArch32.WriteMode(spsr<4:0>);  // Sets PSTATE.EL correctly
            if HaveSSBSExt() then PSTATE.SSBS = spsr<23>;
        else                               // AArch64 state
            PSTATE.nRW = '0';
            PSTATE.EL = spsr<3:2>;
            PSTATE.SP = spsr<0>;
            if HaveSSBSExt() then PSTATE.SSBS = spsr<12>;
    // If PSTATE.IL is set and returning to AArch32 state, it is CONSTRAINED UNPREDICTABLE whether
    // the T bit is set to zero or copied from SPSR.
    if PSTATE.IL == '1' && PSTATE.nRW == '1' then
        if ConstrainUnpredictableBool(Unpredictable_ILZEROT) then spsr<5> = '0';
    // State that is reinstated regardless of illegal exception return
    PSTATE.<N,Z,C,V> = spsr<31:28>;
    if HavePANExt() then PSTATE.PAN = spsr<22>;
    if PSTATE.nRW == '1' then  // AArch32 state
        PSTATE.Q = spsr<27>;
        PSTATE.IT = RestoredITBits(spsr);
        ShouldAdvanceIT = FALSE;
        if HaveDITExt() then PSTATE.DIT = (if Restarting() then spsr<24> else spsr<21>);
        PSTATE.GE = spsr<19:16>;
        PSTATE.E = spsr<9>;
        PSTATE.<A,I,F> = spsr<8:6>;  // No PSTATE.D in AArch32 state
        PSTATE.T = spsr<5>;          // PSTATE.J is RES0
    else  // AArch64 state
        if HaveMTEExt() then PSTATE.TCO = spsr<25>;
        if HaveDITExt() then PSTATE.DIT = spsr<24>;
        if HaveUAOExt() then PSTATE.UAO = spsr<23>;
        if HaveBTIExt() then PSTATE.BTYPE = spsr<11:10>;
        PSTATE.<D,A,I,F> = spsr<9:6>;  // No PSTATE.<Q,IT,GE,E,T> in AArch64 state
    return;
SpeculationBarrier();
SynchronizeContext();
// Implements the error synchronization event.
SynchronizeErrors();
// Take any pending unmasked physical SError interrupt
TakeUnmaskedPhysicalSErrorInterrupts(boolean iesb_req);
boolean ShouldAdvanceIT;

// Take any pending unmasked physical SError interrupt or unmasked virtual SError
// interrupt.
TakeUnmaskedSErrorInterrupts();
Unreachable()
    assert FALSE;
// UsingAArch32()
// ==============
// Return TRUE if the current Exception level is using AArch32, FALSE if using AArch64.

boolean UsingAArch32()
boolean aarch32 = (PSTATE.nRW == '1');
if !HaveAnyAArch32() then assert !aarch32;
if HighestELUsingAArch32() then assert aarch32;
return aarch32;
// WaitForEvent()
// ==============
// PE suspends its operation and enters a low-power state
// if the Event Register is clear when the WFE is executed

WaitForEvent()
    if EventRegister == '0' then
        EnterLowPowerState();
    return;
bits(32) ThisInstr();

// WaitForInterrupt()
// ==================
// PE suspends its operation to enter a low-power state
// until a WFI wake-up event occurs or the PE is reset

WaitForInterrupt()
    EnterLowPowerState();
    return;
integer ThisInstrLength();

// ConstrainUnpredictable()
// ========================
// Return the appropriate Constraint result to control the caller's behavior. The return value
// is IMPLEMENTATION DEFINED within a permitted list for each UNPREDICTABLE case.
// (The permitted list is determined by an assert or case statement at the call site.)

// NOTE: This version of the function uses an Unpredictable argument to define the call site.
// This argument does not appear in the version used in the Armv8 Architecture Reference Manual.
// The extra argument is used here to allow this example definition. This is an example only and
// does not imply a fixed implementation of these behaviors. Indeed the intention is that it should
// be defined by each implementation, according to its implementation choices.

Constraint ConstrainUnpredictable(Unpredictable which)
case which of
when Unpredictable_WBOVERLAPLD
return Constraint_WBSUPPRESS; // return loaded value
when Unpredictable_WBOVERLAPST
return Constraint_NONE; // store pre-writeback value
when Unpredictable_LDPOVERLAP
return Constraint_UNDEF; // instruction is UNDEFINED
when Unpredictable_BASEOVERLAP
return Constraint_NONE; // use original address
when Unpredictable_DATAOVERLAP
return Constraint_NONE; // store original value
when Unpredictable_DEVPAGE2
return Constraint_FAULT; // take an alignment fault
when Unpredictable_INSTRDEVICE
return Constraint_NONE; // Do not take a fault
when Unpredictable_RESCPACR
return Constraint_UNKNOWN; // Map to UNKNOWN value
when Unpredictable_RESMAIR
return Constraint_UNKNOWN; // Map to UNKNOWN value
when Unpredictable_RESTEXCB
return Constraint_UNKNOWN; // Map to UNKNOWN value
when Unpredictable_RESDACR
return Constraint_UNKNOWN; // Map to UNKNOWN value
when Unpredictable_RESPRRR
return Constraint_UNKNOWN; // Map to UNKNOWN value
when Unpredictable_RESVTCRS
return Constraint_UNKNOWN; // Map to UNKNOWN value
when Unpredictable_RESTnSZ
return Constraint_FORCE; // Map to the limit value
when Unpredictable_OORTnSZ
return Constraint_FORCE; // Map to the limit value
when Unpredictable_LARGEIPA
return Constraint_FORCE; // Restrict the inputsize to the PAMax value
when Unpredictable_ESRCONDPASS
return Constraint_FALSE; // Report as "AL"
when Unpredictable_ILZEROIT
return Constraint_FALSE; // Do not zero PSTATE.IT
when Unpredictable_ILZEROT
return Constraint_FALSE; // Do not zero PSTATE.T
when Unpredictable_BPVECTORCATCHPRI
return Constraint_TRUE; // Debug Vector Catch: match on 2nd halfword
when Unpredictable_VCMATCHHALF
return Constraint_FALSE; // No match
when Unpredictable_VCMATCHDAPA
return Constraint_FALSE; // No match on Data Abort or Prefetch abort
when Unpredictable_WPMASKANDBAS
return Constraint_FALSE; // Watchpoint disabled
when Unpredictable_WPBASCONTIGUOUS
return Constraint_FALSE; // Watchpoint disabled
when Unpredictable_RESWPMASK
return Constraint_DISABLED; // Watchpoint disabled
when Unpredictable_WPMASKEDBITS
return Constraint_FALSE; // Watchpoint disabled
when Unpredictable_RESBPWPCTRL
return Constraint_DISABLED; // Breakpoint/watchpoint disabled
when Unpredictable_BPNOTIMPL
return Constraint_DISABLED; // Breakpoint disabled
when Unpredictable_RESBPTYPE
return Constraint_DISABLED; // Breakpoint disabled
when Unpredictable_BPNOTCTXCMP
return Constraint_DISABLED; // Breakpoint disabled
when Unpredictable_BPMATCHHALF
return Constraint_FALSE; // No match
when Unpredictable_BPMISMATCHHALF
return Constraint_FALSE; // No match
when Unpredictable_RESTARTALIGNPC
return Constraint_FALSE; // Do not force alignment
when Unpredictable_RESTARTZEROUPPERPC
return Constraint_TRUE; // Force zero extension
when Unpredictable_ZEROUPPER
return Constraint_TRUE; // zero top halves of X registers
when Unpredictable_ERETZEROUPPERPC
return Constraint_TRUE; // zero top half of PC
when Unpredictable_A32FORCEALIGNPC
return Constraint_FALSE; // Do not force alignment
when Unpredictable_SMD
return Constraint_UNDEF; // disabled SMC is Unallocated
when Unpredictable_NONFAULT
return Constraint_FALSE; // Speculation enabled
when Unpredictable_SVEZEROUPPER
return Constraint_TRUE; // zero top bits of Z registers
when Unpredictable_SVELDNFDATA
return Constraint_TRUE; // Load mem data in NF loads
when Unpredictable_SVELDNFZERO
return Constraint_TRUE; // Write zeros in NF loads
when Unpredictable_AFUPDATE // AF update for alignment or permission fault
return Constraint_TRUE;
when Unpredictable_IESBinDebug // Use SCTLR[].IESB in Debug state
return Constraint_TRUE;
when Unpredictable_ZEROBTYPE
return Constraint_TRUE; // Save BTYPE in SPSR_ELx/DPSR_EL0 as '00'
when Unpredictable_CLEARERRITEZERO // Clearing sticky errors when instruction in flight
return Constraint_FALSE;
// ConstrainUnpredictableBits()
// ============================
// This is a variant of ConstrainUnpredictable for when the result can be Constraint_UNKNOWN.
// If the result is Constraint_UNKNOWN then the function also returns UNKNOWN value, but that
// value is always an allocated value; that is, one for which the behavior is not itself
// CONSTRAINED.
// NOTE: This version of the function uses an Unpredictable argument to define the call site.
// This argument does not appear in the version used in the Armv8 Architecture Reference Manual.
// See the NOTE on ConstrainUnpredictable() for more information.
// This is an example placeholder only and does not imply a fixed implementation of the bits part
// of the result, and may not be applicable in all cases.
(Constraint,bits(width)) ConstrainUnpredictableBits(Unpredictable which)
    c = ConstrainUnpredictable(which);
    if c == Constraint_UNKNOWN then
        return (c, Zeros(width)); // See notes; this is an example implementation only
    else
        return (c, bits(width) UNKNOWN); // bits result not used

// Unreachable()
// =============

Unreachable()
    assert FALSE;
// UsingAArch32()
// ==============
// Return TRUE if the current Exception level is using AArch32, FALSE if using AArch64.
boolean UsingAArch32()
    boolean aarch32 = (PSTATE.nRW == '1');
    if !HaveAnyAArch32() then assert !aarch32;
    if HighestELUsingAArch32() then assert aarch32;
    return aarch32;

// ConstrainUnpredictableBool()
// ============================
// This is a simple wrapper function for cases where the constrained result is either TRUE or FALSE.
// NOTE: This version of the function uses an Unpredictable argument to define the call site.
// This argument does not appear in the version used in the Armv8 Architecture Reference Manual.
// See the NOTE on ConstrainUnpredictable() for more information.

boolean ConstrainUnpredictableBool(Unpredictable which)
    c = ConstrainUnpredictable(which);
    assert c IN {Constraint_TRUE, Constraint_FALSE};
    return (c == Constraint_TRUE);
// WaitForEvent()
// ==============
// PE suspends its operation and enters a low-power state
// if the Event Register is clear when the WFE is executed

WaitForEvent()
    if EventRegister == '0' then
        EnterLowPowerState();
    return;

// ConstrainUnpredictableInteger()
// ===============================
// This is a variant of ConstrainUnpredictable for when the result can be Constraint_UNKNOWN. If
// the result is Constraint_UNKNOWN then the function also returns an UNKNOWN value in the range
// low to high, inclusive.
// NOTE: This version of the function uses an Unpredictable argument to define the call site.
// This argument does not appear in the version used in the Armv8 Architecture Reference Manual.
// See the NOTE on ConstrainUnpredictable() for more information.
// This is an example placeholder only and does not imply a fixed implementation of the integer part
// of the result.

(Constraint,integer) ConstrainUnpredictableInteger(integer low, integer high, Unpredictable which)
    c = ConstrainUnpredictable(which);
    if c == Constraint_UNKNOWN then
        return (c, low); // See notes; this is an example implementation only
    else
        return (c, integer UNKNOWN); // integer result not used
// WaitForInterrupt()
// ==================
// PE suspends its operation to enter a low-power state
// until a WFI wake-up event occurs or the PE is reset

WaitForInterrupt()
    EnterLowPowerState();
    return;

enumeration Constraint {
    // General
    Constraint_NONE,              // Instruction executes with
                                  // no change or side-effect to its described behavior
    Constraint_UNKNOWN,           // Destination register has UNKNOWN value
    Constraint_UNDEF,             // Instruction is UNDEFINED
    Constraint_UNDEFEL0,          // Instruction is UNDEFINED at EL0 only
    Constraint_NOP,               // Instruction executes as NOP
    Constraint_TRUE,
    Constraint_FALSE,
    Constraint_DISABLED,
    Constraint_UNCOND,            // Instruction executes unconditionally
    Constraint_COND,              // Instruction executes conditionally
    Constraint_ADDITIONAL_DECODE, // Instruction executes with additional decode
    // Load-store
    Constraint_WBSUPPRESS, Constraint_FAULT,
    // IPA too large
    Constraint_FORCE, Constraint_FORCENOSLCHECK};
enumeration Unpredictable {
    // Writeback/transfer register overlap (load)
    Unpredictable_WBOVERLAPLD,
    // Writeback/transfer register overlap (store)
    Unpredictable_WBOVERLAPST,
    // Load Pair transfer register overlap
    Unpredictable_LDPOVERLAP,
    // Store-exclusive base/status register overlap
    Unpredictable_BASEOVERLAP,
    // Store-exclusive data/status register overlap
    Unpredictable_DATAOVERLAP,
    // Load-store alignment checks
    Unpredictable_DEVPAGE2,
    // Instruction fetch from Device memory
    Unpredictable_INSTRDEVICE,
    // Reserved CPACR value
    Unpredictable_RESCPACR,
    // Reserved MAIR value
    Unpredictable_RESMAIR,
    // Reserved TEX:C:B value
    Unpredictable_RESTEXCB,
    // Reserved PRRR value
    Unpredictable_RESPRRR,
    // Reserved DACR field
    Unpredictable_RESDACR,
    // Reserved VTCR.S value
    Unpredictable_RESVTCRS,
    // Reserved TCR.TnSZ value
    Unpredictable_RESTnSZ,
    // Reserved SCTLR_ELx.TCF value
    Unpredictable_RESTCF,
    // Out-of-range TCR.TnSZ value
    Unpredictable_OORTnSZ,
    // IPA size exceeds PA size
    Unpredictable_LARGEIPA,
    // Syndrome for a known-passing conditional A32 instruction
    Unpredictable_ESRCONDPASS,
    // Illegal State exception: zero PSTATE.IT
    Unpredictable_ILZEROIT,
    // Illegal State exception: zero PSTATE.T
    Unpredictable_ILZEROT,
    // Debug: prioritization of Vector Catch
    Unpredictable_BPVECTORCATCHPRI,
    // Debug Vector Catch: match on 2nd halfword
    Unpredictable_VCMATCHHALF,
    // Debug Vector Catch: match on Data Abort or Prefetch abort
    Unpredictable_VCMATCHDAPA,
    // Debug watchpoints: non-zero MASK and non-ones BAS
    Unpredictable_WPMASKANDBAS,
    // Debug watchpoints: non-contiguous BAS
    Unpredictable_WPBASCONTIGUOUS,
    // Debug watchpoints: reserved MASK
    Unpredictable_RESWPMASK,
    // Debug watchpoints: non-zero MASKed bits of address
    Unpredictable_WPMASKEDBITS,
    // Debug breakpoints and watchpoints: reserved control bits
    Unpredictable_RESBPWPCTRL,
    // Debug breakpoints: not implemented
    Unpredictable_BPNOTIMPL,
    // Debug breakpoints: reserved type
    Unpredictable_RESBPTYPE,
    // Debug breakpoints: not-context-aware breakpoint
    Unpredictable_BPNOTCTXCMP,
    // Debug breakpoints: match on 2nd halfword of instruction
    Unpredictable_BPMATCHHALF,
    // Debug breakpoints: mismatch on 2nd halfword of instruction
    Unpredictable_BPMISMATCHHALF,
    // Debug: restart to a misaligned AArch32 PC value
    Unpredictable_RESTARTALIGNPC,
    // Debug: restart to a not-zero-extended AArch32 PC value
    Unpredictable_RESTARTZEROUPPERPC,
    // Zero top 32 bits of X registers in AArch32 state
    Unpredictable_ZEROUPPER,
    // Zero top 32 bits of PC on illegal return to AArch32 state
    Unpredictable_ERETZEROUPPERPC,
    // Force address to be aligned when interworking branch to A32 state
    Unpredictable_A32FORCEALIGNPC,
    // SMC disabled
    Unpredictable_SMD,
    // FF speculation
    Unpredictable_NONFAULT,
    // Zero top bits of Z registers in EL change
    Unpredictable_SVEZEROUPPER,
    // Load mem data in NF loads
    Unpredictable_SVELDNFDATA,
    // Write zeros in NF loads
    Unpredictable_SVELDNFZERO,
    // Access Flag Update by HW
    Unpredictable_AFUPDATE,
    // Consider SCTLR[].IESB in Debug state
    Unpredictable_IESBinDebug,
    // No events selected in PMSEVFR_EL1
    Unpredictable_ZEROPMSEVFR,
    // No operation type selected in PMSFCR_EL1
    Unpredictable_NOOPTYPES,
    // Zero latency in PMSLATFR_EL1
    Unpredictable_ZEROMINLATENCY,
    // Zero saved BType value in SPSR_ELx/DPSR_EL0
    Unpredictable_ZEROBTYPE,
    // Timestamp constrained to virtual or physical
    Unpredictable_EL2TIMESTAMP,
    Unpredictable_EL1TIMESTAMP,
    // Clearing DCC/ITR sticky flags when instruction is in flight
    Unpredictable_CLEARERRITEZERO};
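The Constraint and Unpredictable enumerations are the glue between call sites and the implementation's choice table: each CONSTRAINED UNPREDICTABLE call site names its case, and ConstrainUnpredictable() returns one of the constraints permitted there. As a non-normative illustration of that pattern only (all C names below are hypothetical; the mappings repeat example choices from the case statement above):

#include <assert.h>
#include <stdbool.h>

typedef enum { C_NONE, C_UNKNOWN, C_UNDEF, C_NOP, C_TRUE, C_FALSE,
               C_DISABLED, C_WBSUPPRESS, C_FAULT, C_FORCE } constraint_t;
typedef enum { U_WBOVERLAPLD, U_DEVPAGE2, U_BPMATCHHALF } unpredictable_t;

/* One central, implementation-chosen table, as in ConstrainUnpredictable(). */
static constraint_t constrain_unpredictable(unpredictable_t which) {
    switch (which) {
    case U_WBOVERLAPLD: return C_WBSUPPRESS; /* suppress the writeback  */
    case U_DEVPAGE2:    return C_FAULT;      /* take an alignment fault */
    case U_BPMATCHHALF: return C_FALSE;      /* no breakpoint match     */
    }
    return C_UNKNOWN;
}

/* Counterpart of ConstrainUnpredictableBool(): the call site only permits
 * a TRUE/FALSE outcome, which the assert documents. */
static bool constrain_unpredictable_bool(unpredictable_t which) {
    constraint_t c = constrain_unpredictable(which);
    assert(c == C_TRUE || c == C_FALSE);
    return c == C_TRUE;
}

Keeping the choices in a single switch mirrors the NOTE above: an implementation defines the table once, and the typed wrappers (Bool/Bits/Integer) only narrow the result.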
// AdvSIMDExpandImm()
// ==================

bits(64) AdvSIMDExpandImm(bit op, bits(4) cmode, bits(8) imm8)
    case cmode<3:1> of
        when '000'
            imm64 = Replicate(Zeros(24):imm8, 2);
        when '001'
            imm64 = Replicate(Zeros(16):imm8:Zeros(8), 2);
        when '010'
            imm64 = Replicate(Zeros(8):imm8:Zeros(16), 2);
        when '011'
            imm64 = Replicate(imm8:Zeros(24), 2);
        when '100'
            imm64 = Replicate(Zeros(8):imm8, 4);
        when '101'
            imm64 = Replicate(imm8:Zeros(8), 4);
        when '110'
            if cmode<0> == '0' then
                imm64 = Replicate(Zeros(16):imm8:Ones(8), 2);
            else
                imm64 = Replicate(Zeros(8):imm8:Ones(16), 2);
        when '111'
            if cmode<0> == '0' && op == '0' then
                imm64 = Replicate(imm8, 8);
            if cmode<0> == '0' && op == '1' then
                imm8a = Replicate(imm8<7>, 8); imm8b = Replicate(imm8<6>, 8);
                imm8c = Replicate(imm8<5>, 8); imm8d = Replicate(imm8<4>, 8);
                imm8e = Replicate(imm8<3>, 8); imm8f = Replicate(imm8<2>, 8);
                imm8g = Replicate(imm8<1>, 8); imm8h = Replicate(imm8<0>, 8);
                imm64 = imm8a:imm8b:imm8c:imm8d:imm8e:imm8f:imm8g:imm8h;
            if cmode<0> == '1' && op == '0' then
                imm32 = imm8<7>:NOT(imm8<6>):Replicate(imm8<6>,5):imm8<5:0>:Zeros(19);
                imm64 = Replicate(imm32, 2);
            if cmode<0> == '1' && op == '1' then
                if UsingAArch32() then ReservedEncoding();
                imm64 = imm8<7>:NOT(imm8<6>):Replicate(imm8<6>,8):imm8<5:0>:Zeros(48);
    return imm64;
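As a rough, non-normative cross-check of the table above, the integer cmode cases can be transcribed into C. This sketch omits the cmode=='111x' bit-mask (op=='1') and floating-point expansions and is not part of the specification:

#include <stdint.h>

static uint64_t replicate32x2(uint32_t v) {          /* Replicate(v, 2) */
    return ((uint64_t)v << 32) | v;
}

static uint64_t advsimd_expand_imm(unsigned op, unsigned cmode, uint8_t imm8) {
    uint64_t h;
    switch ((cmode >> 1) & 7) {
    case 0: return replicate32x2((uint32_t)imm8);        /* 000x */
    case 1: return replicate32x2((uint32_t)imm8 << 8);   /* 001x */
    case 2: return replicate32x2((uint32_t)imm8 << 16);  /* 010x */
    case 3: return replicate32x2((uint32_t)imm8 << 24);  /* 011x */
    case 4:                                              /* 100x */
        h = imm8;
        return h | h << 16 | h << 32 | h << 48;
    case 5:                                              /* 101x */
        h = (uint64_t)imm8 << 8;
        return h | h << 16 | h << 32 | h << 48;
    case 6:                                              /* 110x */
        return (cmode & 1) == 0
            ? replicate32x2(((uint32_t)imm8 << 8) | 0xFFu)
            : replicate32x2(((uint32_t)imm8 << 16) | 0xFFFFu);
    default:                                             /* 111x */
        if ((cmode & 1) == 0 && op == 0) {               /* Replicate(imm8, 8) */
            uint64_t r = 0;
            for (int i = 0; i < 8; i++) r |= (uint64_t)imm8 << (8 * i);
            return r;
        }
        return 0; /* remaining op/cmode combinations not modelled in this sketch */
    }
}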
// PolynomialMult()
// ================

bits(M+N) PolynomialMult(bits(M) op1, bits(N) op2)
    result = Zeros(M+N);
    extended_op2 = ZeroExtend(op2, M+N);
    for i = 0 to M-1
        if op1<i> == '1' then
            result = result EOR LSL(extended_op2, i);
    return result;
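PolynomialMult() is a carry-less multiply over GF(2): each set bit of op1 XORs a shifted copy of op2 into the accumulator, so no carries propagate between bit positions. A non-normative C sketch of the M == N == 8 case (cf. the PMULL family of instructions):

#include <stdint.h>

static uint16_t polynomial_mult8(uint8_t op1, uint8_t op2) {
    uint16_t result = 0;
    uint16_t extended_op2 = op2;                 /* ZeroExtend(op2, M+N) */
    for (int i = 0; i < 8; i++) {
        if ((op1 >> i) & 1)
            result ^= (uint16_t)(extended_op2 << i);  /* result EOR LSL(...) */
    }
    return result;
}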
// SatQ()
// ======

(bits(N), boolean) SatQ(integer i, integer N, boolean unsigned)
    (result, sat) = if unsigned then UnsignedSatQ(i, N) else SignedSatQ(i, N);
    return (result, sat);
// SignedSatQ()
// ============

(bits(N), boolean) SignedSatQ(integer i, integer N)
    if i > 2^(N-1) - 1 then
        result = 2^(N-1) - 1; saturated = TRUE;
    elsif i < -(2^(N-1)) then
        result = -(2^(N-1)); saturated = TRUE;
    else
        result = i; saturated = FALSE;
    return (result<N-1:0>, saturated);
// UnsignedRSqrtEstimate()
// =======================

bits(N) UnsignedRSqrtEstimate(bits(N) operand)
    assert N IN {16,32};
    if operand<N-1:N-2> == '00' then // Operands <= 0x3FFFFFFF produce 0xFFFFFFFF
        result = Ones(N);
    else
        // input is in the range 0x40000000 .. 0xffffffff representing [0.25 .. 1.0)
        // estimate is in the range 256 .. 511 representing [1.0 .. 2.0)
        case N of
            when 16 estimate = RecipSqrtEstimate(UInt(operand<15:7>));
            when 32 estimate = RecipSqrtEstimate(UInt(operand<31:23>));
        // result is in the range 0x80000000 .. 0xff800000 representing [1.0 .. 2.0)
        result = estimate<8:0> : Zeros(N-9);
    return result;
// UnsignedRecipEstimate()
// =======================

bits(N) UnsignedRecipEstimate(bits(N) operand)
    assert N IN {16,32};
    if operand<N-1> == '0' then // Operands <= 0x7FFFFFFF produce 0xFFFFFFFF
        result = Ones(N);
    else
        // input is in the range 0x80000000 .. 0xffffffff representing [0.5 .. 1.0)
        // estimate is in the range 256 to 511 representing [1.0 .. 2.0)
        case N of
            when 16 estimate = RecipEstimate(UInt(operand<15:7>));
            when 32 estimate = RecipEstimate(UInt(operand<31:23>));
        // result is in the range 0x80000000 .. 0xff800000 representing [1.0 .. 2.0)
        result = estimate<8:0> : Zeros(N-9);
    return result;
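Both estimate functions share one shape: a saturating early-out, a 9-bit table lookup indexed by the operand's top nine bits, and the 9-bit estimate placed in the top of an N-bit result. A non-normative C sketch of the N == 32 reciprocal case; recip_estimate9() is a hypothetical stand-in for the architectural RecipEstimate() table, which is defined elsewhere on this page and omitted here:

#include <stdint.h>

extern unsigned recip_estimate9(unsigned a);  /* 256..511 in, 256..511 out */

static uint32_t unsigned_recip_estimate32(uint32_t operand) {
    if ((operand & 0x80000000u) == 0)         /* operand<31> == '0' */
        return 0xFFFFFFFFu;                   /* Ones(32) */
    unsigned estimate = recip_estimate9(operand >> 23); /* UInt(operand<31:23>) */
    return (uint32_t)estimate << 23;          /* estimate<8:0> : Zeros(23) */
}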
// UnsignedSatQ()
// ==============

(bits(N), boolean) UnsignedSatQ(integer i, integer N)
    if i > 2^N - 1 then
        result = 2^N - 1; saturated = TRUE;
    elsif i < 0 then
        result = 0; saturated = TRUE;
    else
        result = i; saturated = FALSE;
    return (result<N-1:0>, saturated);
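SatQ() and its signed/unsigned halves clamp an arbitrary-precision integer into an N-bit destination and report whether clamping occurred. A non-normative C sketch for N == 16, returning the pseudocode's (result, saturated) pair through an out-parameter:

#include <stdbool.h>
#include <stdint.h>

static int16_t signed_sat_q16(int32_t i, bool *saturated) {
    if (i > INT16_MAX) { *saturated = true; return INT16_MAX; }
    if (i < INT16_MIN) { *saturated = true; return INT16_MIN; }
    *saturated = false;
    return (int16_t)i;
}

static uint16_t unsigned_sat_q16(int32_t i, bool *saturated) {
    if (i > (int32_t)UINT16_MAX) { *saturated = true; return UINT16_MAX; }
    if (i < 0)                   { *saturated = true; return 0; }
    *saturated = false;
    return (uint16_t)i;
}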
// SelfHostedTraceEnabled()
// ========================
// Returns TRUE if Self-hosted Trace is enabled.

boolean SelfHostedTraceEnabled()
    if !HaveTraceExt() || !HaveSelfHostedTrace() then return FALSE;
    if HaveEL(EL3) then
        secure_trace_enable = (if ELUsingAArch32(EL3) then SDCR.STE else MDCR_EL3.STE);
        niden = (secure_trace_enable == '0' || ExternalSecureNoninvasiveDebugEnabled());
    else
        // If no EL3, IsSecure() returns the Effective value of (SCR_EL3.NS == '0')
        niden = (!IsSecure() || ExternalSecureNoninvasiveDebugEnabled());
    return (EDSCR.TFO == '0' || !niden);
// TraceAllowed()
// ==============
// Returns TRUE if Self-hosted Trace is allowed in the current Security state and Exception Level

boolean TraceAllowed()
    if !HaveTraceExt() then return FALSE;
    if SelfHostedTraceEnabled() then
        if IsSecure() && HaveEL(EL3) then
            secure_trace_enable = (if ELUsingAArch32(EL3) then SDCR.STE else MDCR_EL3.STE);
            if secure_trace_enable == '0' then return FALSE;
        TGE_bit = if EL2Enabled() then HCR_EL2.TGE else '0';
        case PSTATE.EL of
            when EL3 TRE_bit = if HighestELUsingAArch32() then TRFCR.E1TRE else '0';
            when EL2 TRE_bit = TRFCR_EL2.E2TRE;
            when EL1 TRE_bit = TRFCR_EL1.E1TRE;
            when EL0 TRE_bit = if TGE_bit == '1' then TRFCR_EL2.E0HTRE else TRFCR_EL1.E0TRE;
        return TRE_bit == '1';
    else
        return (!IsSecure() || ExternalSecureNoninvasiveDebugEnabled());
// TraceContextIDR2()
// ==================

boolean TraceContextIDR2()
    if !TraceAllowed() || !HaveEL(EL2) then return FALSE;
    return (!SelfHostedTraceEnabled() || TRFCR_EL2.CX == '1');
// TraceTimeStamp()
// ================

TimeStamp TraceTimeStamp()
    if SelfHostedTraceEnabled() then
        if HaveEL(EL2) then
            TS_el2 = TRFCR_EL2.TS;
            if TS_el2 == '10' then // Reserved value
                (-, TS_el2) = ConstrainUnpredictableBits(Unpredictable_EL2TIMESTAMP);
            case TS_el2 of
                when '00' /* falls through to check TRFCR_EL1.TS */
                when '01' return TimeStamp_Virtual;
                when '11' return TimeStamp_Physical;
                otherwise Unreachable(); // ConstrainUnpredictableBits removes this case
        TS_el1 = TRFCR_EL1.TS;
        if TS_el1 == 'x0' then // Reserved values
            (-, TS_el1) = ConstrainUnpredictableBits(Unpredictable_EL1TIMESTAMP);
        case TS_el1 of
            when '01' return TimeStamp_Virtual;
            when '11' return TimeStamp_Physical;
            otherwise Unreachable(); // ConstrainUnpredictableBits removes this case
    else
        return TimeStamp_CoreSight;
// CombineS1S2AttrHints()
// ======================
// Combines cacheability attributes and allocation hints from stage 1 and stage 2

MemAttrHints CombineS1S2AttrHints(MemAttrHints s1desc, MemAttrHints s2desc)
    MemAttrHints result;
    apply_force_writeback = HaveStage2MemAttrControl() && HCR_EL2.FWB == '1';
    if apply_force_writeback then
        if s2desc.attrs == '11' then
            result.attrs = s1desc.attrs;
        elsif s2desc.attrs == '10' then
            result.attrs = MemAttr_WB; // force Write-back
        else
            result.attrs = MemAttr_NC;
    else
        if s2desc.attrs == '01' || s1desc.attrs == '01' then
            result.attrs = bits(2) UNKNOWN; // Reserved
        elsif s2desc.attrs == MemAttr_NC || s1desc.attrs == MemAttr_NC then
            result.attrs = MemAttr_NC; // Non-cacheable
        elsif s2desc.attrs == MemAttr_WT || s1desc.attrs == MemAttr_WT then
            result.attrs = MemAttr_WT; // Write-through
        else
            result.attrs = MemAttr_WB; // Write-back
    if HaveStage2MemAttrControl() && HCR_EL2.FWB == '1' then
        if s1desc.attrs != MemAttr_NC && result.attrs != MemAttr_NC then
            result.hints = s1desc.hints;
        elsif s1desc.attrs == MemAttr_NC && result.attrs != MemAttr_NC then
            result.hints = MemHint_RWA;
    else
        result.hints = s1desc.hints;
    result.transient = s1desc.transient;
    return result;
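In the non-FWB arm above, the two cacheability encodings combine toward the weaker attribute: Non-cacheable dominates Write-through, which dominates Write-back, and the reserved '01' encoding produces an UNKNOWN result. A non-normative C sketch, assuming the 2-bit encodings implied by ShortConvertAttrsHints() below (NC='00', reserved='01', WT='10', WB='11'); the reserved case arbitrarily picks NC, since UNKNOWN permits any allocated value:

enum { MEMATTR_NC = 0x0, MEMATTR_RESERVED = 0x1, MEMATTR_WT = 0x2, MEMATTR_WB = 0x3 };

static unsigned combine_s1s2_attrs(unsigned s1, unsigned s2) {
    if (s1 == MEMATTR_RESERVED || s2 == MEMATTR_RESERVED)
        return MEMATTR_NC;                        /* stands in for bits(2) UNKNOWN */
    if (s1 == MEMATTR_NC || s2 == MEMATTR_NC) return MEMATTR_NC;
    if (s1 == MEMATTR_WT || s2 == MEMATTR_WT) return MEMATTR_WT;
    return MEMATTR_WB;                            /* both Write-back */
}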
// CombineS1S2Desc()
// =================
// Combines the address descriptors from stage 1 and stage 2

AddressDescriptor CombineS1S2Desc(AddressDescriptor s1desc, AddressDescriptor s2desc)
    AddressDescriptor result;
    result.paddress = s2desc.paddress;
    apply_force_writeback = HaveStage2MemAttrControl() && HCR_EL2.FWB == '1';
    if IsFault(s1desc) || IsFault(s2desc) then
        result = if IsFault(s1desc) then s1desc else s2desc;
    else
        result.fault = NoFault();
        if (s2desc.memattrs.memtype == MemType_Device ||
            (apply_force_writeback && s1desc.memattrs.memtype == MemType_Device &&
             s2desc.memattrs.inner.attrs != '10') ||
            (!apply_force_writeback && s1desc.memattrs.memtype == MemType_Device)) then
            result.memattrs.memtype = MemType_Device;
            if s1desc.memattrs.memtype == MemType_Normal then
                result.memattrs.device = s2desc.memattrs.device;
            elsif s2desc.memattrs.memtype == MemType_Normal then
                result.memattrs.device = s1desc.memattrs.device;
            else // Both Device
                result.memattrs.device = CombineS1S2Device(s1desc.memattrs.device,
                                                           s2desc.memattrs.device);
            result.memattrs.tagged = FALSE;
        else
            result.memattrs.memtype = MemType_Normal;
            result.memattrs.device = DeviceType UNKNOWN;
            if apply_force_writeback then
                if s2desc.memattrs.memtype == MemType_Normal && s2desc.memattrs.inner.attrs == '10' then
                    result.memattrs.inner.attrs = MemAttr_WB; // force Write-back
                elsif s2desc.memattrs.inner.attrs == '11' then
                    result.memattrs.inner.attrs = s1desc.memattrs.inner.attrs;
                else
                    result.memattrs.inner.attrs = MemAttr_NC;
                result.memattrs.outer = result.memattrs.inner;
            else
                result.memattrs.inner = CombineS1S2AttrHints(s1desc.memattrs.inner, s2desc.memattrs.inner);
                result.memattrs.outer = CombineS1S2AttrHints(s1desc.memattrs.outer, s2desc.memattrs.outer);
            result.memattrs.shareable = (s1desc.memattrs.shareable || s2desc.memattrs.shareable);
            result.memattrs.outershareable = (s1desc.memattrs.outershareable ||
                                              s2desc.memattrs.outershareable);
            result.memattrs.tagged = (s1desc.memattrs.tagged &&
                                      result.memattrs.inner.attrs == MemAttr_WB &&
                                      result.memattrs.inner.hints == MemHint_RWA &&
                                      result.memattrs.outer.attrs == MemAttr_WB &&
                                      result.memattrs.outer.hints == MemHint_RWA);
        result.memattrs = MemAttrDefaults(result.memattrs);
    return result;
// CombineS1S2Device()
// ===================
// Combines device types from stage 1 and stage 2

DeviceType CombineS1S2Device(DeviceType s1device, DeviceType s2device)
    if s2device == DeviceType_nGnRnE || s1device == DeviceType_nGnRnE then
        result = DeviceType_nGnRnE;
    elsif s2device == DeviceType_nGnRE || s1device == DeviceType_nGnRE then
        result = DeviceType_nGnRE;
    elsif s2device == DeviceType_nGRE || s1device == DeviceType_nGRE then
        result = DeviceType_nGRE;
    else
        result = DeviceType_GRE;
    return result;
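CombineS1S2Device() always picks the more restrictive of the two Device types. Encoding the types in restrictiveness order reduces the elsif ladder to a minimum, as this non-normative C sketch shows:

typedef enum { DEV_nGnRnE = 0, DEV_nGnRE = 1, DEV_nGRE = 2, DEV_GRE = 3 } device_t;

static device_t combine_s1s2_device(device_t s1, device_t s2) {
    return s1 < s2 ? s1 : s2;   /* most restrictive (smallest) wins */
}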
// LongConvertAttrsHints()
// =======================
// Convert the long attribute fields for Normal memory as used in the MAIR fields
// to orthogonal attributes and hints

MemAttrHints LongConvertAttrsHints(bits(4) attrfield, AccType acctype)
    assert !IsZero(attrfield);
    MemAttrHints result;
    if S1CacheDisabled(acctype) then // Force Non-cacheable
        result.attrs = MemAttr_NC;
        result.hints = MemHint_No;
    else
        if attrfield<3:2> == '00' then // Write-through transient
            result.attrs = MemAttr_WT;
            result.hints = attrfield<1:0>;
            result.transient = TRUE;
        elsif attrfield<3:0> == '0100' then // Non-cacheable (no allocate)
            result.attrs = MemAttr_NC;
            result.hints = MemHint_No;
            result.transient = FALSE;
        elsif attrfield<3:2> == '01' then // Write-back transient
            result.attrs = MemAttr_WB;
            result.hints = attrfield<1:0>;
            result.transient = TRUE;
        else // Write-through/Write-back non-transient
            result.attrs = attrfield<3:2>;
            result.hints = attrfield<1:0>;
            result.transient = FALSE;
    return result;
// Memory barrier instruction that preserves the relative order of memory accesses to System
// registers due to trace operations and other memory accesses to the same registers
TraceSynchronizationBarrier();

// MemAttrDefaults()
// =================
// Supply default values for memory attributes, including overriding the shareability attributes
// for Device and Non-cacheable memory types.

MemoryAttributes MemAttrDefaults(MemoryAttributes memattrs)
    if memattrs.memtype == MemType_Device then
        memattrs.inner = MemAttrHints UNKNOWN;
        memattrs.outer = MemAttrHints UNKNOWN;
        memattrs.shareable = TRUE;
        memattrs.outershareable = TRUE;
    else
        memattrs.device = DeviceType UNKNOWN;
        if memattrs.inner.attrs == MemAttr_NC && memattrs.outer.attrs == MemAttr_NC then
            memattrs.shareable = TRUE;
            memattrs.outershareable = TRUE;
    return memattrs;
// S1CacheDisabled()
// =================

boolean S1CacheDisabled(AccType acctype)
    if ELUsingAArch32(S1TranslationRegime()) then
        if PSTATE.EL == EL2 then
            enable = if acctype == AccType_IFETCH then HSCTLR.I else HSCTLR.C;
        else
            enable = if acctype == AccType_IFETCH then SCTLR.I else SCTLR.C;
    else
        enable = if acctype == AccType_IFETCH then SCTLR[].I else SCTLR[].C;
    return enable == '0';
// S2AttrDecode()
// ==============
// Converts the Stage 2 attribute fields into orthogonal attributes and hints

MemoryAttributes S2AttrDecode(bits(2) SH, bits(4) attr, AccType acctype)
    MemoryAttributes memattrs;
    apply_force_writeback = HaveStage2MemAttrControl() && HCR_EL2.FWB == '1';
    // Device memory
    if (apply_force_writeback && attr<2> == '0') || attr<3:2> == '00' then
        memattrs.memtype = MemType_Device;
        case attr<1:0> of
            when '00' memattrs.device = DeviceType_nGnRnE;
            when '01' memattrs.device = DeviceType_nGnRE;
            when '10' memattrs.device = DeviceType_nGRE;
            when '11' memattrs.device = DeviceType_GRE;
    // Normal memory
    elsif apply_force_writeback then
        if attr<2> == '1' then
            memattrs.memtype = MemType_Normal;
            memattrs.inner.attrs = attr<1:0>;
            memattrs.outer.attrs = attr<1:0>;
    elsif attr<1:0> != '00' then
        memattrs.memtype = MemType_Normal;
        memattrs.outer = S2ConvertAttrsHints(attr<3:2>, acctype);
        memattrs.inner = S2ConvertAttrsHints(attr<1:0>, acctype);
        memattrs.shareable = SH<1> == '1';
        memattrs.outershareable = SH == '10';
    else
        memattrs = MemoryAttributes UNKNOWN; // Reserved
    return MemAttrDefaults(memattrs);
// S2CacheDisabled()
// =================

boolean S2CacheDisabled(AccType acctype)
    if ELUsingAArch32(EL2) then
        disable = if acctype == AccType_IFETCH then HCR2.ID else HCR2.CD;
    else
        disable = if acctype == AccType_IFETCH then HCR_EL2.ID else HCR_EL2.CD;
    return disable == '1';
// S2ConvertAttrsHints()
// =====================
// Converts the attribute fields for Normal memory as used in stage 2
// descriptors to orthogonal attributes and hints

MemAttrHints S2ConvertAttrsHints(bits(2) attr, AccType acctype)
    assert !IsZero(attr);
    MemAttrHints result;
    if HCR_EL2.FWB == '0' && S2CacheDisabled(acctype) then // Force Non-cacheable
        result.attrs = MemAttr_NC;
        result.hints = MemHint_No;
    else
        case attr of
            when '01' // Non-cacheable (no allocate)
                result.attrs = MemAttr_NC;
                result.hints = MemHint_No;
            when '10' // Write-through
                result.attrs = MemAttr_WT;
                result.hints = MemHint_RWA;
            when '11' // Write-back
                result.attrs = MemAttr_WB;
                result.hints = MemHint_RWA;
    result.transient = FALSE;
    return result;
// ShortConvertAttrsHints()
// ========================
// Converts the short attribute fields for Normal memory as used in the TTBR and
// TEX fields to orthogonal attributes and hints

MemAttrHints ShortConvertAttrsHints(bits(2) RGN, AccType acctype, boolean secondstage)
    MemAttrHints result;
    if (!secondstage && S1CacheDisabled(acctype)) || (secondstage && S2CacheDisabled(acctype)) then
        // Force Non-cacheable
        result.attrs = MemAttr_NC;
        result.hints = MemHint_No;
    else
        case RGN of
            when '00' // Non-cacheable (no allocate)
                result.attrs = MemAttr_NC;
                result.hints = MemHint_No;
            when '01' // Write-back, Read and Write allocate
                result.attrs = MemAttr_WB;
                result.hints = MemHint_RWA;
            when '10' // Write-through, Read allocate
                result.attrs = MemAttr_WT;
                result.hints = MemHint_RA;
            when '11' // Write-back, Read allocate
                result.attrs = MemAttr_WB;
                result.hints = MemHint_RA;
    result.transient = FALSE;
    return result;
// WalkAttrDecode()
// ================

MemoryAttributes WalkAttrDecode(bits(2) SH, bits(2) ORGN, bits(2) IRGN, boolean secondstage)
    MemoryAttributes memattrs;
    AccType acctype = AccType_NORMAL;
    memattrs.memtype = MemType_Normal;
    memattrs.inner = ShortConvertAttrsHints(IRGN, acctype, secondstage);
    memattrs.outer = ShortConvertAttrsHints(ORGN, acctype, secondstage);
    memattrs.shareable = SH<1> == '1';
    memattrs.outershareable = SH == '10';
    memattrs.tagged = FALSE;
    return MemAttrDefaults(memattrs);
// HasS2Translation()
// ==================
// Returns TRUE if stage 2 translation is present for the current translation regime

boolean HasS2Translation()
    return (EL2Enabled() && !IsInHost() && PSTATE.EL IN {EL0,EL1});
// Have16bitVMID()
// ===============
// Returns TRUE if EL2 and support for a 16-bit VMID are implemented.

boolean Have16bitVMID()
    return HaveEL(EL2) && boolean IMPLEMENTATION_DEFINED;
// PAMax()
// =======
// Returns the IMPLEMENTATION DEFINED upper limit on the physical address
// size for this processor, as log2().

integer PAMax()
    return integer IMPLEMENTATION_DEFINED "Maximum Physical Address Size";
// S1TranslationRegime()
// =====================
// Stage 1 translation regime for the given Exception level

bits(2) S1TranslationRegime(bits(2) el)
    if el != EL0 then
        return el;
    elsif HaveEL(EL3) && ELUsingAArch32(EL3) && SCR.NS == '0' then
        return EL3;
    elsif HaveVirtHostExt() && ELIsInHost(el) then
        return EL2;
    else
        return EL1;

// S1TranslationRegime()
// =====================
// Returns the Exception level controlling the current Stage 1 translation regime. For the most
// part this is unused in code because the system register accessors (SCTLR[], etc.) implicitly
// return the correct value.

bits(2) S1TranslationRegime()
    return S1TranslationRegime(PSTATE.EL);
// VAMax()
// =======
// Returns the IMPLEMENTATION DEFINED upper limit on the virtual address
// size for this processor, as log2().

integer VAMax()
    return integer IMPLEMENTATION_DEFINED "Maximum Virtual Address Size";
Internal version only: isa v00_98, pseudocode v8.5-2019-06_rc2-5-g22901f2; Build timestamp: 2019-06-27T01:26
Copyright © 2010-2019 Arm Limited or its affiliates. All rights reserved. This document is Non-Confidential.