
Shared Pseudocode Functions

This page displays common pseudocode functions shared by many pages.

Pseudocodes

Library pseudocode for aarch32/debug/VCRMatch/AArch32.VCRMatch

// AArch32.VCRMatch()
// ==================

boolean AArch32.VCRMatch(bits(32) vaddress)

    if UsingAArch32() && ELUsingAArch32(EL1) && PSTATE.EL != EL2 then
        // Each bit position in this string corresponds to a bit in DBGVCR and an exception vector.
        match_word = Zeros(32);

        if vaddress<31:5> == ExcVectorBase()<31:5> then
            if HaveEL(EL3) && !IsSecure() then
                match_word<UInt(vaddress<4:2>) + 24> = '1';    // Non-secure vectors
            else
                match_word<UInt(vaddress<4:2>) + 0> = '1';     // Secure vectors (or no EL3)

        if HaveEL(EL3) && ELUsingAArch32(EL3) && IsSecure() && vaddress<31:5> == MVBAR<31:5> then
            match_word<UInt(vaddress<4:2>) + 8> = '1';         // Monitor vectors

        // Mask out bits not corresponding to vectors.
        if !HaveEL(EL3) then
            mask = '00000000':'00000000':'00000000':'11011110';    // DBGVCR[31:8] are RES0
        elsif !ELUsingAArch32(EL3) then
            mask = '11011110':'00000000':'00000000':'11011110';    // DBGVCR[15:8] are RES0
        else
            mask = '11011110':'00000000':'11011100':'11011110';

        match_word = match_word AND DBGVCR AND mask;
        match = !IsZero(match_word);

        // Check for UNPREDICTABLE case - match on Prefetch Abort and Data Abort vectors
        if !IsZero(match_word<28:27,12:11,4:3>) && DebugTarget() == PSTATE.EL then
            match = ConstrainUnpredictableBool(Unpredictable_VCMATCHDAPA);

        if !IsZero(vaddress<1:0>) && match then
            match = ConstrainUnpredictableBool(Unpredictable_VCMATCHHALF);
    else
        match = FALSE;

    return match;
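
As an informal illustration of the test above, here is a minimal C sketch that evaluates the DBGVCR vector-catch match for one address. The function and parameter names are ours, and register values are passed in as plain integers rather than read through a real debug interface.

    #include <stdint.h>
    #include <stdbool.h>

    static bool vcr_match(uint32_t vaddress, uint32_t vector_base, uint32_t mvbar,
                          uint32_t dbgvcr, bool have_el3, bool el3_aarch32, bool secure)
    {
        uint32_t match_word = 0;
        unsigned vec = (vaddress >> 2) & 7;                  /* vaddress<4:2> */

        if ((vaddress >> 5) == (vector_base >> 5))           /* normal vectors */
            match_word |= 1u << (vec + ((have_el3 && !secure) ? 24 : 0));
        if (have_el3 && el3_aarch32 && secure && (vaddress >> 5) == (mvbar >> 5))
            match_word |= 1u << (vec + 8);                   /* Monitor vectors */

        /* The three masks from the pseudocode: '11011110' = 0xDE, '11011100' = 0xDC. */
        uint32_t mask = !have_el3    ? 0x000000DEu
                      : !el3_aarch32 ? 0xDE0000DEu
                                     : 0xDE00DCDEu;

        return (match_word & dbgvcr & mask) != 0;
    }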

Library pseudocode for aarch32/debug/authentication/AArch32.SelfHostedSecurePrivilegedInvasiveDebugEnabled

// AArch32.SelfHostedSecurePrivilegedInvasiveDebugEnabled()
// ========================================================

boolean AArch32.SelfHostedSecurePrivilegedInvasiveDebugEnabled()
    // The definition of this function is IMPLEMENTATION DEFINED.
    // In the recommended interface, AArch32.SelfHostedSecurePrivilegedInvasiveDebugEnabled returns
    // the state of the (DBGEN AND SPIDEN) signal.
    if !HaveEL(EL3) && !IsSecure() then return FALSE;
    return DBGEN == HIGH && SPIDEN == HIGH;

Library pseudocode for aarch32/debug/breakpoint/AArch32.BreakpointMatch

// AArch32.BreakpointMatch()
// =========================
// Breakpoint matching in an AArch32 translation regime.

(boolean,boolean) AArch32.BreakpointMatch(integer n, bits(32) vaddress, integer size)
    assert ELUsingAArch32(S1TranslationRegime());
    assert n <= UInt(DBGDIDR.BRPs);

    enabled = DBGBCR[n].E == '1';
    ispriv = PSTATE.EL != EL0;
    linked = DBGBCR[n].BT == '0x01';
    isbreakpnt = TRUE;
    linked_to = FALSE;

    state_match = AArch32.StateMatch(DBGBCR[n].SSC, DBGBCR[n].HMC, DBGBCR[n].PMC,
                                     linked, DBGBCR[n].LBN, isbreakpnt, ispriv);
    (value_match, value_mismatch) = AArch32.BreakpointValueMatch(n, vaddress, linked_to);

    if size == 4 then                        // Check second halfword
        // If the breakpoint address and BAS of an Address breakpoint match the address of the
        // second halfword of an instruction, but not the address of the first halfword, it is
        // CONSTRAINED UNPREDICTABLE whether or not this breakpoint generates a Breakpoint debug
        // event.
        (match_i, mismatch_i) = AArch32.BreakpointValueMatch(n, vaddress + 2, linked_to);
        if !value_match && match_i then
            value_match = ConstrainUnpredictableBool(Unpredictable_BPMATCHHALF);
        if value_mismatch && !mismatch_i then
            value_mismatch = ConstrainUnpredictableBool(Unpredictable_BPMISMATCHHALF);

    if vaddress<1> == '1' && DBGBCR[n].BAS == '1111' then
        // The above notwithstanding, if DBGBCR[n].BAS == '1111', then it is CONSTRAINED
        // UNPREDICTABLE whether or not a Breakpoint debug event is generated for an instruction
        // at the address DBGBVR[n]+2.
        if value_match then
            value_match = ConstrainUnpredictableBool(Unpredictable_BPMATCHHALF);
        if !value_mismatch then
            value_mismatch = ConstrainUnpredictableBool(Unpredictable_BPMISMATCHHALF);

    match = value_match && state_match && enabled;
    mismatch = value_mismatch && state_match && enabled;

    return (match, mismatch);

Library pseudocode for aarch32/debug/breakpoint/AArch32.BreakpointValueMatch

// AArch32.BreakpointValueMatch()
// ==============================
// The first result is whether an Address Match or Context breakpoint is programmed on the
// instruction at "address". The second result is whether an Address Mismatch breakpoint is
// programmed on the instruction, that is, whether the instruction should be stepped.

(boolean,boolean) AArch32.BreakpointValueMatch(integer n, bits(32) vaddress, boolean linked_to)

    // "n" is the identity of the breakpoint unit to match against.
    // "vaddress" is the current instruction address, ignored if linked_to is TRUE and for Context
    //   matching breakpoints.
    // "linked_to" is TRUE if this is a call from StateMatch for linking.

    // If a non-existent breakpoint then it is CONSTRAINED UNPREDICTABLE whether this gives
    // no match or the breakpoint is mapped to another UNKNOWN implemented breakpoint.
    if n > UInt(DBGDIDR.BRPs) then
        (c, n) = ConstrainUnpredictableInteger(0, UInt(DBGDIDR.BRPs), Unpredictable_BPNOTIMPL);
        assert c IN {Constraint_DISABLED, Constraint_UNKNOWN};
        if c == Constraint_DISABLED then return (FALSE,FALSE);

    // If this breakpoint is not enabled, it cannot generate a match. (This could also happen on a
    // call from StateMatch for linking).
    if DBGBCR[n].E == '0' then return (FALSE,FALSE);

    context_aware = (n >= UInt(DBGDIDR.BRPs) - UInt(DBGDIDR.CTX_CMPs));

    // If BT is set to a reserved type, behaves either as disabled or as a not-reserved type.
    dbgtype = DBGBCR[n].BT;

    if ((dbgtype IN {'011x','11xx'} && !HaveVirtHostExt() && !HaveV82Debug()) || // Context matching
          (dbgtype == '010x' && HaltOnBreakpointOrWatchpoint()) ||               // Address mismatch
          (dbgtype != '0x0x' && !context_aware) ||                               // Context matching
          (dbgtype == '1xxx' && !HaveEL(EL2))) then                              // EL2 extension
        (c, dbgtype) = ConstrainUnpredictableBits(Unpredictable_RESBPTYPE);
        assert c IN {Constraint_DISABLED, Constraint_UNKNOWN};
        if c == Constraint_DISABLED then return (FALSE,FALSE);
        // Otherwise the value returned by ConstrainUnpredictableBits must be a not-reserved value

    // Determine what to compare against.
    match_addr = (dbgtype == '0x0x');
    mismatch   = (dbgtype == '010x');
    match_vmid = (dbgtype == '10xx');
    match_cid1 = (dbgtype == 'xx1x');
    match_cid2 = (dbgtype == '11xx');
    linked     = (dbgtype == 'xxx1');

    // If this is a call from StateMatch, return FALSE if the breakpoint is not programmed for a
    // VMID and/or context ID match, or if not context-aware. The above assertions mean that the
    // code can just test for match_addr == TRUE to confirm all these things.
    if linked_to && (!linked || match_addr) then return (FALSE,FALSE);

    // If called from BreakpointMatch return FALSE for Linked context ID and/or VMID matches.
    if !linked_to && linked && !match_addr then return (FALSE,FALSE);

    // Do the comparison.
    if match_addr then
        byte = UInt(vaddress<1:0>);
        assert byte IN {0,2};                    // "vaddress" is halfword aligned
        byte_select_match = (DBGBCR[n].BAS<byte> == '1');
        integer top = 31;
        BVR_match = (vaddress<top:2> == DBGBVR[n]<top:2>) && byte_select_match;
    elsif match_cid1 then
        BVR_match = (PSTATE.EL != EL2 && CONTEXTIDR == DBGBVR[n]<31:0>);
    if match_vmid then
        if ELUsingAArch32(EL2) then
            vmid = ZeroExtend(VTTBR.VMID, 16);
            bvr_vmid = ZeroExtend(DBGBXVR[n]<7:0>, 16);
        elsif !Have16bitVMID() || VTCR_EL2.VS == '0' then
            vmid = ZeroExtend(VTTBR_EL2.VMID<7:0>, 16);
            bvr_vmid = ZeroExtend(DBGBXVR[n]<7:0>, 16);
        else
            vmid = VTTBR_EL2.VMID;
            bvr_vmid = DBGBXVR[n]<15:0>;
        BXVR_match = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() && vmid == bvr_vmid);
    elsif match_cid2 then
        BXVR_match = ((HaveVirtHostExt() || HaveV82Debug()) && EL2Enabled() &&
                      !ELUsingAArch32(EL2) && DBGBXVR[n]<31:0> == CONTEXTIDR_EL2<31:0>);

    bvr_match_valid  = (match_addr || match_cid1);
    bxvr_match_valid = (match_vmid || match_cid2);

    match = (!bxvr_match_valid || BXVR_match) && (!bvr_match_valid || BVR_match);

    return (match && !mismatch, !match && mismatch);
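
The address-match path uses DBGBCR.BAS as a byte-select: for a halfword-aligned AArch32 address, bit <vaddress<1:0>> of BAS decides whether that halfword participates. A C sketch (names are ours):

    #include <stdint.h>
    #include <stdbool.h>

    static bool bas_selects(uint8_t bas, uint32_t vaddress)
    {
        unsigned byte = vaddress & 3;    /* 0 or 2 for halfword-aligned code */
        return (bas >> byte) & 1;
    }

For example, BAS = 0x3 matches only the halfword at DBGBVR, BAS = 0xC only the one at DBGBVR+2, and BAS = 0xF matches both halfwords of a 32-bit T32 instruction.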

Library pseudocode for aarch32/debug/breakpoint/AArch32.StateMatch

// AArch32.StateMatch()
// ====================
// Determine whether a breakpoint or watchpoint is enabled in the current mode and state.

boolean AArch32.StateMatch(bits(2) SSC, bit HMC, bits(2) PxC, boolean linked, bits(4) LBN,
                           boolean isbreakpnt, boolean ispriv)
    // "SSC", "HMC", "PxC" are the control fields from the DBGBCR[n] or DBGWCR[n] register.
    // "linked" is TRUE if this is a linked breakpoint/watchpoint type.
    // "LBN" is the linked breakpoint number from the DBGBCR[n] or DBGWCR[n] register.
    // "isbreakpnt" is TRUE for breakpoints, FALSE for watchpoints.
    // "ispriv" is valid for watchpoints, and selects between privileged and unprivileged accesses.

    // If parameters are set to a reserved type, behaves as either disabled or a defined type
    (c, SSC, HMC, PxC) = CheckValidStateMatch(SSC, HMC, PxC, isbreakpnt);
    if c == Constraint_DISABLED then return FALSE;
    // Otherwise the HMC,SSC,PxC values are either valid or the values returned by
    // CheckValidStateMatch are valid.

    PL2_match = HaveEL(EL2) && ((HMC == '1' && (SSC:PxC != '1000')) || SSC == '11');
    PL1_match = PxC<0> == '1';
    PL0_match = PxC<1> == '1';
    SSU_match = isbreakpnt && HMC == '0' && PxC == '00' && SSC != '11';

    if !ispriv && !isbreakpnt then
        priv_match = PL0_match;
    elsif SSU_match then
        priv_match = PSTATE.M IN {M32_User,M32_Svc,M32_System};
    else
        case PSTATE.EL of
            when EL3  priv_match = PL1_match;    // EL3 and EL1 are both PL1
            when EL2  priv_match = PL2_match;
            when EL1  priv_match = PL1_match;
            when EL0  priv_match = PL0_match;

    case SSC of
        when '00'  security_state_match = TRUE;                        // Both
        when '01'  security_state_match = !IsSecure();                 // Non-secure only
        when '10'  security_state_match = IsSecure();                  // Secure only
        when '11'  security_state_match = (HMC == '1' || IsSecure());  // HMC=1 -> Both, 0 -> Secure only

    if linked then
        // "LBN" must be an enabled context-aware breakpoint unit. If it is not context-aware then
        // it is CONSTRAINED UNPREDICTABLE whether this gives no match, or LBN is mapped to some
        // UNKNOWN breakpoint that is context-aware.
        lbn = UInt(LBN);
        first_ctx_cmp = (UInt(DBGDIDR.BRPs) - UInt(DBGDIDR.CTX_CMPs));
        last_ctx_cmp = UInt(DBGDIDR.BRPs);
        if (lbn < first_ctx_cmp || lbn > last_ctx_cmp) then
            (c, lbn) = ConstrainUnpredictableInteger(first_ctx_cmp, last_ctx_cmp,
                                                     Unpredictable_BPNOTCTXCMP);
            assert c IN {Constraint_DISABLED, Constraint_NONE, Constraint_UNKNOWN};
            case c of
                when Constraint_DISABLED  return FALSE;      // Disabled
                when Constraint_NONE      linked = FALSE;    // No linking
                // Otherwise ConstrainUnpredictableInteger returned a context-aware breakpoint

    if linked then
        vaddress = bits(32) UNKNOWN;
        linked_to = TRUE;
        (linked_match,-) = AArch32.BreakpointValueMatch(lbn, vaddress, linked_to);

    return priv_match && security_state_match && (!linked || linked_match);
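
The SSC security-state decode above reduces to a small table; a C sketch (function name is ours):

    #include <stdbool.h>

    static bool security_state_match(unsigned ssc, bool hmc, bool is_secure)
    {
        switch (ssc & 3) {
        case 0:  return true;               /* both security states */
        case 1:  return !is_secure;         /* Non-secure only */
        case 2:  return is_secure;          /* Secure only */
        default: return hmc || is_secure;   /* SSC=11: HMC=1 -> both, else Secure only */
        }
    }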

Library pseudocode for aarch32/debug/enables/AArch32.GenerateDebugExceptions

// AArch32.GenerateDebugExceptions()
// =================================

boolean AArch32.GenerateDebugExceptions()
    return AArch32.GenerateDebugExceptionsFrom(PSTATE.EL, IsSecure());

Library pseudocode for aarch32/debug/enables/AArch32.GenerateDebugExceptionsFrom

// AArch32.GenerateDebugExceptionsFrom()
// =====================================

boolean AArch32.GenerateDebugExceptionsFrom(bits(2) from, boolean secure)

    if from == EL0 && (ELIsInHost(from) || !ELStateUsingAArch32(EL1, secure)) then
        mask = bit UNKNOWN;                         // PSTATE.D mask, unused for EL0 case
        return AArch64.GenerateDebugExceptionsFrom(from, secure, mask);

    if DBGOSLSR.OSLK == '1' || DoubleLockStatus() || Halted() then
        return FALSE;

    if HaveEL(EL3) && secure then
        assert from != EL2;                         // Secure EL2 always uses AArch64
        if IsSecureEL2Enabled() then
            // Implies that EL3 and EL2 both using AArch64
            enabled = MDCR_EL3.SDD == '0';
        else
            spd = if ELUsingAArch32(EL3) then SDCR.SPD else MDCR_EL3.SPD32;
            if spd<1> == '1' then
                enabled = spd<0> == '1';
            else
                // SPD == 0b01 is reserved, but behaves the same as 0b00.
                enabled = AArch32.SelfHostedSecurePrivilegedInvasiveDebugEnabled();
        if from == EL0 then enabled = enabled || SDER.SUIDEN == '1';
    else
        enabled = from != EL2;

    return enabled;
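
The Secure-state SPD decode above follows a simple rule: if SPD<1> is set, SPD<0> gives the enable directly; otherwise (including the reserved 0b01 encoding) the decision falls back to the DBGEN/SPIDEN authentication signals. A C sketch with our own names:

    #include <stdbool.h>

    static bool secure_debug_enabled(unsigned spd, bool dbgen, bool spiden)
    {
        if (spd & 2)
            return spd & 1;          /* SPD == 0b1x: explicit disable/enable */
        return dbgen && spiden;      /* SPD == 0b0x: defer to the signals */
    }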

Library pseudocode for aarch32/debug/pmu/AArch32.CheckForPMUOverflow

// AArch32.CheckForPMUOverflow()
// =============================
// Signal Performance Monitors overflow IRQ and CTI overflow events

boolean AArch32.CheckForPMUOverflow()

    if !ELUsingAArch32(EL1) then return AArch64.CheckForPMUOverflow();

    pmuirq = PMCR.E == '1' && PMINTENSET<31> == '1' && PMOVSSET<31> == '1';
    for n = 0 to UInt(PMCR.N) - 1
        if HaveEL(EL2) then
            hpmn = if !ELUsingAArch32(EL2) then MDCR_EL2.HPMN else HDCR.HPMN;
            hpme = if !ELUsingAArch32(EL2) then MDCR_EL2.HPME else HDCR.HPME;
            E = (if n < UInt(hpmn) then PMCR.E else hpme);
        else
            E = PMCR.E;
        if E == '1' && PMINTENSET<n> == '1' && PMOVSSET<n> == '1' then pmuirq = TRUE;

    SetInterruptRequestLevel(InterruptID_PMUIRQ, if pmuirq then HIGH else LOW);

    CTI_SetEventLevel(CrossTriggerIn_PMUOverflow, if pmuirq then HIGH else LOW);

    // The request remains set until the condition is cleared. (For example, an interrupt handler
    // or cross-triggered event handler clears the overflow status flag by writing to PMOVSCLR_EL0.)

    return pmuirq;
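
The overflow scan above can be rendered as a short C loop, assuming EL2 is implemented and all register fields are passed in as plain integers (names are illustrative, not a real PMU API):

    #include <stdint.h>
    #include <stdbool.h>

    static bool check_pmu_overflow(uint32_t pmcr_n, bool pmcr_e, uint32_t hpmn,
                                   bool hpme, uint32_t pmintenset, uint32_t pmovsset)
    {
        /* The cycle counter uses bit 31 of the enable/overflow registers. */
        bool pmuirq = pmcr_e && ((pmintenset >> 31) & 1) && ((pmovsset >> 31) & 1);

        for (uint32_t n = 0; n < pmcr_n; n++) {
            /* Counters at or above HPMN are reserved for EL2 and use HPME instead. */
            bool e = (n < hpmn) ? pmcr_e : hpme;
            if (e && ((pmintenset >> n) & 1) && ((pmovsset >> n) & 1))
                pmuirq = true;
        }
        return pmuirq;
    }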

Library pseudocode for aarch32/debug/pmu/AArch32.CountEvents

// AArch32.CountEvents()
// =====================
// Return TRUE if counter "n" should count its event. For the cycle counter, n == 31.

boolean AArch32.CountEvents(integer n)
    assert n == 31 || n < UInt(PMCR.N);

    if !ELUsingAArch32(EL1) then return AArch64.CountEvents(n);

    // Event counting is disabled in Debug state
    debug = Halted();

    // In Non-secure state, some counters are reserved for EL2
    if HaveEL(EL2) then
        hpmn = if !ELUsingAArch32(EL2) then MDCR_EL2.HPMN else HDCR.HPMN;
        hpme = if !ELUsingAArch32(EL2) then MDCR_EL2.HPME else HDCR.HPME;
        resvd_for_el2 = n >= UInt(hpmn) && n != 31;
    else
        resvd_for_el2 = FALSE;

    if resvd_for_el2 then
        E = if ELUsingAArch32(EL2) then HDCR.HPME else MDCR_EL2.HPME;
    else
        E = PMCR.E;
    enabled = E == '1' && PMCNTENSET<n> == '1';

    // Event counting is allowed unless it is prohibited by any rule below
    prohibited = FALSE;

    // Event counting in Secure state is prohibited if all of:
    // * EL3 is implemented
    // * One of the following is true:
    //   - EL3 is using AArch64, MDCR_EL3.SPME == 0, and either:
    //     - FEAT_PMUv3p7 is not implemented
    //     - MDCR_EL3.MPMX == 0
    //   - EL3 is using AArch32 and SDCR.SPME == 0
    // * Not executing at EL0, or SDER.SUNIDEN == 0
    if HaveEL(EL3) && IsSecure() then
        spme = if ELUsingAArch32(EL3) then SDCR.SPME else MDCR_EL3.SPME;
        if !ELUsingAArch32(EL3) && HavePMUv3p7() then
            prohibited = spme == '0' && MDCR_EL3.MPMX == '0';
        else
            prohibited = spme == '0';
        if prohibited && PSTATE.EL == EL0 then
            prohibited = SDER.SUNIDEN == '0';

    // Event counting at EL2 is prohibited if all of:
    // * The HPMD Extension is implemented
    // * PMNx is not reserved for EL2
    // * HDCR.HPMD == 1
    if !prohibited && PSTATE.EL == EL2 && HaveHPMDExt() && !resvd_for_el2 then
        prohibited = HDCR.HPMD == '1';

    // The IMPLEMENTATION DEFINED authentication interface might override software controls
    if prohibited && !HaveNoSecurePMUDisableOverride() then
        prohibited = !ExternalSecureNoninvasiveDebugEnabled();

    // PMCR.DP disables the cycle counter when event counting is prohibited
    if enabled && prohibited && n == 31 then enabled = PMCR.DP == '0';

    // If FEAT_PMUv3p5 is implemented, cycle counting can be prohibited.
    // This is not overridden by PMCR.DP.
    if Havev85PMU() && n == 31 then
        if HaveEL(EL3) && IsSecure() then
            sccd = if ELUsingAArch32(EL3) then SDCR.SCCD else MDCR_EL3.SCCD;
            if sccd == '1' then prohibited = TRUE;
        if PSTATE.EL == EL2 && HDCR.HCCD == '1' then prohibited = TRUE;

    // Event counting might be frozen
    frozen = FALSE;

    // If FEAT_PMUv3p7 is implemented, event counting can be frozen
    if HavePMUv3p7() && n != 31 then
        ovflw = PMOVSR<UInt(PMCR.N)-1:0>;
        if resvd_for_el2 then
            FZ = if ELUsingAArch32(EL2) then HDCR.HPMFZO else MDCR_EL2.HPMFZO;
            ovflw<UInt(hpmn)-1:0> = Zeros();
        else
            FZ = PMCR.FZO;
            if HaveEL(EL2) then
                ovflw<UInt(PMCR.N)-1:UInt(hpmn)> = Zeros();
        frozen = FZ == '1' && !IsZero(ovflw);

    // Event counting can be filtered by the {P, U, NSK, NSU, NSH} bits
    filter = if n == 31 then PMCCFILTR else PMEVTYPER[n];

    P = filter<31>;
    U = filter<30>;
    NSK = if HaveEL(EL3) then filter<29> else '0';
    NSU = if HaveEL(EL3) then filter<28> else '0';
    NSH = if HaveEL(EL2) then filter<27> else '0';

    case PSTATE.EL of
        when EL0 filtered = if IsSecure() then U == '1' else U != NSU;
        when EL1 filtered = if IsSecure() then P == '1' else P != NSK;
        when EL2 filtered = NSH == '0';
        when EL3 filtered = P == '1';

    return !debug && enabled && !prohibited && !filtered && !frozen;
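
The final filter decode lends itself to a compact C sketch: P, U, NSK, NSU and NSH are single bits taken from PMEVTYPER (or PMCCFILTR for the cycle counter). All names below are ours:

    #include <stdint.h>
    #include <stdbool.h>

    enum el { EL0, EL1, EL2, EL3 };

    static bool event_filtered(uint32_t filter, enum el el, bool secure,
                               bool have_el2, bool have_el3)
    {
        bool p   = (filter >> 31) & 1;
        bool u   = (filter >> 30) & 1;
        bool nsk = have_el3 && ((filter >> 29) & 1);
        bool nsu = have_el3 && ((filter >> 28) & 1);
        bool nsh = have_el2 && ((filter >> 27) & 1);

        switch (el) {
        case EL0: return secure ? u : (u != nsu);
        case EL1: return secure ? p : (p != nsk);
        case EL2: return !nsh;
        default:  return p;          /* EL3 */
        }
    }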

Library pseudocode for aarch32/debug/takeexceptiondbg/AArch32.EnterHypModeInDebugState

// AArch32.EnterHypModeInDebugState()
// ==================================
// Take an exception in Debug state to Hyp mode.

AArch32.EnterHypModeInDebugState(ExceptionRecord exception)
    SynchronizeContext();
    assert HaveEL(EL2) && !IsSecure() && ELUsingAArch32(EL2);

    AArch32.ReportHypEntry(exception);
    AArch32.WriteMode(M32_Hyp);
    SPSR[] = bits(32) UNKNOWN;
    ELR_hyp = bits(32) UNKNOWN;
    // In Debug state, the PE always executes T32 instructions when in AArch32 state, and
    // PSTATE.{SS,A,I,F} are not observable so behave as UNKNOWN.
    PSTATE.T = '1';                             // PSTATE.J is RES0
    PSTATE.<SS,A,I,F> = bits(4) UNKNOWN;
    DLR = bits(32) UNKNOWN;
    DSPSR = bits(32) UNKNOWN;
    PSTATE.E = HSCTLR.EE;
    PSTATE.IL = '0';
    PSTATE.IT = '00000000';
    if HaveSSBSExt() then PSTATE.SSBS = bit UNKNOWN;
    EDSCR.ERR = '1';
    UpdateEDSCRFields();
    EndOfInstruction();

Library pseudocode for aarch32/debug/takeexceptiondbg/AArch32.EnterModeInDebugState

// AArch32.EnterModeInDebugState()
// ===============================
// Take an exception in Debug state to a mode other than Monitor and Hyp mode.

AArch32.EnterModeInDebugState(bits(5) target_mode)
    SynchronizeContext();
    assert ELUsingAArch32(EL1) && PSTATE.EL != EL2;

    if PSTATE.M == M32_Monitor then SCR.NS = '0';
    AArch32.WriteMode(target_mode);
    SPSR[] = bits(32) UNKNOWN;
    R[14] = bits(32) UNKNOWN;
    // In Debug state, the PE always executes T32 instructions when in AArch32 state, and
    // PSTATE.{SS,A,I,F} are not observable so behave as UNKNOWN.
    PSTATE.T = '1';                             // PSTATE.J is RES0
    PSTATE.<SS,A,I,F> = bits(4) UNKNOWN;
    DLR = bits(32) UNKNOWN;
    DSPSR = bits(32) UNKNOWN;
    PSTATE.E = SCTLR.EE;
    PSTATE.IL = '0';
    PSTATE.IT = '00000000';
    if HavePANExt() && SCTLR.SPAN == '0' then
        PSTATE.PAN = '1';
    if HaveSSBSExt() then PSTATE.SSBS = bit UNKNOWN;
    EDSCR.ERR = '1';
    UpdateEDSCRFields();                        // Update EDSCR processor state flags.
    EndOfInstruction();

Library pseudocode for aarch32/debug/takeexceptiondbg/AArch32.EnterMonitorModeInDebugState

// AArch32.EnterMonitorModeInDebugState()
// ======================================
// Take an exception in Debug state to Monitor mode.

AArch32.EnterMonitorModeInDebugState()
    SynchronizeContext();
    assert HaveEL(EL3) && ELUsingAArch32(EL3);

    from_secure = IsSecure();
    if PSTATE.M == M32_Monitor then SCR.NS = '0';
    AArch32.WriteMode(M32_Monitor);
    SPSR[] = bits(32) UNKNOWN;
    R[14] = bits(32) UNKNOWN;
    // In Debug state, the PE always executes T32 instructions when in AArch32 state, and
    // PSTATE.{SS,A,I,F} are not observable so behave as UNKNOWN.
    PSTATE.T = '1';                             // PSTATE.J is RES0
    PSTATE.<SS,A,I,F> = bits(4) UNKNOWN;
    PSTATE.E = SCTLR.EE;
    PSTATE.IL = '0';
    PSTATE.IT = '00000000';
    if HavePANExt() then
        if !from_secure then
            PSTATE.PAN = '0';
        elsif SCTLR.SPAN == '0' then
            PSTATE.PAN = '1';
    if HaveSSBSExt() then PSTATE.SSBS = bit UNKNOWN;
    DLR = bits(32) UNKNOWN;
    DSPSR = bits(32) UNKNOWN;
    EDSCR.ERR = '1';
    UpdateEDSCRFields();                        // Update EDSCR processor state flags.
    EndOfInstruction();

Library pseudocode for aarch32/debug/watchpoint/AArch32.WatchpointByteMatch

// AArch32.WatchpointByteMatch()
// =============================

boolean AArch32.WatchpointByteMatch(integer n, bits(32) vaddress)

    integer top = 31;
    bottom = if DBGWVR[n]<2> == '1' then 2 else 3;             // Word or doubleword
    byte_select_match = (DBGWCR[n].BAS<UInt(vaddress<bottom-1:0>)> != '0');
    mask = UInt(DBGWCR[n].MASK);

    // If DBGWCR[n].MASK is non-zero value and DBGWCR[n].BAS is not set to '11111111', or
    // DBGWCR[n].BAS specifies a non-contiguous set of bytes behavior is CONSTRAINED
    // UNPREDICTABLE.
    if mask > 0 && !IsOnes(DBGWCR[n].BAS) then
        byte_select_match = ConstrainUnpredictableBool(Unpredictable_WPMASKANDBAS);
    else
        LSB = (DBGWCR[n].BAS AND NOT(DBGWCR[n].BAS - 1));  MSB = (DBGWCR[n].BAS + LSB);
        if !IsZero(MSB AND (MSB - 1)) then                     // Not contiguous
            byte_select_match = ConstrainUnpredictableBool(Unpredictable_WPBASCONTIGUOUS);
            bottom = 3;                                        // For the whole doubleword

    // If the address mask is set to a reserved value, the behavior is CONSTRAINED UNPREDICTABLE.
    if mask > 0 && mask <= 2 then
        (c, mask) = ConstrainUnpredictableInteger(3, 31, Unpredictable_RESWPMASK);
        assert c IN {Constraint_DISABLED, Constraint_NONE, Constraint_UNKNOWN};
        case c of
            when Constraint_DISABLED  return FALSE;            // Disabled
            when Constraint_NONE      mask = 0;                // No masking
            // Otherwise the value returned by ConstrainUnpredictableInteger is a not-reserved value

    if mask > bottom then
        WVR_match = (vaddress<top:mask> == DBGWVR[n]<top:mask>);
        // If masked bits of DBGWVR[n] are not zero, the behavior is CONSTRAINED UNPREDICTABLE.
        if WVR_match && !IsZero(DBGWVR[n]<mask-1:bottom>) then
            WVR_match = ConstrainUnpredictableBool(Unpredictable_WPMASKEDBITS);
    else
        WVR_match = vaddress<top:bottom> == DBGWVR[n]<top:bottom>;

    return WVR_match && byte_select_match;
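
The contiguity test above uses a classic bit trick: isolate the lowest set bit of BAS, add it back, and the result must be a power of two (or wrap to zero) for the set bits to form one contiguous run. A self-contained C sketch (function name is ours):

    #include <stdint.h>
    #include <stdbool.h>

    static bool bas_is_contiguous(uint8_t bas)
    {
        uint8_t lsb = bas & (uint8_t)(~(bas - 1));   /* lowest set bit */
        uint8_t msb = (uint8_t)(bas + lsb);          /* carry runs past the set block */
        return (msb & (uint8_t)(msb - 1)) == 0;      /* power of two, or zero on wrap */
    }

For example, bas_is_contiguous(0x0F) and bas_is_contiguous(0xFF) are true, while bas_is_contiguous(0x0A) is false. The 8-bit wrap for BAS = 0xFF mirrors the modular arithmetic of the pseudocode's 8-bit addition.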

Library pseudocode for aarch32/debug/watchpoint/AArch32.WatchpointMatch

// AArch32.WatchpointMatch()
// =========================
// Watchpoint matching in an AArch32 translation regime.

boolean AArch32.WatchpointMatch(integer n, bits(32) vaddress, integer size, boolean ispriv,
                                boolean iswrite)
    assert ELUsingAArch32(S1TranslationRegime());
    assert n <= UInt(DBGDIDR.WRPs);

    // "ispriv" is:
    //   * FALSE for all loads, stores, and atomic operations executed at EL0.
    //   * FALSE if the access is unprivileged.
    //   * TRUE for all other loads, stores, and atomic operations.
    // "iswrite" is TRUE for stores, FALSE for loads.
    enabled = DBGWCR[n].E == '1';
    linked = DBGWCR[n].WT == '1';
    isbreakpnt = FALSE;

    state_match = AArch32.StateMatch(DBGWCR[n].SSC, DBGWCR[n].HMC, DBGWCR[n].PAC,
                                     linked, DBGWCR[n].LBN, isbreakpnt, ispriv);

    ls_match = (DBGWCR[n].LSC<(if iswrite then 1 else 0)> == '1');

    value_match = FALSE;
    for byte = 0 to size - 1
        value_match = value_match || AArch32.WatchpointByteMatch(n, vaddress + byte);

    return value_match && state_match && ls_match && enabled;

Library pseudocode for aarch32/exceptions/aborts/AArch32.Abort

// AArch32.Abort()
// ===============
// Abort and Debug exception handling in an AArch32 translation regime.

AArch32.Abort(bits(32) vaddress, FaultRecord fault)

    // Check if routed to AArch64 state
    route_to_aarch64 = PSTATE.EL == EL0 && !ELUsingAArch32(EL1);

    if !route_to_aarch64 && EL2Enabled() && !ELUsingAArch32(EL2) then
        route_to_aarch64 = (HCR_EL2.TGE == '1' || IsSecondStage(fault) ||
                            (HaveRASExt() && HCR_EL2.TEA == '1' && IsExternalAbort(fault)) ||
                            (IsDebugException(fault) && MDCR_EL2.TDE == '1'));

    if !route_to_aarch64 && HaveEL(EL3) && !ELUsingAArch32(EL3) then
        route_to_aarch64 = SCR_EL3.EA == '1' && IsExternalAbort(fault);

    if route_to_aarch64 then
        AArch64.Abort(ZeroExtend(vaddress), fault);
    elsif fault.acctype == AccType_IFETCH then
        AArch32.TakePrefetchAbortException(vaddress, fault);
    else
        AArch32.TakeDataAbortException(vaddress, fault);

Library pseudocode for aarch32/exceptions/aborts/AArch32.AbortSyndrome

// AArch32.AbortSyndrome()
// =======================
// Creates an exception syndrome record for Abort exceptions taken to Hyp mode
// from an AArch32 translation regime.

ExceptionRecord AArch32.AbortSyndrome(Exception exceptype, FaultRecord fault, bits(32) vaddress)
    exception = ExceptionSyndrome(exceptype);

    d_side = exceptype == Exception_DataAbort;

    exception.syndrome = AArch32.FaultSyndrome(d_side, fault);
    exception.vaddress = ZeroExtend(vaddress);
    if IPAValid(fault) then
        exception.ipavalid = TRUE;
        exception.NS = fault.ipaddress.NS;
        exception.ipaddress = ZeroExtend(fault.ipaddress.address);
    else
        exception.ipavalid = FALSE;

    return exception;

Library pseudocode for aarch32/exceptions/aborts/AArch32.CheckPCAlignment

// AArch32.CheckPCAlignment()
// ==========================

AArch32.CheckPCAlignment()

    bits(32) pc = ThisInstrAddr();
    if (CurrentInstrSet() == InstrSet_A32 && pc<1> == '1') || pc<0> == '1' then
        if AArch32.GeneralExceptionsToAArch64() then AArch64.PCAlignmentFault();

        // Generate an Alignment fault Prefetch Abort exception
        vaddress = pc;
        acctype = AccType_IFETCH;
        iswrite = FALSE;
        secondstage = FALSE;
        AArch32.Abort(vaddress, AArch32.AlignmentFault(acctype, iswrite, secondstage));
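
The alignment test reduces to: A32 instructions must be word-aligned, T32 instructions halfword-aligned. As a C predicate (a sketch; the instruction-set flag is our own encoding):

    #include <stdint.h>
    #include <stdbool.h>

    static bool pc_misaligned(uint32_t pc, bool is_a32)
    {
        return is_a32 ? (pc & 3) != 0      /* A32: pc<1:0> must be 00 */
                      : (pc & 1) != 0;     /* T32: pc<0> must be 0    */
    }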

Library pseudocode for aarch32/exceptions/aborts/AArch32.ReportDataAbort

// AArch32.ReportDataAbort()
// =========================
// Report syndrome information for aborts taken to modes other than Hyp mode.

AArch32.ReportDataAbort(boolean route_to_monitor, FaultRecord fault, bits(32) vaddress)

    // The encoding used in the IFSR or DFSR can be Long-descriptor format or Short-descriptor
    // format. Normally, the current translation table format determines the format. For an abort
    // from Non-secure state to Monitor mode, the IFSR or DFSR uses the Long-descriptor format if
    // any of the following applies:
    // * The Secure TTBCR.EAE is set to 1.
    // * The abort is synchronous and either:
    //   - It is taken from Hyp mode.
    //   - It is taken from EL1 or EL0, and the Non-secure TTBCR.EAE is set to 1.
    long_format = FALSE;
    if route_to_monitor && !IsSecure() then
        long_format = TTBCR_S.EAE == '1';
        if !IsSErrorInterrupt(fault) && !long_format then
            long_format = PSTATE.EL == EL2 || TTBCR.EAE == '1';
    else
        long_format = TTBCR.EAE == '1';
    d_side = TRUE;
    if long_format then
        syndrome = AArch32.FaultStatusLD(d_side, fault);
    else
        syndrome = AArch32.FaultStatusSD(d_side, fault);

    if fault.acctype == AccType_IC then
        if (!long_format &&
            boolean IMPLEMENTATION_DEFINED "Report I-cache maintenance fault in IFSR") then
            i_syndrome = syndrome;
            syndrome<10,3:0> = EncodeSDFSC(Fault_ICacheMaint, 1);
        else
            i_syndrome = bits(32) UNKNOWN;
        if route_to_monitor then
            IFSR_S = i_syndrome;
        else
            IFSR = i_syndrome;

    if route_to_monitor then
        DFSR_S = syndrome;
        DFAR_S = vaddress;
    else
        DFSR = syndrome;
        DFAR = vaddress;

    return;

Library pseudocode for aarch32/exceptions/aborts/AArch32.ReportPrefetchAbort

// AArch32.ReportPrefetchAbort()
// =============================
// Report syndrome information for aborts taken to modes other than Hyp mode.

AArch32.ReportPrefetchAbort(boolean route_to_monitor, FaultRecord fault, bits(32) vaddress)
    // The encoding used in the IFSR can be Long-descriptor format or Short-descriptor format.
    // Normally, the current translation table format determines the format. For an abort from
    // Non-secure state to Monitor mode, the IFSR uses the Long-descriptor format if any of the
    // following applies:
    // * The Secure TTBCR.EAE is set to 1.
    // * It is taken from Hyp mode.
    // * It is taken from EL1 or EL0, and the Non-secure TTBCR.EAE is set to 1.
    long_format = FALSE;
    if route_to_monitor && !IsSecure() then
        long_format = TTBCR_S.EAE == '1' || PSTATE.EL == EL2 || TTBCR.EAE == '1';
    else
        long_format = TTBCR.EAE == '1';

    d_side = FALSE;
    if long_format then
        fsr = AArch32.FaultStatusLD(d_side, fault);
    else
        fsr = AArch32.FaultStatusSD(d_side, fault);

    if route_to_monitor then
        IFSR_S = fsr;
        IFAR_S = vaddress;
    else
        IFSR = fsr;
        IFAR = vaddress;

    return;

Library pseudocode for aarch32/exceptions/aborts/AArch32.TakeDataAbortException

// AArch32.TakeDataAbortException()
// ================================

AArch32.TakeDataAbortException(bits(32) vaddress, FaultRecord fault)
    route_to_monitor = HaveEL(EL3) && SCR.EA == '1' && IsExternalAbort(fault);
    route_to_hyp = (HaveEL(EL2) && !IsSecure() && PSTATE.EL IN {EL0, EL1} &&
                    (HCR.TGE == '1' || IsSecondStage(fault) ||
                     (HaveRASExt() && HCR2.TEA == '1' && IsExternalAbort(fault)) ||
                     (IsDebugException(fault) && HDCR.TDE == '1')));

    bits(32) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x10;
    lr_offset = 8;

    if IsDebugException(fault) then DBGDSCRext.MOE = fault.debugmoe;
    if route_to_monitor then
        AArch32.ReportDataAbort(route_to_monitor, fault, vaddress);
        AArch32.EnterMonitorMode(preferred_exception_return, lr_offset, vect_offset);
    elsif PSTATE.EL == EL2 || route_to_hyp then
        exception = AArch32.AbortSyndrome(Exception_DataAbort, fault, vaddress);
        if PSTATE.EL == EL2 then
            AArch32.EnterHypMode(exception, preferred_exception_return, vect_offset);
        else
            AArch32.EnterHypMode(exception, preferred_exception_return, 0x14);
    else
        AArch32.ReportDataAbort(route_to_monitor, fault, vaddress);
        AArch32.EnterMode(M32_Abort, preferred_exception_return, lr_offset, vect_offset);

Library pseudocode for aarch32/exceptions/aborts/AArch32.TakePrefetchAbortException

// AArch32.TakePrefetchAbortException()
// ====================================

AArch32.TakePrefetchAbortException(bits(32) vaddress, FaultRecord fault)
    route_to_monitor = HaveEL(EL3) && SCR.EA == '1' && IsExternalAbort(fault);
    route_to_hyp = (HaveEL(EL2) && !IsSecure() && PSTATE.EL IN {EL0, EL1} &&
                    (HCR.TGE == '1' || IsSecondStage(fault) ||
                     (HaveRASExt() && HCR2.TEA == '1' && IsExternalAbort(fault)) ||
                     (IsDebugException(fault) && HDCR.TDE == '1')));

    bits(32) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x0C;
    lr_offset = 4;

    if IsDebugException(fault) then DBGDSCRext.MOE = fault.debugmoe;
    if route_to_monitor then
        AArch32.ReportPrefetchAbort(route_to_monitor, fault, vaddress);
        AArch32.EnterMonitorMode(preferred_exception_return, lr_offset, vect_offset);
    elsif PSTATE.EL == EL2 || route_to_hyp then
        if fault.statuscode == Fault_Alignment then             // PC Alignment fault
            exception = ExceptionSyndrome(Exception_PCAlignment);
            exception.vaddress = ThisInstrAddr();
        else
            exception = AArch32.AbortSyndrome(Exception_InstructionAbort, fault, vaddress);
        if PSTATE.EL == EL2 then
            AArch32.EnterHypMode(exception, preferred_exception_return, vect_offset);
        else
            AArch32.EnterHypMode(exception, preferred_exception_return, 0x14);
    else
        AArch32.ReportPrefetchAbort(route_to_monitor, fault, vaddress);
        AArch32.EnterMode(M32_Abort, preferred_exception_return, lr_offset, vect_offset);
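
The fixed AArch32 vector offsets used by the abort and asynchronous-exception routines in this section collect into a small table; a C sketch (enum and names are ours):

    enum a32_vect_offset {
        A32_VEC_UNDEF   = 0x04,
        A32_VEC_SVC     = 0x08,    /* also HVC/SMC, see TakeHVCException below */
        A32_VEC_PABORT  = 0x0C,    /* Prefetch Abort, LR adjusted by 4 */
        A32_VEC_DABORT  = 0x10,    /* Data Abort,     LR adjusted by 8 */
        A32_VEC_HYPTRAP = 0x14,    /* Hyp entry from a lower Exception level */
        A32_VEC_IRQ     = 0x18,
        A32_VEC_FIQ     = 0x1C,
    };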

Library pseudocode for aarch32/exceptions/asynch/AArch32.TakePhysicalFIQException

// AArch32.TakePhysicalFIQException()
// ==================================

AArch32.TakePhysicalFIQException()

    // Check if routed to AArch64 state
    route_to_aarch64 = PSTATE.EL == EL0 && !ELUsingAArch32(EL1);
    if !route_to_aarch64 && EL2Enabled() && !ELUsingAArch32(EL2) then
        route_to_aarch64 = HCR_EL2.TGE == '1' || (HCR_EL2.FMO == '1' && !IsInHost());
    if !route_to_aarch64 && HaveEL(EL3) && !ELUsingAArch32(EL3) then
        route_to_aarch64 = SCR_EL3.FIQ == '1';

    if route_to_aarch64 then AArch64.TakePhysicalFIQException();

    route_to_monitor = HaveEL(EL3) && SCR.FIQ == '1';
    route_to_hyp = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() &&
                    (HCR.TGE == '1' || HCR.FMO == '1'));
    bits(32) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x1C;
    lr_offset = 4;

    if route_to_monitor then
        AArch32.EnterMonitorMode(preferred_exception_return, lr_offset, vect_offset);
    elsif PSTATE.EL == EL2 || route_to_hyp then
        exception = ExceptionSyndrome(Exception_FIQ);
        AArch32.EnterHypMode(exception, preferred_exception_return, vect_offset);
    else
        AArch32.EnterMode(M32_FIQ, preferred_exception_return, lr_offset, vect_offset);

Library pseudocode for aarch32/exceptions/asynch/AArch32.TakePhysicalIRQException

// AArch32.TakePhysicalIRQException()
// ==================================
// Take an enabled physical IRQ exception.

AArch32.TakePhysicalIRQException()

    // Check if routed to AArch64 state
    route_to_aarch64 = PSTATE.EL == EL0 && !ELUsingAArch32(EL1);
    if !route_to_aarch64 && EL2Enabled() && !ELUsingAArch32(EL2) then
        route_to_aarch64 = HCR_EL2.TGE == '1' || (HCR_EL2.IMO == '1' && !IsInHost());
    if !route_to_aarch64 && HaveEL(EL3) && !ELUsingAArch32(EL3) then
        route_to_aarch64 = SCR_EL3.IRQ == '1';

    if route_to_aarch64 then AArch64.TakePhysicalIRQException();

    route_to_monitor = HaveEL(EL3) && SCR.IRQ == '1';
    route_to_hyp = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() &&
                    (HCR.TGE == '1' || HCR.IMO == '1'));
    bits(32) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x18;
    lr_offset = 4;

    if route_to_monitor then
        AArch32.EnterMonitorMode(preferred_exception_return, lr_offset, vect_offset);
    elsif PSTATE.EL == EL2 || route_to_hyp then
        exception = ExceptionSyndrome(Exception_IRQ);
        AArch32.EnterHypMode(exception, preferred_exception_return, vect_offset);
    else
        AArch32.EnterMode(M32_IRQ, preferred_exception_return, lr_offset, vect_offset);

Library pseudocode for aarch32/exceptions/asynch/AArch32.TakePhysicalSErrorException

// AArch32.TakePhysicalSErrorException()
// =====================================

AArch32.TakePhysicalSErrorException(boolean parity, bit extflag, bits(2) errortype,
                                    boolean impdef_syndrome, bits(24) full_syndrome)

    // Check if routed to AArch64 state
    route_to_aarch64 = PSTATE.EL == EL0 && !ELUsingAArch32(EL1);
    if !route_to_aarch64 && EL2Enabled() && !ELUsingAArch32(EL2) then
        route_to_aarch64 = (HCR_EL2.TGE == '1' || (!IsInHost() && HCR_EL2.AMO == '1'));
    if !route_to_aarch64 && HaveEL(EL3) && !ELUsingAArch32(EL3) then
        route_to_aarch64 = SCR_EL3.EA == '1';

    if route_to_aarch64 then
        AArch64.TakePhysicalSErrorException(impdef_syndrome, full_syndrome);

    route_to_monitor = HaveEL(EL3) && SCR.EA == '1';
    route_to_hyp = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() &&
                    (HCR.TGE == '1' || HCR.AMO == '1'));
    bits(32) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x10;
    lr_offset = 8;

    if IsSErrorEdgeTriggered(full_syndrome) then
        ClearPendingPhysicalSError();

    fault = AArch32.AsynchExternalAbort(parity, errortype, extflag);
    vaddress = bits(32) UNKNOWN;
    if route_to_monitor then
        AArch32.ReportDataAbort(route_to_monitor, fault, vaddress);
        AArch32.EnterMonitorMode(preferred_exception_return, lr_offset, vect_offset);
    elsif PSTATE.EL == EL2 || route_to_hyp then
        exception = AArch32.AbortSyndrome(Exception_DataAbort, fault, vaddress);
        if PSTATE.EL == EL2 then
            AArch32.EnterHypMode(exception, preferred_exception_return, vect_offset);
        else
            AArch32.EnterHypMode(exception, preferred_exception_return, 0x14);
    else
        AArch32.ReportDataAbort(route_to_monitor, fault, vaddress);
        AArch32.EnterMode(M32_Abort, preferred_exception_return, lr_offset, vect_offset);

Library pseudocode for aarch32/exceptions/asynch/AArch32.TakeVirtualFIQException

// AArch32.TakeVirtualFIQException()
// =================================

AArch32.TakeVirtualFIQException()
    assert PSTATE.EL IN {EL0, EL1} && EL2Enabled();

    if ELUsingAArch32(EL2) then    // Virtual IRQ enabled if TGE==0 and FMO==1
        assert HCR.TGE == '0' && HCR.FMO == '1';
    else
        assert HCR_EL2.TGE == '0' && HCR_EL2.FMO == '1';

    // Check if routed to AArch64 state
    if PSTATE.EL == EL0 && !ELUsingAArch32(EL1) then AArch64.TakeVirtualFIQException();

    bits(32) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x1C;
    lr_offset = 4;

    AArch32.EnterMode(M32_FIQ, preferred_exception_return, lr_offset, vect_offset);

Library pseudocode for aarch32/exceptions/asynch/AArch32.TakeVirtualIRQException

// AArch32.TakeVirtualIRQException()
// =================================

AArch32.TakeVirtualIRQException()
    assert PSTATE.EL IN {EL0, EL1} && EL2Enabled();

    if ELUsingAArch32(EL2) then    // Virtual IRQs enabled if TGE==0 and IMO==1
        assert HCR.TGE == '0' && HCR.IMO == '1';
    else
        assert HCR_EL2.TGE == '0' && HCR_EL2.IMO == '1';

    // Check if routed to AArch64 state
    if PSTATE.EL == EL0 && !ELUsingAArch32(EL1) then AArch64.TakeVirtualIRQException();

    bits(32) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x18;
    lr_offset = 4;

    AArch32.EnterMode(M32_IRQ, preferred_exception_return, lr_offset, vect_offset);

Library pseudocode for aarch32/exceptions/asynch/AArch32.TakeVirtualSErrorException

// AArch32.TakeVirtualSErrorException()
// ====================================

AArch32.TakeVirtualSErrorException(bit extflag, bits(2) errortype, boolean impdef_syndrome,
                                   bits(24) full_syndrome)
    assert PSTATE.EL IN {EL0, EL1} && EL2Enabled();

    if ELUsingAArch32(EL2) then    // Virtual SError enabled if TGE==0 and AMO==1
        assert HCR.TGE == '0' && HCR.AMO == '1';
    else
        assert HCR_EL2.TGE == '0' && HCR_EL2.AMO == '1';

    // Check if routed to AArch64 state
    if PSTATE.EL == EL0 && !ELUsingAArch32(EL1) then
        AArch64.TakeVirtualSErrorException(impdef_syndrome, full_syndrome);

    route_to_monitor = FALSE;

    bits(32) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x10;
    lr_offset = 8;

    vaddress = bits(32) UNKNOWN;
    parity = FALSE;
    if HaveRASExt() then
        if ELUsingAArch32(EL2) then
            fault = AArch32.AsynchExternalAbort(FALSE, VDFSR.AET, VDFSR.ExT);
        else
            fault = AArch32.AsynchExternalAbort(FALSE, VSESR_EL2.AET, VSESR_EL2.ExT);
    else
        fault = AArch32.AsynchExternalAbort(parity, errortype, extflag);
    ClearPendingVirtualSError();

    AArch32.ReportDataAbort(route_to_monitor, fault, vaddress);
    AArch32.EnterMode(M32_Abort, preferred_exception_return, lr_offset, vect_offset);

Library pseudocode for aarch32/exceptions/debug/AArch32.SoftwareBreakpoint

// AArch32.SoftwareBreakpoint()
// ============================

AArch32.SoftwareBreakpoint(bits(16) immediate)

    if (EL2Enabled() && !ELUsingAArch32(EL2) &&
        (HCR_EL2.TGE == '1' || MDCR_EL2.TDE == '1')) || !ELUsingAArch32(EL1) then
        AArch64.SoftwareBreakpoint(immediate);
    vaddress = bits(32) UNKNOWN;
    acctype = AccType_IFETCH;          // Take as a Prefetch Abort
    iswrite = FALSE;
    entry = DebugException_BKPT;
    fault = AArch32.DebugFault(acctype, iswrite, entry);
    AArch32.Abort(vaddress, fault);

Library pseudocode for aarch32/exceptions/debug/DebugException

constant bits(4) DebugException_Breakpoint  = '0001';
constant bits(4) DebugException_BKPT        = '0011';
constant bits(4) DebugException_VectorCatch = '0101';
constant bits(4) DebugException_Watchpoint  = '1010';

Library pseudocode for aarch32/exceptions/exceptions/AArch32.CheckAdvSIMDOrFPRegisterTraps

// AArch32.CheckAdvSIMDOrFPRegisterTraps()
// =======================================
// Check if an instruction that accesses an Advanced SIMD and
// floating-point System register is trapped by an appropriate HCR.TIDx
// ID group trap control.

AArch32.CheckAdvSIMDOrFPRegisterTraps(bits(4) reg)
    if PSTATE.EL == EL1 && EL2Enabled() then
        tid0 = if ELUsingAArch32(EL2) then HCR.TID0 else HCR_EL2.TID0;
        tid3 = if ELUsingAArch32(EL2) then HCR.TID3 else HCR_EL2.TID3;

        if (tid0 == '1' && reg == '0000')                            // FPSID
          || (tid3 == '1' && reg IN {'0101', '0110', '0111'}) then   // MVFRx
            if ELUsingAArch32(EL2) then
                AArch32.SystemAccessTrap(M32_Hyp, 0x8);      // Exception_AdvSIMDFPAccessTrap
            else
                AArch64.AArch32SystemAccessTrap(EL2, 0x8);   // Exception_AdvSIMDFPAccessTrap

Library pseudocode for aarch32/exceptions/exceptions/AArch32.ExceptionClass

// AArch32.ExceptionClass()
// ========================
// Returns the Exception Class and Instruction Length fields to be reported in HSR

(integer,bit) AArch32.ExceptionClass(Exception exceptype)

    il_is_valid = TRUE;
    case exceptype of
        when Exception_Uncategorized        ec = 0x00; il_is_valid = FALSE;
        when Exception_WFxTrap              ec = 0x01;
        when Exception_CP15RTTrap           ec = 0x03;
        when Exception_CP15RRTTrap          ec = 0x04;
        when Exception_CP14RTTrap           ec = 0x05;
        when Exception_CP14DTTrap           ec = 0x06;
        when Exception_AdvSIMDFPAccessTrap  ec = 0x07;
        when Exception_FPIDTrap             ec = 0x08;
        when Exception_PACTrap              ec = 0x09;
        when Exception_LDST64BTrap          ec = 0x0A;
        when Exception_CP14RRTTrap          ec = 0x0C;
        when Exception_BranchTarget         ec = 0x0D;
        when Exception_IllegalState         ec = 0x0E; il_is_valid = FALSE;
        when Exception_SupervisorCall       ec = 0x11;
        when Exception_HypervisorCall       ec = 0x12;
        when Exception_MonitorCall          ec = 0x13;
        when Exception_InstructionAbort     ec = 0x20; il_is_valid = FALSE;
        when Exception_PCAlignment          ec = 0x22; il_is_valid = FALSE;
        when Exception_DataAbort            ec = 0x24;
        when Exception_NV2DataAbort         ec = 0x25;
        when Exception_FPTrappedException   ec = 0x28;
        otherwise Unreachable();

    if ec IN {0x20,0x24} && PSTATE.EL == EL2 then
        ec = ec + 1;

    if il_is_valid then
        il = if ThisInstrLength() == 32 then '1' else '0';
    else
        il = '1';

    return (ec,il);
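
The (ec, il) pair is packed into HSR by AArch32.ReportHypEntry below, with EC in bits [31:26], IL in bit [25], and the 25-bit ISS in bits [24:0]. As a C helper (a sketch; the name is ours):

    #include <stdint.h>

    static uint32_t pack_hsr(uint32_t ec, uint32_t il, uint32_t iss)
    {
        return ((ec & 0x3F) << 26) | ((il & 1) << 25) | (iss & 0x1FFFFFF);
    }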

Library pseudocode for aarch32/exceptions/exceptions/AArch32.GeneralExceptionsToAArch64

// AArch32.GeneralExceptionsToAArch64()
// ====================================
// Returns TRUE if exceptions normally routed to EL1 are being handled at an Exception
// level using AArch64, because either EL1 is using AArch64 or TGE is in force and EL2
// is using AArch64.

boolean AArch32.GeneralExceptionsToAArch64()
    return ((PSTATE.EL == EL0 && !ELUsingAArch32(EL1)) ||
            (EL2Enabled() && !ELUsingAArch32(EL2) && HCR_EL2.TGE == '1'));

Library pseudocode for aarch32/exceptions/exceptions/AArch32.ReportHypEntry

// AArch32.ReportHypEntry()
// ========================
// Report syndrome information to Hyp mode registers.

AArch32.ReportHypEntry(ExceptionRecord exception)

    Exception exceptype = exception.exceptype;

    (ec,il) = AArch32.ExceptionClass(exceptype);
    iss = exception.syndrome;

    // IL is not valid for Data Abort exceptions without valid instruction syndrome information
    if ec IN {0x24,0x25} && iss<24> == '0' then il = '1';

    HSR = ec<5:0>:il:iss;

    if exceptype IN {Exception_InstructionAbort, Exception_PCAlignment} then
        HIFAR = exception.vaddress<31:0>;
        HDFAR = bits(32) UNKNOWN;
    elsif exceptype == Exception_DataAbort then
        HIFAR = bits(32) UNKNOWN;
        HDFAR = exception.vaddress<31:0>;

    if exception.ipavalid then
        HPFAR<31:4> = exception.ipaddress<39:12>;
    else
        HPFAR<31:4> = bits(28) UNKNOWN;

    return;

Library pseudocode for aarch32/exceptions/exceptions/AArch32.ResetControlRegisters

// Resets System registers and memory-mapped control registers that have architecturally-defined
// reset values to those values.
AArch32.ResetControlRegisters(boolean cold_reset);

Library pseudocode for aarch32/exceptions/exceptions/AArch32.TakeReset

// AArch32.TakeReset()
// ===================
// Reset into AArch32 state

AArch32.TakeReset(boolean cold_reset)
    assert HighestELUsingAArch32();

    // Enter the highest implemented Exception level in AArch32 state
    if HaveEL(EL3) then
        AArch32.WriteMode(M32_Svc);
        SCR.NS = '0';                  // Secure state
    elsif HaveEL(EL2) then
        AArch32.WriteMode(M32_Hyp);
    else
        AArch32.WriteMode(M32_Svc);

    // Reset the CP14 and CP15 registers and other system components
    AArch32.ResetControlRegisters(cold_reset);
    FPEXC.EN = '0';

    // Reset all other PSTATE fields, including instruction set and endianness according to the
    // SCTLR values produced by the above call to ResetControlRegisters()
    PSTATE.<A,I,F> = '111';        // All asynchronous exceptions masked
    PSTATE.IT = '00000000';        // IT block state reset
    PSTATE.T = SCTLR.TE;           // Instruction set: TE=0: A32, TE=1: T32. PSTATE.J is RES0.
    PSTATE.E = SCTLR.EE;           // Endianness: EE=0: little-endian, EE=1: big-endian
    PSTATE.IL = '0';               // Clear Illegal Execution state bit

    // All registers, bits and fields not reset by the above pseudocode or by the BranchTo() call
    // below are UNKNOWN bitstrings after reset. In particular, the return information registers
    // R14 or ELR_hyp and SPSR have UNKNOWN values, so that it
    // is impossible to return from a reset in an architecturally defined way.
    AArch32.ResetGeneralRegisters();
    AArch32.ResetSIMDFPRegisters();
    AArch32.ResetSpecialRegisters();
    ResetExternalDebugRegisters(cold_reset);

    bits(32) rv;                   // IMPLEMENTATION DEFINED reset vector

    if HaveEL(EL3) then
        if MVBAR<0> == '1' then    // Reset vector in MVBAR
            rv = MVBAR<31:1>:'0';
        else
            rv = bits(32) IMPLEMENTATION_DEFINED "reset vector address";
    else
        rv = RVBAR<31:1>:'0';

    // The reset vector must be correctly aligned
    assert rv<0> == '0' && (PSTATE.T == '1' || rv<1> == '0');

    BranchTo(rv, BranchType_RESET);

Library pseudocode for aarch32/exceptions/exceptions/ExcVectorBase

// ExcVectorBase()
// ===============

bits(32) ExcVectorBase()
    if SCTLR.V == '1' then    // Hivecs selected, base = 0xFFFF0000
        return Ones(16):Zeros(16);
    else
        return VBAR<31:5>:Zeros(5);
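
In C this is a one-liner: SCTLR.V selects the Hivecs base 0xFFFF0000, otherwise the base comes from VBAR with its low five bits zeroed (a sketch; names are ours):

    #include <stdint.h>

    static uint32_t exc_vector_base(int sctlr_v, uint32_t vbar)
    {
        return sctlr_v ? 0xFFFF0000u : (vbar & ~0x1Fu);
    }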

Library pseudocode for aarch32/exceptions/ieeefp/AArch32.FPTrappedException

// AArch32.FPTrappedException()
// ============================
AArch32.FPTrappedException(bits(8) accumulated_exceptions)
    if AArch32.GeneralExceptionsToAArch64() then
        is_ase = FALSE;
        element = 0;
        AArch64.FPTrappedException(is_ase, accumulated_exceptions);
    FPEXC.DEX = '1';
    FPEXC.TFV = '1';
    FPEXC<7,4:0> = accumulated_exceptions<7,4:0>;   // IDF,IXF,UFF,OFF,DZF,IOF
    FPEXC<10:8> = '111';                            // VECITR is RES1

    AArch32.TakeUndefInstrException();

Library pseudocode for aarch32/exceptions/syscalls/AArch32.CallHypervisor

// AArch32.CallHypervisor()
// ========================
// Performs a HVC call
AArch32.CallHypervisor(bits(16) immediate)
    assert HaveEL(EL2);

    if !ELUsingAArch32(EL2) then
        AArch64.CallHypervisor(immediate);
    else
        AArch32.TakeHVCException(immediate);

Library pseudocode for aarch32/exceptions/syscalls/AArch32.CallSupervisor

// AArch32.CallSupervisor()
// ========================
// Calls the Supervisor
AArch32.CallSupervisor(bits(16) immediate)
    if AArch32.CurrentCond() != '1110' then
        immediate = bits(16) UNKNOWN;
    if AArch32.GeneralExceptionsToAArch64() then
        AArch64.CallSupervisor(immediate);
    else
        AArch32.TakeSVCException(immediate);

Library pseudocode for aarch32/exceptions/syscalls/AArch32.TakeHVCException

// AArch32.TakeHVCException()
// ==========================
AArch32.TakeHVCException(bits(16) immediate)
    assert HaveEL(EL2) && ELUsingAArch32(EL2);

    AArch32.ITAdvance();
    SSAdvance();
    bits(32) preferred_exception_return = NextInstrAddr();
    vect_offset = 0x08;

    exception = ExceptionSyndrome(Exception_HypervisorCall);
    exception.syndrome<15:0> = immediate;

    if PSTATE.EL == EL2 then
        AArch32.EnterHypMode(exception, preferred_exception_return, vect_offset);
    else
        AArch32.EnterHypMode(exception, preferred_exception_return, 0x14);

Library pseudocode for aarch32/exceptions/syscalls/AArch32.TakeSMCException

// AArch32.TakeSMCException()
// ==========================
AArch32.TakeSMCException()
    assert HaveEL(EL3) && ELUsingAArch32(EL3);

    AArch32.ITAdvance();
    SSAdvance();
    bits(32) preferred_exception_return = NextInstrAddr();
    vect_offset = 0x08;
    lr_offset = 0;

    AArch32.EnterMonitorMode(preferred_exception_return, lr_offset, vect_offset);

Library pseudocode for aarch32/exceptions/syscalls/AArch32.TakeSVCException

// AArch32.TakeSVCException()
// ==========================
AArch32.TakeSVCException(bits(16) immediate)
    AArch32.ITAdvance();
    SSAdvance();
    route_to_hyp = PSTATE.EL == EL0 && EL2Enabled() && HCR.TGE == '1';

    bits(32) preferred_exception_return = NextInstrAddr();
    vect_offset = 0x08;
    lr_offset = 0;

    if PSTATE.EL == EL2 || route_to_hyp then
        exception = ExceptionSyndrome(Exception_SupervisorCall);
        exception.syndrome<15:0> = immediate;
        if PSTATE.EL == EL2 then
            AArch32.EnterHypMode(exception, preferred_exception_return, vect_offset);
        else
            AArch32.EnterHypMode(exception, preferred_exception_return, 0x14);
    else
        AArch32.EnterMode(M32_Svc, preferred_exception_return, lr_offset, vect_offset);

Library pseudocode for aarch32/exceptions/takeexception/AArch32.EnterHypMode

// AArch32.EnterHypMode()
// ======================
// Take an exception to Hyp mode.
AArch32.EnterHypMode(ExceptionRecord exception, bits(32) preferred_exception_return, integer vect_offset)
    SynchronizeContext();
    assert HaveEL(EL2) && !IsSecure() && ELUsingAArch32(EL2);

    bits(32) spsr = GetPSRFromPSTATE(AArch32_NonDebugState);
    if !(exception.exceptype IN {Exception_IRQ, Exception_FIQ}) then
        AArch32.ReportHypEntry(exception);
    AArch32.WriteMode(M32_Hyp);
    SPSR[] = spsr;
    ELR_hyp = preferred_exception_return;
    PSTATE.T = HSCTLR.TE;                       // PSTATE.J is RES0
    PSTATE.SS = '0';
    ShouldAdvanceSS = FALSE;
    if !HaveEL(EL3) || SCR_GEN[].EA == '0' then PSTATE.A = '1';
    if !HaveEL(EL3) || SCR_GEN[].IRQ == '0' then PSTATE.I = '1';
    if !HaveEL(EL3) || SCR_GEN[].FIQ == '0' then PSTATE.F = '1';
    PSTATE.E = HSCTLR.EE;
    PSTATE.IL = '0';
    PSTATE.IT = '00000000';
    if HaveSSBSExt() then PSTATE.SSBS = HSCTLR.DSSBS;
    BranchTo(HVBAR<31:5>:vect_offset<4:0>, BranchType_EXCEPTION);
    EndOfInstruction();

Library pseudocode for aarch32/exceptions/takeexception/AArch32.EnterMode

// AArch32.EnterMode()
// ===================
// Take an exception to a mode other than Monitor and Hyp mode.
AArch32.EnterMode(bits(5) target_mode, bits(32) preferred_exception_return, integer lr_offset,
                  integer vect_offset)
    SynchronizeContext();
    assert ELUsingAArch32(EL1) && PSTATE.EL != EL2;

    spsr = GetPSRFromPSTATE(AArch32_NonDebugState);
    if PSTATE.M == M32_Monitor then SCR.NS = '0';
    AArch32.WriteMode(target_mode);
    SPSR[] = spsr;
    R[14] = preferred_exception_return + lr_offset;
    PSTATE.T = SCTLR.TE;                        // PSTATE.J is RES0
    PSTATE.SS = '0';
    ShouldAdvanceSS = FALSE;
    if target_mode == M32_FIQ then
        PSTATE.<A,I,F> = '111';
    elsif target_mode IN {M32_Abort, M32_IRQ} then
        PSTATE.<A,I> = '11';
    else
        PSTATE.I = '1';
    PSTATE.E = SCTLR.EE;
    PSTATE.IL = '0';
    PSTATE.IT = '00000000';
    if HavePANExt() && SCTLR.SPAN == '0' then PSTATE.PAN = '1';
    if HaveSSBSExt() then PSTATE.SSBS = SCTLR.DSSBS;
    BranchTo(ExcVectorBase()<31:5>:vect_offset<4:0>, BranchType_EXCEPTION);
    EndOfInstruction();

Library pseudocode for aarch32/exceptions/takeexception/AArch32.EnterMonitorMode

// AArch32.EnterMonitorMode()
// ==========================
// Take an exception to Monitor mode.
AArch32.EnterMonitorMode(bits(32) preferred_exception_return, integer lr_offset, integer vect_offset)
    SynchronizeContext();
    assert HaveEL(EL3) && ELUsingAArch32(EL3);

    from_secure = IsSecure();
    spsr = GetPSRFromPSTATE(AArch32_NonDebugState);
    if PSTATE.M == M32_Monitor then SCR.NS = '0';
    AArch32.WriteMode(M32_Monitor);
    SPSR[] = spsr;
    R[14] = preferred_exception_return + lr_offset;
    PSTATE.T = SCTLR.TE;                        // PSTATE.J is RES0
    PSTATE.SS = '0';
    ShouldAdvanceSS = FALSE;
    PSTATE.<A,I,F> = '111';
    PSTATE.E = SCTLR.EE;
    PSTATE.IL = '0';
    PSTATE.IT = '00000000';
    if HavePANExt() then
        if !from_secure then
            PSTATE.PAN = '0';
        elsif SCTLR.SPAN == '0' then
            PSTATE.PAN = '1';
    if HaveSSBSExt() then PSTATE.SSBS = SCTLR.DSSBS;
    BranchTo(MVBAR<31:5>:vect_offset<4:0>, BranchType_EXCEPTION);
    EndOfInstruction();

Library pseudocode for aarch32/exceptions/traps/AArch32.CheckAdvSIMDOrFPEnabled

// AArch32.CheckAdvSIMDOrFPEnabled()
// =================================
// Check against CPACR, FPEXC, HCPTR, NSACR, and CPTR_EL3.
AArch32.CheckAdvSIMDOrFPEnabled(boolean fpexc_check, boolean advsimd)
    if PSTATE.EL == EL0 && (!HaveEL(EL2) || (!ELUsingAArch32(EL2) && HCR_EL2.TGE == '0')) &&
       !ELUsingAArch32(EL1) then
        // The PE behaves as if FPEXC.EN is 1
        AArch64.CheckFPAdvSIMDEnabled();
    elsif PSTATE.EL == EL0 && HaveEL(EL2) && !ELUsingAArch32(EL2) && HCR_EL2.TGE == '1' &&
          !ELUsingAArch32(EL1) then
        if fpexc_check && HCR_EL2.RW == '0' then
            fpexc_en = bits(1) IMPLEMENTATION_DEFINED "FPEXC.EN value when TGE==1 and RW==0";
            if fpexc_en == '0' then UNDEFINED;
        AArch64.CheckFPAdvSIMDEnabled();
    else
        cpacr_asedis = CPACR.ASEDIS;
        cpacr_cp10 = CPACR.cp10;

        if HaveEL(EL3) && ELUsingAArch32(EL3) && !IsSecure() then
            // Check if access disabled in NSACR
            if NSACR.NSASEDIS == '1' then cpacr_asedis = '1';
            if NSACR.cp10 == '0' then cpacr_cp10 = '00';

        if PSTATE.EL != EL2 then
            // Check if Advanced SIMD disabled in CPACR
            if advsimd && cpacr_asedis == '1' then UNDEFINED;

            // Check if access disabled in CPACR
            case cpacr_cp10 of
                when '00' disabled = TRUE;
                when '01' disabled = PSTATE.EL == EL0;
                when '10' disabled = ConstrainUnpredictableBool(Unpredictable_RESCPACR);
                when '11' disabled = FALSE;
            if disabled then UNDEFINED;

        // If required, check FPEXC enabled bit.
        if fpexc_check && FPEXC.EN == '0' then UNDEFINED;

        AArch32.CheckFPAdvSIMDTrap(advsimd);    // Also check against HCPTR and CPTR_EL3

Library pseudocode for aarch32/exceptions/traps/AArch32.CheckFPAdvSIMDTrap

// AArch32.CheckFPAdvSIMDTrap()
// ============================
// Check against CPTR_EL2 and CPTR_EL3.
AArch32.CheckFPAdvSIMDTrap(boolean advsimd)
    if EL2Enabled() && !ELUsingAArch32(EL2) then
        AArch64.CheckFPAdvSIMDTrap();
    else
        if HaveEL(EL2) && !IsSecure() then
            hcptr_tase = HCPTR.TASE;
            hcptr_cp10 = HCPTR.TCP10;

            if HaveEL(EL3) && ELUsingAArch32(EL3) && !IsSecure() then
                // Check if access disabled in NSACR
                if NSACR.NSASEDIS == '1' then hcptr_tase = '1';
                if NSACR.cp10 == '0' then hcptr_cp10 = '1';

            // Check if access disabled in HCPTR
            if (advsimd && hcptr_tase == '1') || hcptr_cp10 == '1' then
                exception = ExceptionSyndrome(Exception_AdvSIMDFPAccessTrap);
                exception.syndrome<24:20> = ConditionSyndrome();
                if advsimd then
                    exception.syndrome<5> = '1';
                else
                    exception.syndrome<5> = '0';
                    exception.syndrome<3:0> = '1010';   // coproc field, always 0xA
                if PSTATE.EL == EL2 then
                    AArch32.TakeUndefInstrException(exception);
                else
                    AArch32.TakeHypTrapException(exception);

        if HaveEL(EL3) && !ELUsingAArch32(EL3) then
            // Check if access disabled in CPTR_EL3
            if CPTR_EL3.TFP == '1' then AArch64.AdvSIMDFPAccessTrap(EL3);
    return;

Library pseudocode for aarch32/exceptions/traps/AArch32.CheckForSMCUndefOrTrap

// AArch32.CheckForSMCUndefOrTrap()
// ================================
// Check for UNDEFINED or trap on SMC instruction
AArch32.CheckForSMCUndefOrTrap()
    if !HaveEL(EL3) || PSTATE.EL == EL0 then
        UNDEFINED;
    if EL2Enabled() && !ELUsingAArch32(EL2) then
        AArch64.CheckForSMCUndefOrTrap(Zeros(16));
    else
        route_to_hyp = HaveEL(EL2) && !IsSecure() && PSTATE.EL == EL1 && HCR.TSC == '1';
        if route_to_hyp then
            exception = ExceptionSyndrome(Exception_MonitorCall);
            AArch32.TakeHypTrapException(exception);

Library pseudocode for aarch32/exceptions/traps/AArch32.CheckForSVCTrap

// AArch32.CheckForSVCTrap()
// =========================
// Check for trap on SVC instruction
AArch32.CheckForSVCTrap(bits(16) immediate)
    if HaveFGTExt() then
        route_to_el2 = FALSE;
        if PSTATE.EL == EL0 then
            route_to_el2 = (!ELUsingAArch32(EL1) && EL2Enabled() && HFGITR_EL2.SVC_EL0 == '1' &&
                            (HCR_EL2.<E2H, TGE> != '11' && (!HaveEL(EL3) || SCR_EL3.FGTEn == '1')));

        if route_to_el2 then
            exception = ExceptionSyndrome(Exception_SupervisorCall);
            exception.syndrome<15:0> = immediate;
            bits(64) preferred_exception_return = ThisInstrAddr();
            vect_offset = 0x0;

            AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);

Library pseudocode for aarch32/exceptions/traps/AArch32.CheckForWFxTrap

// AArch32.CheckForWFxTrap()
// =========================
// Check for trap on WFE or WFI instruction
AArch32.CheckForWFxTrap(bits(2) target_el, boolean is_wfe)
    assert HaveEL(target_el);

    // Check for routing to AArch64
    if !ELUsingAArch32(target_el) then
        AArch64.CheckForWFxTrap(target_el, is_wfe);
        return;
    case target_el of
        when EL1 trap = (if is_wfe then SCTLR.nTWE else SCTLR.nTWI) == '0';
        when EL2 trap = (if is_wfe then HCR.TWE else HCR.TWI) == '1';
        when EL3 trap = (if is_wfe then SCR.TWE else SCR.TWI) == '1';
    if trap then
        if target_el == EL1 && EL2Enabled() && !ELUsingAArch32(EL2) && HCR_EL2.TGE == '1' then
            AArch64.WFxTrap(target_el, is_wfe);
        if target_el == EL3 then
            AArch32.TakeMonitorTrapException();
        elsif target_el == EL2 then
            exception = ExceptionSyndrome(Exception_WFxTrap);
            exception.syndrome<24:20> = ConditionSyndrome();
            exception.syndrome<0> = if is_wfe then '1' else '0';
            AArch32.TakeHypTrapException(exception);
        else
            AArch32.TakeUndefInstrException();

Library pseudocode for aarch32/exceptions/traps/AArch32.CheckITEnabled

// AArch32.CheckITEnabled()
// ========================
// Check whether the T32 IT instruction is disabled.
AArch32.CheckITEnabled(bits(4) mask)
    if PSTATE.EL == EL2 then
        it_disabled = HSCTLR.ITD;
    else
        it_disabled = (if ELUsingAArch32(EL1) then SCTLR.ITD else SCTLR[].ITD);
    if it_disabled == '1' then
        if mask != '1000' then UNDEFINED;

        // Otherwise whether the IT block is allowed depends on hw1 of the next instruction.
        next_instr = AArch32.MemSingle[NextInstrAddr(), 2, AccType_IFETCH, TRUE];

        if next_instr IN {'11xxxxxxxxxxxxxx', '1011xxxxxxxxxxxx', '10100xxxxxxxxxxx',
                          '01001xxxxxxxxxxx', '010001xxx1111xxx', '010001xx1xxxx111'} then
            // It is IMPLEMENTATION DEFINED whether the Undefined Instruction exception is
            // taken on the IT instruction or the next instruction. This is not reflected in
            // the pseudocode, which always takes the exception on the IT instruction. This
            // also does not take into account cases where the next instruction is UNPREDICTABLE.
            UNDEFINED;
    return;

Library pseudocode for aarch32/exceptions/traps/AArch32.CheckIllegalState

// AArch32.CheckIllegalState()
// ===========================
// Check PSTATE.IL bit and generate Illegal Execution state exception if set.
AArch32.CheckIllegalState()
    if AArch32.GeneralExceptionsToAArch64() then
        AArch64.CheckIllegalState();
    elsif PSTATE.IL == '1' then
        route_to_hyp = PSTATE.EL == EL0 && EL2Enabled() && HCR.TGE == '1';

        bits(32) preferred_exception_return = ThisInstrAddr();
        vect_offset = 0x04;

        if PSTATE.EL == EL2 || route_to_hyp then
            exception = ExceptionSyndrome(Exception_IllegalState);
            if PSTATE.EL == EL2 then
                AArch32.EnterHypMode(exception, preferred_exception_return, vect_offset);
            else
                AArch32.EnterHypMode(exception, preferred_exception_return, 0x14);
        else
            AArch32.TakeUndefInstrException();

Library pseudocode for aarch32/exceptions/traps/AArch32.CheckSETENDEnabled

// AArch32.CheckSETENDEnabled()
// ============================
// Check whether the AArch32 SETEND instruction is disabled.
AArch32.CheckSETENDEnabled()
    if PSTATE.EL == EL2 then
        setend_disabled = HSCTLR.SED;
    else
        setend_disabled = (if ELUsingAArch32(EL1) then SCTLR.SED else SCTLR[].SED);
    if setend_disabled == '1' then
        UNDEFINED;
    return;

Library pseudocode for aarch32/exceptions/traps/AArch32.SystemAccessTrap

// AArch32.SystemAccessTrap()
// ==========================
// Trapped system register access.
AArch32.SystemAccessTrap(bits(5) mode, integer ec)
    (valid, target_el) = ELFromM32(mode);
    assert valid && HaveEL(target_el) && target_el != EL0 && UInt(target_el) >= UInt(PSTATE.EL);

    if target_el == EL2 then
        exception = AArch32.SystemAccessTrapSyndrome(ThisInstr(), ec);
        AArch32.TakeHypTrapException(exception);
    else
        AArch32.TakeUndefInstrException();

Library pseudocode for aarch32/exceptions/traps/AArch32.SystemAccessTrapSyndrome

// AArch32.SystemAccessTrapSyndrome()
// ==================================
// Returns the syndrome information for traps on AArch32 MCR, MCRR, MRC, MRRC, and VMRS, VMSR instructions,
// other than traps that are due to HCPTR or CPACR.
ExceptionRecord AArch32.SystemAccessTrapSyndrome(bits(32) instr, integer ec)
    ExceptionRecord exception;
    case ec of
        when 0x0 exception = ExceptionSyndrome(Exception_Uncategorized);
        when 0x3 exception = ExceptionSyndrome(Exception_CP15RTTrap);
        when 0x4 exception = ExceptionSyndrome(Exception_CP15RRTTrap);
        when 0x5 exception = ExceptionSyndrome(Exception_CP14RTTrap);
        when 0x6 exception = ExceptionSyndrome(Exception_CP14DTTrap);
        when 0x7 exception = ExceptionSyndrome(Exception_AdvSIMDFPAccessTrap);
        when 0x8 exception = ExceptionSyndrome(Exception_FPIDTrap);
        when 0xC exception = ExceptionSyndrome(Exception_CP14RRTTrap);
        otherwise Unreachable();

    bits(20) iss = Zeros();

    if exception.exceptype IN {Exception_FPIDTrap, Exception_CP14RTTrap, Exception_CP15RTTrap} then
        // Trapped MRC/MCR, VMRS on FPSID
        iss<13:10> = instr<19:16>;          // CRn, Reg in case of VMRS
        iss<8:5> = instr<15:12>;            // Rt
        iss<9> = '0';                       // RES0
        if exception.exceptype != Exception_FPIDTrap then   // When trap is not for VMRS
            iss<19:17> = instr<7:5>;        // opc2
            iss<16:14> = instr<23:21>;      // opc1
            iss<4:1> = instr<3:0>;          // CRm
        else // VMRS Access
            iss<19:17> = '000';             // opc2 - Hardcoded for VMRS
            iss<16:14> = '111';             // opc1 - Hardcoded for VMRS
            iss<4:1> = '0000';              // CRm  - Hardcoded for VMRS
    elsif exception.exceptype IN {Exception_CP14RRTTrap, Exception_AdvSIMDFPAccessTrap,
                                  Exception_CP15RRTTrap} then
        // Trapped MRRC/MCRR, VMRS/VMSR
        iss<19:16> = instr<7:4>;            // opc1
        iss<13:10> = instr<19:16>;          // Rt2
        iss<8:5> = instr<15:12>;            // Rt
        iss<4:1> = instr<3:0>;              // CRm
    elsif exception.exceptype == Exception_CP14DTTrap then
        // Trapped LDC/STC
        iss<19:12> = instr<7:0>;            // imm8
        iss<4> = instr<23>;                 // U
        iss<2:1> = instr<24,21>;            // P,W
        if instr<19:16> == '1111' then      // Rn==15, LDC(Literal addressing)/STC
            iss<8:5> = bits(4) UNKNOWN;
            iss<3> = '1';
    elsif exception.exceptype == Exception_Uncategorized then
        // Trapped for unknown reason
        iss<8:5> = instr<19:16>;            // Rn
        iss<3> = '0';

    iss<0> = instr<20>;                     // Direction
    exception.syndrome<24:20> = ConditionSyndrome();
    exception.syndrome<19:0> = iss;

    return exception;
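To make the bit packing concrete, a hedged C sketch of the trapped MRC/MCR case above; pack_mcr_mrc_iss is an illustrative helper name, not an architectural function:

#include <stdint.h>

/* Packs ISS<19:0> for a trapped MRC/MCR as in the CP15RTTrap case:
   opc2 -> iss<19:17>, opc1 -> iss<16:14>, CRn -> iss<13:10>,
   Rt -> iss<8:5>, CRm -> iss<4:1>, direction (instr<20>) -> iss<0>. */
static uint32_t pack_mcr_mrc_iss(uint32_t instr)
{
    uint32_t iss = 0;
    iss |= ((instr >> 5)  & 0x7u) << 17;    /* opc2 = instr<7:5>   */
    iss |= ((instr >> 21) & 0x7u) << 14;    /* opc1 = instr<23:21> */
    iss |= ((instr >> 16) & 0xFu) << 10;    /* CRn  = instr<19:16> */
    iss |= ((instr >> 12) & 0xFu) << 5;     /* Rt   = instr<15:12> */
    iss |= (instr & 0xFu) << 1;             /* CRm  = instr<3:0>   */
    iss |= (instr >> 20) & 0x1u;            /* direction: 1 = MRC  */
    return iss;                             /* iss<9> stays RES0   */
}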

Library pseudocode for aarch32/exceptions/traps/AArch32.TakeHypTrapException

// AArch32.TakeHypTrapException()
// ==============================
// Exceptions routed to Hyp mode as a Hyp Trap exception.
AArch32.TakeHypTrapException(integer ec)
    exception = AArch32.SystemAccessTrapSyndrome(ThisInstr(), ec);
    AArch32.TakeHypTrapException(exception);

// AArch32.TakeHypTrapException()
// ==============================
// Exceptions routed to Hyp mode as a Hyp Trap exception.
AArch32.TakeHypTrapException(ExceptionRecord exception)
    assert HaveEL(EL2) && !IsSecure() && ELUsingAArch32(EL2);

    bits(32) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x14;

    AArch32.EnterHypMode(exception, preferred_exception_return, vect_offset);

Library pseudocode for aarch32/exceptions/traps/AArch32.TakeMonitorTrapException

// AArch32.TakeMonitorTrapException()
// ==================================
// Exceptions routed to Monitor mode as a Monitor Trap exception.
AArch32.TakeMonitorTrapException()
    assert HaveEL(EL3) && ELUsingAArch32(EL3);

    bits(32) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x04;
    lr_offset = if CurrentInstrSet() == InstrSet_A32 then 4 else 2;

    AArch32.EnterMonitorMode(preferred_exception_return, lr_offset, vect_offset);

Library pseudocode for aarch32/exceptions/traps/AArch32.TakeUndefInstrException

// AArch32.TakeUndefInstrException()
// =================================
AArch32.TakeUndefInstrException()
    exception = ExceptionSyndrome(Exception_Uncategorized);
    AArch32.TakeUndefInstrException(exception);

// AArch32.TakeUndefInstrException()
// =================================
AArch32.TakeUndefInstrException(ExceptionRecord exception)
    route_to_hyp = PSTATE.EL == EL0 && EL2Enabled() && HCR.TGE == '1';

    bits(32) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x04;
    lr_offset = if CurrentInstrSet() == InstrSet_A32 then 4 else 2;

    if PSTATE.EL == EL2 then
        AArch32.EnterHypMode(exception, preferred_exception_return, vect_offset);
    elsif route_to_hyp then
        AArch32.EnterHypMode(exception, preferred_exception_return, 0x14);
    else
        AArch32.EnterMode(M32_Undef, preferred_exception_return, lr_offset, vect_offset);

Library pseudocode for aarch32/exceptions/traps/AArch32.UndefinedFault

// AArch32.UndefinedFault()
// ========================
AArch32.UndefinedFault()

    if AArch32.GeneralExceptionsToAArch64() then AArch64.UndefinedFault();

    AArch32.TakeUndefInstrException();

Library pseudocode for aarch32/functions/aborts/AArch32.CreateFaultRecord

// AArch32.CreateFaultRecord()
// ===========================
FaultRecord AArch32.CreateFaultRecord(Fault statuscode, bits(40) ipaddress, bits(4) domain,
                                      integer level, AccType acctype, boolean write, bit extflag,
                                      bits(4) debugmoe, bits(2) errortype, boolean secondstage,
                                      boolean s2fs1walk)

    FaultRecord fault;
    fault.statuscode = statuscode;
    if (statuscode != Fault_None && PSTATE.EL != EL2 && TTBCR.EAE == '0' && !secondstage &&
        !s2fs1walk && AArch32.DomainValid(statuscode, level)) then
        fault.domain = domain;
    else
        fault.domain = bits(4) UNKNOWN;
    fault.debugmoe = debugmoe;
    fault.errortype = errortype;
    fault.ipaddress.NS = bit UNKNOWN;
    fault.ipaddress.address = ZeroExtend(ipaddress);
    fault.level = level;
    fault.acctype = acctype;
    fault.write = write;
    fault.extflag = extflag;
    fault.secondstage = secondstage;
    fault.s2fs1walk = s2fs1walk;

    return fault;

Library pseudocode for aarch32/functions/aborts/AArch32.DomainValid

// AArch32.DomainValid()
// =====================
// Returns TRUE if the Domain is valid for a Short-descriptor translation scheme.
boolean AArch32.DomainValid(Fault statuscode, integer level)
    assert statuscode != Fault_None;

    case statuscode of
        when Fault_Domain
            return TRUE;
        when Fault_Translation, Fault_AccessFlag, Fault_SyncExternalOnWalk, Fault_SyncParityOnWalk
            return level == 2;
        otherwise
            return FALSE;

Library pseudocode for aarch32/functions/aborts/AArch32.FaultStatusLD

// AArch32.FaultStatusLD()
// =======================
// Creates an exception fault status value for Abort and Watchpoint exceptions taken
// to Abort mode using AArch32 and Long-descriptor format.
bits(32) AArch32.FaultStatusLD(boolean d_side, FaultRecord fault)
    assert fault.statuscode != Fault_None;

    bits(32) fsr = Zeros();
    if HaveRASExt() && IsAsyncAbort(fault) then fsr<15:14> = fault.errortype;
    if d_side then
        if fault.acctype IN {AccType_DC, AccType_IC, AccType_AT} then
            fsr<13> = '1'; fsr<11> = '1';
        else
            fsr<11> = if fault.write then '1' else '0';
    if IsExternalAbort(fault) then fsr<12> = fault.extflag;
    fsr<9> = '1';
    fsr<5:0> = EncodeLDFSC(fault.statuscode, fault.level);

    return fsr;

Library pseudocode for aarch32/functions/aborts/AArch32.FaultStatusSD

// AArch32.FaultStatusSD()
// =======================
// Creates an exception fault status value for Abort and Watchpoint exceptions taken
// to Abort mode using AArch32 and Short-descriptor format.
bits(32) AArch32.FaultStatusSD(boolean d_side, FaultRecord fault)
    assert fault.statuscode != Fault_None;

    bits(32) fsr = Zeros();
    if HaveRASExt() && IsAsyncAbort(fault) then fsr<15:14> = fault.errortype;
    if d_side then
        if fault.acctype IN {AccType_DC, AccType_IC, AccType_AT} then
            fsr<13> = '1'; fsr<11> = '1';
        else
            fsr<11> = if fault.write then '1' else '0';
    if IsExternalAbort(fault) then fsr<12> = fault.extflag;
    fsr<9> = '0';
    fsr<10,3:0> = EncodeSDFSC(fault.statuscode, fault.level);
    if d_side then
        fsr<7:4> = fault.domain;            // Domain field (data fault only)

    return fsr;

Library pseudocode for aarch32/functions/aborts/AArch32.FaultSyndrome

// AArch32.FaultSyndrome()
// =======================
// Creates an exception syndrome value for Abort and Watchpoint exceptions taken to
// AArch32 Hyp mode.
bits(25) AArch32.FaultSyndrome(boolean d_side, FaultRecord fault)
    assert fault.statuscode != Fault_None;

    bits(25) iss = Zeros();
    if HaveRASExt() && IsAsyncAbort(fault) then iss<11:10> = fault.errortype; // AET
    if d_side then
        if (IsSecondStage(fault) && !fault.s2fs1walk &&
            (!IsExternalSyncAbort(fault) ||
             (!HaveRASExt() && fault.acctype == AccType_TTW &&
              boolean IMPLEMENTATION_DEFINED "ISV on second stage translation table walk"))) then
            iss<24:14> = LSInstructionSyndrome();
        if fault.acctype IN {AccType_DC, AccType_DC_UNPRIV, AccType_IC, AccType_AT} then
            iss<8> = '1';  iss<6> = '1';
        else
            iss<6> = if fault.write then '1' else '0';
    if IsExternalAbort(fault) then iss<9> = fault.extflag;
    iss<7> = if fault.s2fs1walk then '1' else '0';
    iss<5:0> = EncodeLDFSC(fault.statuscode, fault.level);

    return iss;

Library pseudocode for aarch32/functions/aborts/EncodeSDFSC

// EncodeSDFSC()
// =============
// Function that gives the Short-descriptor FSR code for different types of Fault
bits(5) EncodeSDFSC(Fault statuscode, integer level)

    bits(5) result;
    case statuscode of
        when Fault_AccessFlag
            assert level IN {1,2};
            result = if level == 1 then '00011' else '00110';
        when Fault_Alignment
            result = '00001';
        when Fault_Permission
            assert level IN {1,2};
            result = if level == 1 then '01101' else '01111';
        when Fault_Domain
            assert level IN {1,2};
            result = if level == 1 then '01001' else '01011';
        when Fault_Translation
            assert level IN {1,2};
            result = if level == 1 then '00101' else '00111';
        when Fault_SyncExternal
            result = '01000';
        when Fault_SyncExternalOnWalk
            assert level IN {1,2};
            result = if level == 1 then '01100' else '01110';
        when Fault_SyncParity
            result = '11001';
        when Fault_SyncParityOnWalk
            assert level IN {1,2};
            result = if level == 1 then '11100' else '11110';
        when Fault_AsyncParity
            result = '11000';
        when Fault_AsyncExternal
            result = '10110';
        when Fault_Debug
            result = '00010';
        when Fault_TLBConflict
            result = '10000';
        when Fault_Lockdown
            result = '10100';   // IMPLEMENTATION DEFINED
        when Fault_Exclusive
            result = '10101';   // IMPLEMENTATION DEFINED
        when Fault_ICacheMaint
            result = '00100';
        otherwise
            Unreachable();

    return result;
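For concreteness, a small C sketch of a few level-dependent rows of the table above, using illustrative enum names; for example, a level 2 Translation fault (the short-descriptor "page translation fault") encodes as '00111':

#include <assert.h>
#include <stdint.h>

typedef enum { FAULT_TRANSLATION, FAULT_DOMAIN, FAULT_PERMISSION } fault_t;

/* Level-dependent short-descriptor fault status codes from EncodeSDFSC(). */
static uint8_t encode_sd_fsc(fault_t f, int level)
{
    assert(level == 1 || level == 2);
    switch (f) {
    case FAULT_TRANSLATION: return level == 1 ? 0x05 : 0x07;   /* '00101' / '00111' */
    case FAULT_DOMAIN:      return level == 1 ? 0x09 : 0x0B;   /* '01001' / '01011' */
    case FAULT_PERMISSION:  return level == 1 ? 0x0D : 0x0F;   /* '01101' / '01111' */
    }
    return 0;
}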

Library pseudocode for aarch32/functions/common/A32ExpandImm

// A32ExpandImm()
// ==============
bits(32) A32ExpandImm(bits(12) imm12)

    // PSTATE.C argument to following function call does not affect the imm32 result.
    (imm32, -) = A32ExpandImm_C(imm12, PSTATE.C);

    return imm32;

Library pseudocode for aarch32/functions/common/A32ExpandImm_C

// A32ExpandImm_C()
// ================
(bits(32), bit) A32ExpandImm_C(bits(12) imm12, bit carry_in)

    unrotated_value = ZeroExtend(imm12<7:0>, 32);
    (imm32, carry_out) = Shift_C(unrotated_value, SRType_ROR, 2*UInt(imm12<11:8>), carry_in);

    return (imm32, carry_out);
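A minimal C sketch of the expansion above, assuming 32-bit unsigned arithmetic; the function name is illustrative:

#include <stdint.h>

/* A32 modified immediate: imm12<7:0> rotated right by 2*imm12<11:8>.
   When the rotation is non-zero the carry-out is bit 31 of the result
   (mirroring Shift_C with SRType_ROR); otherwise carry-in is preserved. */
static uint32_t a32_expand_imm_c(uint32_t imm12, int carry_in, int *carry_out)
{
    uint32_t unrotated = imm12 & 0xFFu;
    unsigned amount = ((imm12 >> 8) & 0xFu) * 2;          /* 0, 2, 4, ..., 30 */
    uint32_t imm32 = amount == 0 ? unrotated
                                 : (unrotated >> amount) | (unrotated << (32 - amount));
    *carry_out = amount == 0 ? carry_in : (int)(imm32 >> 31);
    return imm32;
}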

Library pseudocode for aarch32/functions/common/DecodeImmShift

// DecodeImmShift()
// ================
(SRType, integer) DecodeImmShift(bits(2) srtype, bits(5) imm5)

    case srtype of
        when '00'
            shift_t = SRType_LSL;  shift_n = UInt(imm5);
        when '01'
            shift_t = SRType_LSR;  shift_n = if imm5 == '00000' then 32 else UInt(imm5);
        when '10'
            shift_t = SRType_ASR;  shift_n = if imm5 == '00000' then 32 else UInt(imm5);
        when '11'
            if imm5 == '00000' then
                shift_t = SRType_RRX;  shift_n = 1;
            else
                shift_t = SRType_ROR;  shift_n = UInt(imm5);

    return (shift_t, shift_n);

Library pseudocode for aarch32/functions/common/DecodeRegShift

// DecodeRegShift()
// ================
SRType DecodeRegShift(bits(2) srtype)
    case srtype of
        when '00' shift_t = SRType_LSL;
        when '01' shift_t = SRType_LSR;
        when '10' shift_t = SRType_ASR;
        when '11' shift_t = SRType_ROR;
    return shift_t;

Library pseudocode for aarch32/functions/common/RRX

// RRX()
// =====
bits(N) RRX(bits(N) x, bit carry_in)
    (result, -) = RRX_C(x, carry_in);
    return result;

Library pseudocode for aarch32/functions/common/RRX_C

// RRX_C()
// =======
(bits(N), bit) RRX_C(bits(N) x, bit carry_in)
    result = carry_in : x<N-1:1>;
    carry_out = x<0>;
    return (result, carry_out);
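The same operation as a minimal C sketch for 32-bit values; the name is illustrative:

#include <stdint.h>

/* Rotate right with extend: the carry shifts in at bit 31 and bit 0
   shifts out as the new carry, exactly as in RRX_C above. */
static uint32_t rrx_c(uint32_t x, int carry_in, int *carry_out)
{
    *carry_out = (int)(x & 1u);
    return (x >> 1) | ((uint32_t)(carry_in != 0) << 31);
}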

Library pseudocode for aarch32/functions/common/SRType

enumeration SRType {SRType_LSL, SRType_LSR, SRType_ASR, SRType_ROR, SRType_RRX};

Library pseudocode for aarch32/functions/common/Shift

// Shift()
// =======
bits(N) Shift(bits(N) value, SRType srtype, integer amount, bit carry_in)
    (result, -) = Shift_C(value, srtype, amount, carry_in);
    return result;

Library pseudocode for aarch32/functions/common/Shift_C

// Shift_C()
// =========
(bits(N), bit) Shift_C(bits(N) value, SRType srtype, integer amount, bit carry_in)
    assert !(srtype == SRType_RRX && amount != 1);

    if amount == 0 then
        (result, carry_out) = (value, carry_in);
    else
        case srtype of
            when SRType_LSL
                (result, carry_out) = LSL_C(value, amount);
            when SRType_LSR
                (result, carry_out) = LSR_C(value, amount);
            when SRType_ASR
                (result, carry_out) = ASR_C(value, amount);
            when SRType_ROR
                (result, carry_out) = ROR_C(value, amount);
            when SRType_RRX
                (result, carry_out) = RRX_C(value, carry_in);

    return (result, carry_out);
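A hedged C rendering of Shift_C for 32-bit values, assuming shift amounts in the range DecodeImmShift can produce (0..32; larger register-controlled amounts are not handled) and an arithmetic right shift for signed integers. Names are illustrative:

#include <assert.h>
#include <stdint.h>

typedef enum { SR_LSL, SR_LSR, SR_ASR, SR_ROR, SR_RRX } srtype_t;

/* amount == 0 passes value and carry through unchanged; otherwise the
   carry-out is the last bit shifted out (or result<31> for ROR). */
static uint32_t shift_c32(uint32_t v, srtype_t t, unsigned n, int cin, int *cout)
{
    assert(!(t == SR_RRX && n != 1));
    if (n == 0) { *cout = cin; return v; }
    switch (t) {
    case SR_LSL: {                                  /* n in 1..32 */
        uint64_t r = (uint64_t)v << n;
        *cout = (int)((r >> 32) & 1u);
        return (uint32_t)r;
    }
    case SR_LSR:                                    /* n in 1..32 */
        *cout = (int)((v >> (n - 1)) & 1u);
        return n < 32 ? v >> n : 0;
    case SR_ASR:                                    /* n in 1..32 */
        *cout = (int)((v >> (n - 1 < 31 ? n - 1 : 31)) & 1u);
        return (uint32_t)((int32_t)v >> (n < 32 ? n : 31));
    case SR_ROR: {
        unsigned m = n % 32;
        uint32_t r = m ? (v >> m) | (v << (32 - m)) : v;
        *cout = (int)(r >> 31);                     /* ROR_C: carry = result<31> */
        return r;
    }
    case SR_RRX:
        *cout = (int)(v & 1u);
        return (v >> 1) | ((uint32_t)(cin != 0) << 31);
    }
    return v;
}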

Library pseudocode for aarch32/functions/common/T32ExpandImm

// T32ExpandImm()
// ==============
bits(32) T32ExpandImm(bits(12) imm12)

    // PSTATE.C argument to following function call does not affect the imm32 result.
    (imm32, -) = T32ExpandImm_C(imm12, PSTATE.C);

    return imm32;

Library pseudocode for aarch32/functions/common/T32ExpandImm_C

// T32ExpandImm_C()
// ================
(bits(32), bit) T32ExpandImm_C(bits(12) imm12, bit carry_in)

    if imm12<11:10> == '00' then
        case imm12<9:8> of
            when '00'
                imm32 = ZeroExtend(imm12<7:0>, 32);
            when '01'
                imm32 = '00000000' : imm12<7:0> : '00000000' : imm12<7:0>;
            when '10'
                imm32 = imm12<7:0> : '00000000' : imm12<7:0> : '00000000';
            when '11'
                imm32 = imm12<7:0> : imm12<7:0> : imm12<7:0> : imm12<7:0>;
        carry_out = carry_in;
    else
        unrotated_value = ZeroExtend('1':imm12<6:0>, 32);
        (imm32, carry_out) = ROR_C(unrotated_value, UInt(imm12<11:7>));

    return (imm32, carry_out);
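A minimal C sketch of the expansion above (illustrative names). Note that in the rotated case the amount imm12<11:7> is at least 8, so the rotation is never zero:

#include <stdint.h>

/* T32 modified immediate: imm12<11:10> == 00 selects one of four byte
   replication patterns; otherwise '1':imm12<6:0> is rotated right by
   UInt(imm12<11:7>), with the carry-out taken from result bit 31. */
static uint32_t t32_expand_imm_c(uint32_t imm12, int carry_in, int *carry_out)
{
    uint32_t b = imm12 & 0xFFu;
    switch ((imm12 >> 8) & 0xFu) {                  /* imm12<11:8> */
    case 0x0: *carry_out = carry_in; return b;
    case 0x1: *carry_out = carry_in; return (b << 16) | b;
    case 0x2: *carry_out = carry_in; return (b << 24) | (b << 8);
    case 0x3: *carry_out = carry_in; return (b << 24) | (b << 16) | (b << 8) | b;
    default: {
        uint32_t unrotated = 0x80u | (imm12 & 0x7Fu);
        unsigned amount = (imm12 >> 7) & 0x1Fu;     /* 8..31 here */
        uint32_t imm32 = (unrotated >> amount) | (unrotated << (32 - amount));
        *carry_out = (int)(imm32 >> 31);
        return imm32;
    }
    }
}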

Library pseudocode for aarch32/functions/coproc/AArch32.CheckCP15InstrCoarseTraps

// AArch32.CheckCP15InstrCoarseTraps()
// ===================================
// Check for coarse-grained CP15 traps in HSTR and HCR.
boolean AArch32.CheckCP15InstrCoarseTraps(integer CRn, integer nreg, integer CRm)

    // Check for coarse-grained Hyp traps
    if PSTATE.EL IN {EL0, EL1} && EL2Enabled() then
        if PSTATE.EL == EL0 && !ELUsingAArch32(EL2) then
            return AArch64.CheckCP15InstrCoarseTraps(CRn, nreg, CRm);
        // Check for MCR, MRC, MCRR and MRRC disabled by HSTR<CRn/CRm>
        major = if nreg == 1 then CRn else CRm;
        if !(major IN {4,14}) && HSTR<major> == '1' then
            return TRUE;

        // Check for MRC and MCR disabled by HCR.TIDCP
        if (HCR.TIDCP == '1' && nreg == 1 &&
            ((CRn == 9  && CRm IN {0,1,2, 5,6,7,8 }) ||
             (CRn == 10 && CRm IN {0,1, 4, 8 })      ||
             (CRn == 11 && CRm IN {0,1,2,3,4,5,6,7,8,15}))) then
            return TRUE;

    return FALSE;

Library pseudocode for aarch32/functions/exclusive/AArch32.ExclusiveMonitorsPass

// AArch32.ExclusiveMonitorsPass()
// ===============================
// Return TRUE if the Exclusives monitors for the current PE include all of the addresses
// associated with the virtual address region of size bytes starting at address.
// The immediately following memory write must be to the same addresses.
boolean AArch32.ExclusiveMonitorsPass(bits(32) address, integer size)

    // It is IMPLEMENTATION DEFINED whether the detection of memory aborts happens
    // before or after the check on the local Exclusives monitor. As a result a failure
    // of the local monitor can occur on some implementations even if the memory
    // access would give a memory abort.

    acctype = AccType_ATOMIC;
    iswrite = TRUE;
    aligned = AArch32.CheckAlignment(address, size, acctype, iswrite);
    passed = AArch32.IsExclusiveVA(address, ProcessorID(), size);
    if !passed then
        return FALSE;
    memaddrdesc = AArch32.TranslateAddress(address, acctype, iswrite, aligned, size);

    // Check for aborts or debug exceptions
    if IsFault(memaddrdesc) then
        AArch32.Abort(address, memaddrdesc.fault);

    passed = IsExclusiveLocal(memaddrdesc.paddress, ProcessorID(), size);
    ClearExclusiveLocal(ProcessorID());
    if passed then
        if memaddrdesc.memattrs.shareable then
            passed = IsExclusiveGlobal(memaddrdesc.paddress, ProcessorID(), size);

    return passed;

Library pseudocode for aarch32/functions/exclusive/AArch32.IsExclusiveVA

// An optional IMPLEMENTATION DEFINED test for an exclusive access to a virtual
// address region of size bytes starting at address.
//
// It is permitted (but not required) for this function to return FALSE and
// cause a store exclusive to fail if the virtual address region is not
// totally included within the region recorded by MarkExclusiveVA().
//
// It is always safe to return TRUE which will check the physical address only.
boolean AArch32.IsExclusiveVA(bits(32) address, integer processorid, integer size);

Library pseudocode for aarch32/functions/exclusive/AArch32.MarkExclusiveVA

// Optionally record an exclusive access to the virtual address region of size bytes
// starting at address for processorid.
AArch32.MarkExclusiveVA(bits(32) address, integer processorid, integer size);

Library pseudocode for aarch32/functions/exclusive/AArch32.SetExclusiveMonitors

// AArch32.SetExclusiveMonitors()
// ==============================
// Sets the Exclusives monitors for the current PE to record the addresses associated
// with the virtual address region of size bytes starting at address.

AArch32.SetExclusiveMonitors(bits(32) address, integer size)

    acctype = AccType_ATOMIC;
    iswrite = FALSE;
    aligned = AArch32.CheckAlignment(address, size, acctype, iswrite);

    memaddrdesc = AArch32.TranslateAddress(address, acctype, iswrite, aligned, size);

    // Check for aborts or debug exceptions
    if IsFault(memaddrdesc) then
        return;

    if memaddrdesc.memattrs.shareable then
        MarkExclusiveGlobal(memaddrdesc.paddress, ProcessorID(), size);

    MarkExclusiveLocal(memaddrdesc.paddress, ProcessorID(), size);

    AArch32.MarkExclusiveVA(address, ProcessorID(), size);
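These functions are the pseudocode underneath the LDREX/STREX pair: LDREX invokes AArch32.SetExclusiveMonitors(), and the STREX store is performed only if AArch32.ExclusiveMonitorsPass() returns TRUE. A minimal GCC-style A32 compare-and-swap loop, shown only to illustrate where each function conceptually runs:

    #include <stdint.h>

    /* Returns 1 if *p was atomically changed from oldv to newv, else 0. */
    static int cas32(volatile uint32_t *p, uint32_t oldv, uint32_t newv)
    {
        uint32_t cur, failed;
        for (;;) {
            __asm__ volatile("ldrex %0, [%1]"            /* AArch32.SetExclusiveMonitors() */
                             : "=&r"(cur) : "r"(p) : "memory");
            if (cur != oldv) {
                __asm__ volatile("clrex" ::: "memory");  /* drop the monitor */
                return 0;
            }
            __asm__ volatile("strex %0, %2, [%1]"        /* stores iff ExclusiveMonitorsPass() */
                             : "=&r"(failed) : "r"(p), "r"(newv) : "memory");
            if (!failed)
                return 1;
        }
    }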

Library pseudocode for aarch32/functions/float/CheckAdvSIMDEnabled

// CheckAdvSIMDEnabled()
// =====================

CheckAdvSIMDEnabled()

    fpexc_check = TRUE;
    advsimd = TRUE;

    AArch32.CheckAdvSIMDOrFPEnabled(fpexc_check, advsimd);
    // Return from CheckAdvSIMDOrFPEnabled() occurs only if Advanced SIMD access is permitted

    // Make temporary copy of D registers
    // _Dclone[] is used as input data for instruction pseudocode
    for i = 0 to 31
        _Dclone[i] = D[i];

    return;

Library pseudocode for aarch32/functions/float/CheckAdvSIMDOrVFPEnabled

// CheckAdvSIMDOrVFPEnabled()
// ==========================

CheckAdvSIMDOrVFPEnabled(boolean include_fpexc_check, boolean advsimd)

    AArch32.CheckAdvSIMDOrFPEnabled(include_fpexc_check, advsimd);
    // Return from CheckAdvSIMDOrFPEnabled() occurs only if VFP access is permitted

    return;

Library pseudocode for aarch32/functions/float/CheckCryptoEnabled32

// CheckCryptoEnabled32()
// ======================

CheckCryptoEnabled32()

    CheckAdvSIMDEnabled();
    // Return from CheckAdvSIMDEnabled() occurs only if access is permitted

    return;

Library pseudocode for aarch32/functions/float/CheckVFPEnabled

// CheckVFPEnabled()
// =================

CheckVFPEnabled(boolean include_fpexc_check)

    advsimd = FALSE;
    AArch32.CheckAdvSIMDOrFPEnabled(include_fpexc_check, advsimd);
    // Return from CheckAdvSIMDOrFPEnabled() occurs only if VFP access is permitted

    return;

Library pseudocode for aarch32/functions/float/FPHalvedSub

// FPHalvedSub()
// =============

bits(N) FPHalvedSub(bits(N) op1, bits(N) op2, FPCRType fpcr)
    assert N IN {16,32,64};
    rounding = FPRoundingMode(fpcr);
    (type1,sign1,value1) = FPUnpack(op1, fpcr);
    (type2,sign2,value2) = FPUnpack(op2, fpcr);
    (done,result) = FPProcessNaNs(type1, type2, op1, op2, fpcr);
    if !done then
        inf1 = (type1 == FPType_Infinity); inf2 = (type2 == FPType_Infinity);
        zero1 = (type1 == FPType_Zero);    zero2 = (type2 == FPType_Zero);
        if inf1 && inf2 && sign1 == sign2 then
            result = FPDefaultNaN();
            FPProcessException(FPExc_InvalidOp, fpcr);
        elsif (inf1 && sign1 == '0') || (inf2 && sign2 == '1') then
            result = FPInfinity('0');
        elsif (inf1 && sign1 == '1') || (inf2 && sign2 == '0') then
            result = FPInfinity('1');
        elsif zero1 && zero2 && sign1 != sign2 then
            result = FPZero(sign1);
        else
            result_value = (value1 - value2) / 2.0;
            if result_value == 0.0 then     // Sign of exact zero result depends on rounding mode
                result_sign = if rounding == FPRounding_NEGINF then '1' else '0';
                result = FPZero(result_sign);
            else
                result = FPRound(result_value, fpcr);
    return result;

Library pseudocode for aarch32/functions/float/FPRSqrtStep

// FPRSqrtStep()
// =============

bits(N) FPRSqrtStep(bits(N) op1, bits(N) op2)
    assert N IN {16,32};
    FPCRType fpcr = StandardFPSCRValue();
    (type1,sign1,value1) = FPUnpack(op1, fpcr);
    (type2,sign2,value2) = FPUnpack(op2, fpcr);
    (done,result) = FPProcessNaNs(type1, type2, op1, op2, fpcr);
    if !done then
        inf1 = (type1 == FPType_Infinity); inf2 = (type2 == FPType_Infinity);
        zero1 = (type1 == FPType_Zero);    zero2 = (type2 == FPType_Zero);
        bits(N) product;
        if (inf1 && zero2) || (zero1 && inf2) then
            product = FPZero('0');
        else
            product = FPMul(op1, op2, fpcr);
        bits(N) three = FPThree('0');
        result = FPHalvedSub(three, product, fpcr);
    return result;
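Setting aside the NaN, infinity and zero special cases, FPRSqrtStep(op1, op2) evaluates FPHalvedSub(3, op1*op2), that is (3 - op1*op2)/2. This is the correction factor of one Newton-Raphson iteration for the reciprocal square root of d:

    $$ y_{n+1} = y_n \cdot \frac{3 - d\,y_n^{2}}{2} $$

so the usual VRSQRTE/VRSQRTS refinement passes op1 = d*y_n and op2 = y_n, then multiplies the returned factor by y_n.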

Library pseudocode for aarch32/functions/float/FPRecipStep

// FPRecipStep()
// =============

bits(N) FPRecipStep(bits(N) op1, bits(N) op2)
    assert N IN {16,32};
    FPCRType fpcr = StandardFPSCRValue();
    (type1,sign1,value1) = FPUnpack(op1, fpcr);
    (type2,sign2,value2) = FPUnpack(op2, fpcr);
    (done,result) = FPProcessNaNs(type1, type2, op1, op2, fpcr);
    if !done then
        inf1 = (type1 == FPType_Infinity); inf2 = (type2 == FPType_Infinity);
        zero1 = (type1 == FPType_Zero);    zero2 = (type2 == FPType_Zero);
        bits(N) product;
        if (inf1 && zero2) || (zero1 && inf2) then
            product = FPZero('0');
        else
            product = FPMul(op1, op2, fpcr);
        bits(N) two = FPTwo('0');
        result = FPSub(two, product, fpcr);
    return result;
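Likewise, ignoring the special cases, FPRecipStep(op1, op2) evaluates 2 - op1*op2, the correction factor of one Newton-Raphson iteration for the reciprocal of d:

    $$ x_{n+1} = x_n \,(2 - d\,x_n) $$

with op1 = d and op2 = x_n in the VRECPE/VRECPS refinement idiom, the result being multiplied by x_n.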

Library pseudocode for aarch32/functions/float/StandardFPSCRValue

// StandardFPSCRValue()
// ====================

FPCRType StandardFPSCRValue()
    bits(32) upper = '00000000000000000000000000000000';
    bits(32) lower = '00000' : FPSCR.AHP : '110000' : FPSCR.FZ16 : '0000000000000000000';
    return upper : lower;
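Spelled out by bit position, the returned value has FPSCR.AHP copied to bit 26, DN (bit 25) and FZ (bit 24) forced to 1, FPSCR.FZ16 copied to bit 19, and all other bits (including the whole upper word) zero. A C sketch of the same packing, assuming the 64-bit FPCRType used above (function and parameter names invented):

    #include <stdint.h>

    static uint64_t standard_fpscr_value(unsigned ahp, unsigned fz16)
    {
        return ((uint64_t)(ahp  & 1u) << 26)   /* AHP: alternative half-precision */
             | (1u << 25)                      /* DN:  default NaN */
             | (1u << 24)                      /* FZ:  flush-to-zero */
             | ((uint64_t)(fz16 & 1u) << 19);  /* FZ16 */
    }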

Library pseudocode for aarch32/functions/memory/AArch32.CheckAlignment

// AArch32.CheckAlignment()
// ========================

boolean AArch32.CheckAlignment(bits(32) address, integer alignment, AccType acctype,
                               boolean iswrite)

    if PSTATE.EL == EL0 && !ELUsingAArch32(S1TranslationRegime()) then
        A = SCTLR[].A;           // Use the AArch64 register when the higher Exception level is using AArch64
    elsif PSTATE.EL == EL2 then
        A = HSCTLR.A;
    else
        A = SCTLR.A;
    aligned = (address == Align(address, alignment));
    atomic  = acctype IN { AccType_ATOMIC, AccType_ATOMICRW, AccType_ORDEREDATOMIC, AccType_ORDEREDATOMICRW, AccType_ATOMICLS64 };
    ordered = acctype IN { AccType_ORDERED, AccType_ORDEREDRW, AccType_LIMITEDORDERED, AccType_ORDEREDATOMIC, AccType_ORDEREDATOMICRW };
    vector  = acctype == AccType_VEC;

    // AccType_VEC is used for SIMD element alignment checks only
    check = (atomic || ordered || vector || A == '1');

    if check && !aligned then
        secondstage = FALSE;
        AArch32.Abort(address, AArch32.AlignmentFault(acctype, iswrite, secondstage));

    return aligned;
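For the power-of-two alignments used here, the address == Align(address, alignment) comparison is the familiar mask test; in C (a sketch, valid only when alignment is a power of two):

    #include <stdbool.h>
    #include <stdint.h>

    static bool is_aligned(uint32_t address, uint32_t alignment)
    {
        return (address & (alignment - 1u)) == 0u;   /* address == Align(address, alignment) */
    }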

Library pseudocode for aarch32/functions/memory/AArch32.MemSingle

// AArch32.MemSingle[] - non-assignment (read) form
// ================================================
// Perform an atomic, little-endian read of 'size' bytes.

bits(size*8) AArch32.MemSingle[bits(32) address, integer size, AccType acctype, boolean wasaligned]
    assert size IN {1, 2, 4, 8, 16};
    assert address == Align(address, size);

    AddressDescriptor memaddrdesc;
    bits(size*8) value;
    iswrite = FALSE;

    memaddrdesc = AArch32.TranslateAddress(address, acctype, iswrite, wasaligned, size);
    // Check for aborts or debug exceptions
    if IsFault(memaddrdesc) then
        AArch32.Abort(address, memaddrdesc.fault);

    // Memory array access
    accdesc = CreateAccessDescriptor(acctype);
    if HaveMTEExt() then
        if AArch64.AccessIsTagChecked(ZeroExtend(address, 64), acctype) then
            bits(4) ptag = AArch64.PhysicalTag(ZeroExtend(address, 64));
            if !AArch64.CheckTag(memaddrdesc, ptag, iswrite) then
                AArch64.TagCheckFault(ZeroExtend(address, 64), acctype, iswrite);
    value = _Mem[memaddrdesc, size, accdesc, FALSE];

    return value;

// AArch32.MemSingle[] - assignment (write) form
// =============================================
// Perform an atomic, little-endian write of 'size' bytes.

AArch32.MemSingle[bits(32) address, integer size, AccType acctype, boolean wasaligned] = bits(size*8) value
    assert size IN {1, 2, 4, 8, 16};
    assert address == Align(address, size);

    AddressDescriptor memaddrdesc;
    iswrite = TRUE;

    memaddrdesc = AArch32.TranslateAddress(address, acctype, iswrite, wasaligned, size);
    // Check for aborts or debug exceptions
    if IsFault(memaddrdesc) then
        AArch32.Abort(address, memaddrdesc.fault);

    // Effect on exclusives
    if memaddrdesc.memattrs.shareable then
        ClearExclusiveByAddress(memaddrdesc.paddress, ProcessorID(), size);

    // Memory array access
    accdesc = CreateAccessDescriptor(acctype);
    if HaveMTEExt() then
        if AArch64.AccessIsTagChecked(ZeroExtend(address, 64), acctype) then
            bits(4) ptag = AArch64.PhysicalTag(ZeroExtend(address, 64));
            if !AArch64.CheckTag(memaddrdesc, ptag, iswrite) then
                AArch64.TagCheckFault(ZeroExtend(address, 64), acctype, iswrite);
    _Mem[memaddrdesc, size, accdesc] = value;

    return;

Library pseudocode for aarch32/functions/memory/Hint_PreloadData

Hint_PreloadData(bits(32) address);

Library pseudocode for aarch32/functions/memory/Hint_PreloadDataForWrite

Hint_PreloadDataForWrite(bits(32) address);

Library pseudocode for aarch32/functions/memory/Hint_PreloadInstr

Hint_PreloadInstr(bits(32) address);

Library pseudocode for aarch32/functions/memory/MemA

// MemA[] - non-assignment form
// ============================

bits(8*size) MemA[bits(32) address, integer size]
    acctype = AccType_ATOMIC;
    return Mem_with_type[address, size, acctype];

// MemA[] - assignment form
// ========================

MemA[bits(32) address, integer size] = bits(8*size) value
    acctype = AccType_ATOMIC;
    Mem_with_type[address, size, acctype] = value;
    return;

Library pseudocode for aarch32/functions/memory/MemO

// MemO[] - non-assignment form
// ============================

bits(8*size) MemO[bits(32) address, integer size]
    acctype = AccType_ORDERED;
    return Mem_with_type[address, size, acctype];

// MemO[] - assignment form
// ========================

MemO[bits(32) address, integer size] = bits(8*size) value
    acctype = AccType_ORDERED;
    Mem_with_type[address, size, acctype] = value;
    return;

Library pseudocode for aarch32/functions/memory/MemU

// MemU[] - non-assignment form
// ============================

bits(8*size) MemU[bits(32) address, integer size]
    acctype = AccType_NORMAL;
    return Mem_with_type[address, size, acctype];

// MemU[] - assignment form
// ========================

MemU[bits(32) address, integer size] = bits(8*size) value
    acctype = AccType_NORMAL;
    Mem_with_type[address, size, acctype] = value;
    return;

Library pseudocode for aarch32/functions/memory/MemU_unpriv

// MemU_unpriv[] - non-assignment form
// ===================================

bits(8*size) MemU_unpriv[bits(32) address, integer size]
    acctype = AccType_UNPRIV;
    return Mem_with_type[address, size, acctype];

// MemU_unpriv[] - assignment form
// ===============================

MemU_unpriv[bits(32) address, integer size] = bits(8*size) value
    acctype = AccType_UNPRIV;
    Mem_with_type[address, size, acctype] = value;
    return;

Library pseudocode for aarch32/functions/memory/Mem_with_type

// Mem_with_type[] - non-assignment (read) form
// ============================================
// Perform a read of 'size' bytes. The access byte order is reversed for a big-endian access.
// Instruction fetches would call AArch32.MemSingle directly.

bits(size*8) Mem_with_type[bits(32) address, integer size, AccType acctype]
    assert size IN {1, 2, 4, 8, 16};
    bits(size*8) value;
    boolean iswrite = FALSE;

    aligned = AArch32.CheckAlignment(address, size, acctype, iswrite);
    if !aligned then
        assert size > 1;
        value<7:0> = AArch32.MemSingle[address, 1, acctype, aligned];

        // For subsequent bytes it is CONSTRAINED UNPREDICTABLE whether an unaligned Device memory
        // access will generate an Alignment Fault, as to get this far means the first byte did
        // not, so we must be changing to a new translation page.
        c = ConstrainUnpredictable(Unpredictable_DEVPAGE2);
        assert c IN {Constraint_FAULT, Constraint_NONE};
        if c == Constraint_NONE then aligned = TRUE;

        for i = 1 to size-1
            value<8*i+7:8*i> = AArch32.MemSingle[address+i, 1, acctype, aligned];
    else
        value = AArch32.MemSingle[address, size, acctype, aligned];

    if BigEndian(acctype) then
        value = BigEndianReverse(value);
    return value;

// Mem_with_type[] - assignment (write) form
// =========================================
// Perform a write of 'size' bytes. The byte order is reversed for a big-endian access.

Mem_with_type[bits(32) address, integer size, AccType acctype] = bits(size*8) value
    boolean iswrite = TRUE;

    if BigEndian(acctype) then
        value = BigEndianReverse(value);

    aligned = AArch32.CheckAlignment(address, size, acctype, iswrite);
    if !aligned then
        assert size > 1;
        AArch32.MemSingle[address, 1, acctype, aligned] = value<7:0>;

        // For subsequent bytes it is CONSTRAINED UNPREDICTABLE whether an unaligned Device memory
        // access will generate an Alignment Fault, as to get this far means the first byte did
        // not, so we must be changing to a new translation page.
        c = ConstrainUnpredictable(Unpredictable_DEVPAGE2);
        assert c IN {Constraint_FAULT, Constraint_NONE};
        if c == Constraint_NONE then aligned = TRUE;

        for i = 1 to size-1
            AArch32.MemSingle[address+i, 1, acctype, aligned] = value<8*i+7:8*i>;
    else
        AArch32.MemSingle[address, size, acctype, aligned] = value;
    return;
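Two details above model directly in C: BigEndianReverse() is a plain byte reversal of the value, and the unaligned path splits the access into single-byte AArch32.MemSingle accesses. A hedged sketch with invented names, using a flat byte array in place of the real memory model:

    #include <stdint.h>

    static uint32_t byte_reverse32(uint32_t v)   /* BigEndianReverse for size == 4 */
    {
        return (v >> 24) | ((v >> 8) & 0x0000FF00u)
             | ((v << 8) & 0x00FF0000u) | (v << 24);
    }

    /* Unaligned read decomposed into byte reads, little-endian gather. */
    static uint32_t read32_unaligned(const uint8_t *mem, uint32_t addr)
    {
        uint32_t value = 0;
        for (int i = 0; i < 4; i++)
            value |= (uint32_t)mem[addr + i] << (8 * i);
        return value;
    }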

Library pseudocode for aarch32/functions/ras/AArch32.ESBOperation

// AArch32.ESBOperation()
// ======================
// Perform the AArch32 ESB operation for ESB executed in AArch32 state

AArch32.ESBOperation()

    // Check if routed to AArch64 state
    route_to_aarch64 = PSTATE.EL == EL0 && !ELUsingAArch32(EL1);
    if !route_to_aarch64 && EL2Enabled() && !ELUsingAArch32(EL2) then
        route_to_aarch64 = HCR_EL2.TGE == '1' || HCR_EL2.AMO == '1';
    if !route_to_aarch64 && HaveEL(EL3) && !ELUsingAArch32(EL3) then
        route_to_aarch64 = SCR_EL3.EA == '1';

    if route_to_aarch64 then
        AArch64.ESBOperation();
        return;

    route_to_monitor = HaveEL(EL3) && ELUsingAArch32(EL3) && SCR.EA == '1';
    route_to_hyp = PSTATE.EL IN {EL0, EL1} && EL2Enabled() && (HCR.TGE == '1' || HCR.AMO == '1');

    if route_to_monitor then
        target = M32_Monitor;
    elsif route_to_hyp || PSTATE.M == M32_Hyp then
        target = M32_Hyp;
    else
        target = M32_Abort;

    if IsSecure() then
        mask_active = TRUE;
    elsif target == M32_Monitor then
        mask_active = SCR.AW == '1' && (!HaveEL(EL2) || (HCR.TGE == '0' && HCR.AMO == '0'));
    else
        mask_active = target == M32_Abort || PSTATE.M == M32_Hyp;

    mask_set = PSTATE.A == '1';
    (-, el)  = ELFromM32(target);
    intdis   = Halted() || ExternalDebugInterruptsDisabled(el);
    masked   = intdis || (mask_active && mask_set);

    // Check for a masked Physical SError pending that can be synchronized
    // by an Error synchronization event.
    if masked && IsSynchronizablePhysicalSErrorPending() then
        syndrome32 = AArch32.PhysicalSErrorSyndrome();
        DISR = AArch32.ReportDeferredSError(syndrome32.AET, syndrome32.ExT);
        ClearPendingPhysicalSError();

    return;

Library pseudocode for aarch32/functions/ras/AArch32.PhysicalSErrorSyndrome

// Return the SError syndrome
AArch32.SErrorSyndrome AArch32.PhysicalSErrorSyndrome();

Library pseudocode for aarch32/functions/ras/AArch32.ReportDeferredSError

// AArch32.ReportDeferredSError()
// ==============================
// Return deferred SError syndrome

bits(32) AArch32.ReportDeferredSError(bits(2) AET, bit ExT)
    bits(32) target;
    target<31> = '1';                      // A
    syndrome = Zeros(16);
    if PSTATE.EL == EL2 then
        syndrome<11:10> = AET;             // AET
        syndrome<9>     = ExT;             // EA
        syndrome<5:0>   = '010001';        // DFSC
    else
        syndrome<15:14> = AET;             // AET
        syndrome<12>    = ExT;             // ExT
        syndrome<9>     = TTBCR.EAE;       // LPAE
        if TTBCR.EAE == '1' then           // Long-descriptor format
            syndrome<5:0> = '010001';      // STATUS
        else                               // Short-descriptor format
            syndrome<10,3:0> = '10110';    // FS
    if HaveAnyAArch64() then
        target<24:0> = ZeroExtend(syndrome);   // Any RES0 fields must be set to zero
    else
        target<15:0> = syndrome;
    return target;
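As a reading aid, the non-Hyp, long-descriptor case (TTBCR.EAE == '1') packs the returned value as in this C sketch (function name invented; bit positions follow the comments above):

    #include <stdint.h>

    static uint32_t report_deferred_serror_lpae(uint32_t aet, uint32_t ext)
    {
        uint32_t syndrome = 0;
        syndrome |= (aet & 3u) << 14;    /* AET */
        syndrome |= (ext & 1u) << 12;    /* ExT */
        syndrome |= 1u << 9;             /* LPAE */
        syndrome |= 0x11u;               /* STATUS = '010001' */
        return (1u << 31) | syndrome;    /* A flag; RES0 fields stay zero */
    }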

Library pseudocode for aarch32/functions/ras/AArch32.SErrorSyndrome

type AArch32.SErrorSyndrome is (
    bits(2) AET,
    bit ExT
)

Library pseudocode for aarch32/functions/ras/AArch32.vESBOperation

// AArch32.vESBOperation()
// =======================
// Perform the ESB operation for virtual SError interrupts executed in AArch32 state

AArch32.vESBOperation()
    assert PSTATE.EL IN {EL0, EL1} && EL2Enabled();

    // Check for EL2 using AArch64 state
    if !ELUsingAArch32(EL2) then
        AArch64.vESBOperation();
        return;

    // If physical SError interrupts are routed to Hyp mode, and TGE is not set, then a
    // virtual SError interrupt might be pending
    vSEI_enabled = HCR.TGE == '0' && HCR.AMO == '1';
    vSEI_pending = vSEI_enabled && HCR.VA == '1';
    vintdis      = Halted() || ExternalDebugInterruptsDisabled(EL1);
    vmasked      = vintdis || PSTATE.A == '1';

    // Check for a masked virtual SError pending
    if vSEI_pending && vmasked then
        VDISR = AArch32.ReportDeferredSError(VDFSR<15:14>, VDFSR<12>);
        HCR.VA = '0';    // Clear pending virtual SError

    return;

Library pseudocode for aarch32/functions/registers/AArch32.ResetGeneralRegisters

// AArch32.ResetGeneralRegisters()
// ===============================

AArch32.ResetGeneralRegisters()

    for i = 0 to 7
        R[i] = bits(32) UNKNOWN;
    for i = 8 to 12
        Rmode[i, M32_User] = bits(32) UNKNOWN;
        Rmode[i, M32_FIQ] = bits(32) UNKNOWN;
    if HaveEL(EL2) then Rmode[13, M32_Hyp] = bits(32) UNKNOWN;    // No R14_hyp
    for i = 13 to 14
        Rmode[i, M32_User] = bits(32) UNKNOWN;
        Rmode[i, M32_FIQ] = bits(32) UNKNOWN;
        Rmode[i, M32_IRQ] = bits(32) UNKNOWN;
        Rmode[i, M32_Svc] = bits(32) UNKNOWN;
        Rmode[i, M32_Abort] = bits(32) UNKNOWN;
        Rmode[i, M32_Undef] = bits(32) UNKNOWN;
        if HaveEL(EL3) then Rmode[i, M32_Monitor] = bits(32) UNKNOWN;

    return;

Library pseudocode for aarch32/functions/registers/AArch32.ResetSIMDFPRegisters

// AArch32.ResetSIMDFPRegisters()
// ==============================

AArch32.ResetSIMDFPRegisters()

    for i = 0 to 15
        Q[i] = bits(128) UNKNOWN;

    return;

Library pseudocode for aarch32/functions/registers/AArch32.ResetSpecialRegisters

// AArch32.ResetSpecialRegisters()
// ===============================

AArch32.ResetSpecialRegisters()

    // AArch32 special registers
    SPSR_fiq<31:0> = bits(32) UNKNOWN;
    SPSR_irq<31:0> = bits(32) UNKNOWN;
    SPSR_svc<31:0> = bits(32) UNKNOWN;
    SPSR_abt<31:0> = bits(32) UNKNOWN;
    SPSR_und<31:0> = bits(32) UNKNOWN;
    if HaveEL(EL2) then
        SPSR_hyp = bits(32) UNKNOWN;
        ELR_hyp = bits(32) UNKNOWN;
    if HaveEL(EL3) then
        SPSR_mon = bits(32) UNKNOWN;

    // External debug special registers
    DLR = bits(32) UNKNOWN;
    DSPSR = bits(32) UNKNOWN;

    return;

Library pseudocode for aarch32/functions/registers/AArch32.ResetSystemRegisters

AArch32.ResetSystemRegisters(boolean cold_reset);

Library pseudocode for aarch32/functions/registers/ALUExceptionReturn

// ALUExceptionReturn()
// ====================

ALUExceptionReturn(bits(32) address)
    if PSTATE.EL == EL2 then
        UNDEFINED;
    elsif PSTATE.M IN {M32_User, M32_System} then
        Constraint c = ConstrainUnpredictable(Unpredictable_ALUEXCEPTIONRETURN);
        assert c IN {Constraint_UNDEF, Constraint_NOP};
        case c of
            when Constraint_UNDEF
                UNDEFINED;
            when Constraint_NOP
                EndOfInstruction();
    else
        AArch32.ExceptionReturn(address, SPSR[]);

Library pseudocode for aarch32/functions/registers/ALUWritePC

// ALUWritePC()
// ============

ALUWritePC(bits(32) address)
    if CurrentInstrSet() == InstrSet_A32 then
        BXWritePC(address, BranchType_INDIR);
    else
        BranchWritePC(address, BranchType_INDIR);

Library pseudocode for aarch32/functions/registers/BXWritePC

// BXWritePC()
// ===========

BXWritePC(bits(32) address, BranchType branch_type)
    if address<0> == '1' then
        SelectInstrSet(InstrSet_T32);
        address<0> = '0';
    else
        SelectInstrSet(InstrSet_A32);
        // For branches to an unaligned PC counter in A32 state, the processor takes the branch
        // and does one of:
        // * Forces the address to be aligned
        // * Leaves the PC unaligned, meaning the target generates a PC Alignment fault.
        if address<1> == '1' && ConstrainUnpredictableBool(Unpredictable_A32FORCEALIGNPC) then
            address<1> = '0';
    BranchTo(address, branch_type);
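The interworking rule in C terms: bit[0] of the target selects the T32 instruction set and is then cleared, while the A32 bit[1] case is CONSTRAINED UNPREDICTABLE and is left untouched in this sketch (names invented):

    #include <stdbool.h>
    #include <stdint.h>

    /* Returns the address branched to; *thumb receives the new instruction set. */
    static uint32_t bx_write_pc(uint32_t address, bool *thumb)
    {
        if (address & 1u) {              /* SelectInstrSet(InstrSet_T32) */
            *thumb = true;
            return address & ~1u;
        }
        *thumb = false;                  /* SelectInstrSet(InstrSet_A32) */
        return address;                  /* unaligned bit[1]: CONSTRAINED UNPREDICTABLE */
    }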

Library pseudocode for aarch32/functions/registers/BranchWritePC

// BranchWritePC()
// ===============

BranchWritePC(bits(32) address, BranchType branch_type)
    if CurrentInstrSet() == InstrSet_A32 then
        address<1:0> = '00';
    else
        address<0> = '0';
    BranchTo(address, branch_type);

Library pseudocode for aarch32/functions/registers/D

// D[] - non-assignment form
// =========================

bits(64) D[integer n]
    assert n >= 0 && n <= 31;
    base = (n MOD 2) * 64;
    bits(128) vreg = V[n DIV 2];
    return vreg<base+63:base>;

// D[] - assignment form
// =====================

D[integer n] = bits(64) value
    assert n >= 0 && n <= 31;
    base = (n MOD 2) * 64;
    bits(128) vreg = V[n DIV 2];
    vreg<base+63:base> = value;
    V[n DIV 2] = vreg;
    return;
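The S, D and Q registers are overlapping views of one 128-bit register file, with Q[n] = D[2n+1]:D[2n] and D[n] = S[2n+1]:S[2n]. The index arithmetic above, sketched in C with each V register modelled as two 64-bit halves (representation invented for the example):

    #include <stdint.h>

    static uint64_t V[16][2];            /* V[n][0] = bits<63:0>, V[n][1] = bits<127:64> */

    static uint64_t read_D(int n)        /* D[n]: (n MOD 2)th half of V[n DIV 2] */
    {
        return V[n / 2][n % 2];
    }

    static uint32_t read_S(int n)        /* S[n]: (n MOD 4)th word of V[n DIV 4] */
    {
        uint64_t half = V[n / 4][(n % 4) / 2];
        return (uint32_t)(half >> (32 * (n % 2)));
    }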

Library pseudocode for aarch32/functions/registers/Din

// Din[] - non-assignment form
// ===========================

bits(64) Din[integer n]
    assert n >= 0 && n <= 31;
    return _Dclone[n];

Library pseudocode for aarch32/functions/registers/LR

// LR - assignment form
// ====================

LR = bits(32) value
    R[14] = value;
    return;

// LR - non-assignment form
// ========================

bits(32) LR
    return R[14];

Library pseudocode for aarch32/functions/registers/LoadWritePC

// LoadWritePC()
// =============

LoadWritePC(bits(32) address)
    BXWritePC(address, BranchType_INDIR);

Library pseudocode for aarch32/functions/registers/LookUpRIndex

// LookUpRIndex()
// ==============

integer LookUpRIndex(integer n, bits(5) mode)
    assert n >= 0 && n <= 14;

    case n of   // Select index by mode:     usr fiq irq svc abt und hyp
        when 8     result = RBankSelect(mode,  8, 24,  8,  8,  8,  8,  8);
        when 9     result = RBankSelect(mode,  9, 25,  9,  9,  9,  9,  9);
        when 10    result = RBankSelect(mode, 10, 26, 10, 10, 10, 10, 10);
        when 11    result = RBankSelect(mode, 11, 27, 11, 11, 11, 11, 11);
        when 12    result = RBankSelect(mode, 12, 28, 12, 12, 12, 12, 12);
        when 13    result = RBankSelect(mode, 13, 29, 17, 19, 21, 23, 15);
        when 14    result = RBankSelect(mode, 14, 30, 16, 18, 20, 22, 14);
        otherwise  result = n;

    return result;

Library pseudocode for aarch32/functions/registers/Monitor_mode_registers

bits(32) SP_mon;
bits(32) LR_mon;

Library pseudocode for aarch32/functions/registers/PC

// PC - non-assignment form
// ========================

bits(32) PC
    return R[15];    // This includes the offset from AArch32 state

Library pseudocode for aarch32/functions/registers/PCStoreValue

// PCStoreValue()
// ==============

bits(32) PCStoreValue()
    // This function returns the PC value. On architecture versions before Armv7, it
    // is permitted to instead return PC+4, provided it does so consistently. It is
    // used only to describe A32 instructions, so it returns the address of the current
    // instruction plus 8 (normally) or 12 (when the alternative is permitted).
    return PC;

Library pseudocode for aarch32/functions/registers/Q

// Q[] - non-assignment form
// =========================

bits(128) Q[integer n]
    assert n >= 0 && n <= 15;
    return V[n];

// Q[] - assignment form
// =====================

Q[integer n] = bits(128) value
    assert n >= 0 && n <= 15;
    V[n] = value;
    return;

Library pseudocode for aarch32/functions/registers/Qin

// Qin[] - non-assignment form
// ===========================

bits(128) Qin[integer n]
    assert n >= 0 && n <= 15;
    return Din[2*n+1]:Din[2*n];

Library pseudocode for aarch32/functions/registers/R

// R[] - assignment form
// =====================

R[integer n] = bits(32) value
    Rmode[n, PSTATE.M] = value;
    return;

// R[] - non-assignment form
// =========================

bits(32) R[integer n]
    if n == 15 then
        offset = (if CurrentInstrSet() == InstrSet_A32 then 8 else 4);
        return _PC<31:0> + offset;
    else
        return Rmode[n, PSTATE.M];
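The n == 15 case is the architectural PC-read offset: a PC read in A32 state observes the address of the current instruction plus 8, in T32 state plus 4. A GCC-style illustration, meaningful only when built as A32 code:

    #include <stdint.h>

    static uint32_t read_pc_a32(void)
    {
        uint32_t pc;
        __asm__ volatile("mov %0, pc" : "=r"(pc));   /* pc = address of this mov + 8 */
        return pc;
    }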

Library pseudocode for aarch32/functions/registers/RBankSelect

// RBankSelect()
// =============

integer RBankSelect(bits(5) mode, integer usr, integer fiq, integer irq,
                    integer svc, integer abt, integer und, integer hyp)

    case mode of
        when M32_User    result = usr;    // User mode
        when M32_FIQ     result = fiq;    // FIQ mode
        when M32_IRQ     result = irq;    // IRQ mode
        when M32_Svc     result = svc;    // Supervisor mode
        when M32_Abort   result = abt;    // Abort mode
        when M32_Hyp     result = hyp;    // Hyp mode
        when M32_Undef   result = und;    // Undefined mode
        when M32_System  result = usr;    // System mode uses User mode registers
        otherwise        Unreachable();   // Monitor mode

    return result;

Library pseudocode for aarch32/functions/registers/Rmode

// Rmode[] - non-assignment form
// =============================

bits(32) Rmode[integer n, bits(5) mode]
    assert n >= 0 && n <= 14;

    // Check for attempted use of Monitor mode in Non-secure state.
    if !IsSecure() then assert mode != M32_Monitor;
    assert !BadMode(mode);

    if mode == M32_Monitor then
        if n == 13 then return SP_mon;
        elsif n == 14 then return LR_mon;
        else return _R[n]<31:0>;
    else
        return _R[LookUpRIndex(n, mode)]<31:0>;

// Rmode[] - assignment form
// =========================

Rmode[integer n, bits(5) mode] = bits(32) value
    assert n >= 0 && n <= 14;

    // Check for attempted use of Monitor mode in Non-secure state.
    if !IsSecure() then assert mode != M32_Monitor;
    assert !BadMode(mode);

    if mode == M32_Monitor then
        if n == 13 then SP_mon = value;
        elsif n == 14 then LR_mon = value;
        else _R[n]<31:0> = value;
    else
        // It is CONSTRAINED UNPREDICTABLE whether the upper 32 bits of the X
        // register are unchanged or set to zero. This is also tested for on
        // exception entry, as this applies to all AArch32 registers.
        if !HighestELUsingAArch32() && ConstrainUnpredictableBool(Unpredictable_ZEROUPPER) then
            _R[LookUpRIndex(n, mode)] = ZeroExtend(value);
        else
            _R[LookUpRIndex(n, mode)]<31:0> = value;

    return;

Library pseudocode for aarch32/functions/registers/S

// S[] - non-assignment form
// =========================

bits(32) S[integer n]
    assert n >= 0 && n <= 31;
    base = (n MOD 4) * 32;
    bits(128) vreg = V[n DIV 4];
    return vreg<base+31:base>;

// S[] - assignment form
// =====================

S[integer n] = bits(32) value
    assert n >= 0 && n <= 31;
    base = (n MOD 4) * 32;
    bits(128) vreg = V[n DIV 4];
    vreg<base+31:base> = value;
    V[n DIV 4] = vreg;
    return;

Library pseudocode for aarch32/functions/registers/SP

// SP - assignment form
// ====================

SP = bits(32) value
    R[13] = value;
    return;

// SP - non-assignment form
// ========================

bits(32) SP
    return R[13];

Library pseudocode for aarch32/functions/registers/_Dclone

array bits(64) _Dclone[0..31];

Library pseudocode for aarch32/functions/system/AArch32.ExceptionReturn

// AArch32.ExceptionReturn()
// =========================

AArch32.ExceptionReturn(bits(32) new_pc, bits(32) spsr)

    SynchronizeContext();

    // Attempts to change to an illegal mode or state will invoke the Illegal Execution state
    // mechanism
    SetPSTATEFromPSR(spsr);
    ClearExclusiveLocal(ProcessorID());
    SendEventLocal();

    if PSTATE.IL == '1' then
        // If the exception return is illegal, PC[1:0] are UNKNOWN
        new_pc<1:0> = bits(2) UNKNOWN;
    else
        // LR[1:0] or LR[0] are treated as being 0, depending on the target instruction set state
        if PSTATE.T == '1' then
            new_pc<0> = '0';       // T32
        else
            new_pc<1:0> = '00';    // A32

    BranchTo(new_pc, BranchType_ERET);

Library pseudocode for aarch32/functions/system/AArch32.ExecutingATS1xPInstr

// AArch32.ExecutingATS1xPInstr()
// ==============================
// Return TRUE if current instruction is AT S1CPR/WP

boolean AArch32.ExecutingATS1xPInstr()
    if !HavePrivATExt() then return FALSE;

    instr = ThisInstr();
    if instr<24+:4> == '1110' && instr<8+:4> == '1111' then
        opc1 = instr<21+:3>;
        CRn  = instr<16+:4>;
        CRm  = instr<0+:4>;
        opc2 = instr<5+:3>;
        return (opc1 == '000' && CRn == '0111' && CRm == '1001' && opc2 IN {'000','001'});
    else
        return FALSE;
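The instr<lsb+:width> slices denote width-bit fields starting at bit lsb, so the decode above is plain shift-and-mask work; in C (a sketch, with an invented function name):

    #include <stdbool.h>
    #include <stdint.h>

    static bool is_ats1xp(uint32_t instr)
    {
        if (((instr >> 24) & 0xFu) != 0xEu ||    /* instr<24+:4> == '1110' */
            ((instr >>  8) & 0xFu) != 0xFu)      /* instr<8+:4>  == '1111' */
            return false;
        uint32_t opc1 = (instr >> 21) & 0x7u;
        uint32_t crn  = (instr >> 16) & 0xFu;
        uint32_t crm  =  instr        & 0xFu;
        uint32_t opc2 = (instr >>  5) & 0x7u;
        return opc1 == 0 && crn == 7 && crm == 9 && (opc2 == 0 || opc2 == 1);
    }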

Library pseudocode for aarch32/functions/system/AArch32.ExecutingCP10or11Instr

// AArch32.ExecutingCP10or11Instr()
// ================================

boolean AArch32.ExecutingCP10or11Instr()
    instr = ThisInstr();
    instr_set = CurrentInstrSet();
    assert instr_set IN {InstrSet_A32, InstrSet_T32};

    if instr_set == InstrSet_A32 then
        return ((instr<27:24> == '1110' || instr<27:25> == '110') && instr<11:8> == '101x');
    else    // InstrSet_T32
        return (instr<31:28> == '111x' && (instr<27:24> == '1110' || instr<27:25> == '110') && instr<11:8> == '101x');

Library pseudocode for aarch32/functions/system/AArch32.ExecutingLSMInstr

// AArch32.ExecutingLSMInstr()
// ===========================
// Returns TRUE if processor is executing a Load/Store Multiple instruction

boolean AArch32.ExecutingLSMInstr()
    instr = ThisInstr();
    instr_set = CurrentInstrSet();
    assert instr_set IN {InstrSet_A32, InstrSet_T32};

    if instr_set == InstrSet_A32 then
        return (instr<28+:4> != '1111' && instr<25+:3> == '100');
    else    // InstrSet_T32
        if ThisInstrLength() == 16 then
            return (instr<12+:4> == '1100');
        else
            return (instr<25+:7> == '1110100' && instr<22> == '0');

Library pseudocode for aarch32/functions/system/AArch32.ITAdvance

// AArch32.ITAdvance()
// ===================

AArch32.ITAdvance()
    if PSTATE.IT<2:0> == '000' then
        PSTATE.IT = '00000000';
    else
        PSTATE.IT<4:0> = LSL(PSTATE.IT<4:0>, 1);
    return;
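Only IT<4:0> shifts; IT<7:5>, the base condition, is preserved until the block is finished. The same update in C (sketch):

    #include <stdint.h>

    static uint8_t it_advance(uint8_t it)
    {
        if ((it & 0x07u) == 0)
            return 0;                                  /* IT block finished */
        return (uint8_t)((it & 0xE0u) | ((it << 1) & 0x1Fu));
    }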

Library pseudocode for aarch32/functions/system/AArch32.SysRegRead

// Read from a 32-bit AArch32 System register and return the register's contents.
bits(32) AArch32.SysRegRead(integer cp_num, bits(32) instr);

Library pseudocode for aarch32/functions/system/AArch32.SysRegRead64

// Read from a 64-bit AArch32 System register and return the register's contents.
bits(64) AArch32.SysRegRead64(integer cp_num, bits(32) instr);

Library pseudocode for aarch32/functions/system/AArch32.SysRegReadCanWriteAPSR

// AArch32.SysRegReadCanWriteAPSR()
// ================================
// Determines whether the AArch32 System register read instruction can write to APSR flags.

boolean AArch32.SysRegReadCanWriteAPSR(integer cp_num, bits(32) instr)
    assert UsingAArch32();
    assert (cp_num IN {14,15});
    assert cp_num == UInt(instr<11:8>);

    opc1 = UInt(instr<23:21>);
    opc2 = UInt(instr<7:5>);
    CRn  = UInt(instr<19:16>);
    CRm  = UInt(instr<3:0>);

    if cp_num == 14 && opc1 == 0 && CRn == 0 && CRm == 1 && opc2 == 0 then    // DBGDSCRint
        return TRUE;

    return FALSE;

Library pseudocode for aarch32/functions/system/AArch32.SysRegWrite

// Write to a 32-bit AArch32 System register.
AArch32.SysRegWrite(integer cp_num, bits(32) instr, bits(32) val);

Library pseudocode for aarch32/functions/system/AArch32.SysRegWrite64

// Write to a 64-bit AArch32 System register.
AArch32.SysRegWrite64(integer cp_num, bits(32) instr, bits(64) val);

Library pseudocode for aarch32/functions/system/AArch32.WriteMode

// AArch32.WriteMode()
// ===================
// Function for dealing with writes to PSTATE.M from AArch32 state only.
// This ensures that PSTATE.EL and PSTATE.SP are always valid.

AArch32.WriteMode(bits(5) mode)
    (valid, el) = ELFromM32(mode);
    assert valid;
    PSTATE.M   = mode;
    PSTATE.EL  = el;
    PSTATE.nRW = '1';
    PSTATE.SP  = (if mode IN {M32_User, M32_System} then '0' else '1');
    return;

Library pseudocode for aarch32/functions/system/AArch32.WriteModeByInstr

// AArch32.WriteModeByInstr()
// ==========================
// Function for dealing with writes to PSTATE.M from an AArch32 instruction, and ensuring that
// illegal state changes are correctly flagged in PSTATE.IL.

AArch32.WriteModeByInstr(bits(5) mode)
    (valid, el) = ELFromM32(mode);

    // 'valid' is set to FALSE if 'mode' is invalid for this implementation or the current value
    // of SCR.NS/SCR_EL3.NS. Additionally, it is illegal for an instruction to write 'mode' to
    // PSTATE.EL if it would result in any of:
    // * A change to a mode that would cause entry to a higher Exception level.
    if UInt(el) > UInt(PSTATE.EL) then
        valid = FALSE;

    // * A change to or from Hyp mode.
    if (PSTATE.M == M32_Hyp || mode == M32_Hyp) && PSTATE.M != mode then
        valid = FALSE;

    // * When EL2 is implemented, the value of HCR.TGE is '1', a change to a Non-secure EL1 mode.
    if PSTATE.M == M32_Monitor && HaveEL(EL2) && el == EL1 && SCR.NS == '1' && HCR.TGE == '1' then
        valid = FALSE;

    if !valid then
        PSTATE.IL = '1';
    else
        AArch32.WriteMode(mode);

Library pseudocode for aarch32/functions/system/BadMode

// BadMode()
// =========

boolean BadMode(bits(5) mode)
    // Return TRUE if 'mode' encodes a mode that is not valid for this implementation
    case mode of
        when M32_Monitor
            valid = HaveAArch32EL(EL3);
        when M32_Hyp
            valid = HaveAArch32EL(EL2);
        when M32_FIQ, M32_IRQ, M32_Svc, M32_Abort, M32_Undef, M32_System
            // If EL3 is implemented and using AArch32, then these modes are EL3 modes in Secure
            // state, and EL1 modes in Non-secure state. If EL3 is not implemented or is using
            // AArch64, then these modes are EL1 modes.
            // Therefore it is sufficient to test this implementation supports EL1 using AArch32.
            valid = HaveAArch32EL(EL1);
        when M32_User
            valid = HaveAArch32EL(EL0);
        otherwise
            valid = FALSE;    // Passed an illegal mode value
    return !valid;

Library pseudocode for aarch32/functions/system/BadMode

// BadMode()
// =========

boolean BadMode(bits(5) mode)
    // Return TRUE if 'mode' encodes a mode that is not valid for this implementation
    case mode of
        when M32_Monitor
            valid = HaveAArch32EL(EL3);
        when M32_Hyp
            valid = HaveAArch32EL(EL2);
        when M32_FIQ, M32_IRQ, M32_Svc, M32_Abort, M32_Undef, M32_System
            // If EL3 is implemented and using AArch32, then these modes are EL3 modes in Secure
            // state, and EL1 modes in Non-secure state. If EL3 is not implemented or is using
            // AArch64, then these modes are EL1 modes.
            // Therefore it is sufficient to test this implementation supports EL1 using AArch32.
            valid = HaveAArch32EL(EL1);
        when M32_User
            valid = HaveAArch32EL(EL0);
        otherwise
            valid = FALSE;           // Passed an illegal mode value
    return !valid;

Library pseudocode for aarch32/functions/system/BankedRegisterAccessValid

// BankedRegisterAccessValid()
// ===========================
// Checks for MRS (Banked register) or MSR (Banked register) accesses to registers
// other than the SPSRs that are invalid. This includes ELR_hyp accesses.

BankedRegisterAccessValid(bits(5) SYSm, bits(5) mode)

    case SYSm of
        when '000xx', '00100'                     // R8_usr to R12_usr
            if mode != M32_FIQ then UNPREDICTABLE;
        when '00101'                              // SP_usr
            if mode == M32_System then UNPREDICTABLE;
        when '00110'                              // LR_usr
            if mode IN {M32_Hyp,M32_System} then UNPREDICTABLE;
        when '010xx', '0110x', '01110'            // R8_fiq to R12_fiq, SP_fiq, LR_fiq
            if mode == M32_FIQ then UNPREDICTABLE;
        when '1000x'                              // LR_irq, SP_irq
            if mode == M32_IRQ then UNPREDICTABLE;
        when '1001x'                              // LR_svc, SP_svc
            if mode == M32_Svc then UNPREDICTABLE;
        when '1010x'                              // LR_abt, SP_abt
            if mode == M32_Abort then UNPREDICTABLE;
        when '1011x'                              // LR_und, SP_und
            if mode == M32_Undef then UNPREDICTABLE;
        when '1110x'                              // LR_mon, SP_mon
            if !HaveEL(EL3) || !IsSecure() || mode == M32_Monitor then UNPREDICTABLE;
        when '11110'                              // ELR_hyp, only from Monitor or Hyp mode
            if !HaveEL(EL2) || !(mode IN {M32_Monitor,M32_Hyp}) then UNPREDICTABLE;
        when '11111'                              // SP_hyp, only from Monitor mode
            if !HaveEL(EL2) || mode != M32_Monitor then UNPREDICTABLE;
        otherwise
            UNPREDICTABLE;

    return;
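
The quoted bit-strings in the case statement above use 'x' as a don't-care digit, so '000xx' covers SYSm values '00000' to '00011'. A minimal Python illustration of that matching rule (not part of the pseudocode library):

    # Hypothetical helper showing how the quoted SYSm patterns match.
    def matches(pattern, bits):
        return len(pattern) == len(bits) and all(
            p == 'x' or p == b for p, b in zip(pattern, bits))

    assert matches('000xx', '00010')      # R10_usr falls under '000xx'
    assert not matches('1000x', '10010')  # '10010' is LR_svc ('1001x'), not '1000x'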

Library pseudocode for aarch32/functions/system/CPSRWriteByInstr

// CPSRWriteByInstr()
// ==================
// Update PSTATE.<N,Z,C,V,Q,GE,E,A,I,F,M> from a CPSR value written by an MSR instruction.

CPSRWriteByInstr(bits(32) value, bits(4) bytemask)
    privileged = PSTATE.EL != EL0;           // PSTATE.<A,I,F,M> are not writable at EL0

    // Write PSTATE from 'value', ignoring bytes masked by 'bytemask'
    if bytemask<3> == '1' then
        PSTATE.<N,Z,C,V,Q> = value<31:27>;
        // Bits <26:24> are ignored

    if bytemask<2> == '1' then
        if HaveSSBSExt() then
            PSTATE.SSBS = value<23>;
        if privileged then
            PSTATE.PAN = value<22>;
        if HaveDITExt() then
            PSTATE.DIT = value<21>;
        // Bit <20> is RES0
        PSTATE.GE = value<19:16>;

    if bytemask<1> == '1' then
        // Bits <15:10> are RES0
        PSTATE.E = value<9>;                 // PSTATE.E is writable at EL0
        if privileged then
            PSTATE.A = value<8>;

    if bytemask<0> == '1' then
        if privileged then
            PSTATE.<I,F> = value<7:6>;
            // Bit <5> is RES0
            // AArch32.WriteModeByInstr() sets PSTATE.IL to 1 if this is an illegal mode change.
            AArch32.WriteModeByInstr(value<4:0>);

    return;
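
The bytemask selects which bytes of the CPSR view may be written. Ignoring the per-field privilege and RES0 rules shown above, the byte-lane selection itself can be modelled as follows (a hypothetical Python sketch, not the architectural behaviour in full):

    # Each bytemask bit gates the corresponding byte of the written value;
    # unselected bytes are left unchanged.
    def masked_write(cpsr, value, bytemask):
        for byte in range(4):
            if bytemask & (1 << byte):
                lane = 0xFF << (8 * byte)
                cpsr = (cpsr & ~lane) | (value & lane)
        return cpsr

    # Only the flags byte (bytemask<3>) is updated here.
    assert masked_write(0x000001D3, 0xF0000010, 0b1000) == 0xF00001D3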

Library pseudocode for aarch32/functions/system/ConditionPassed

// ConditionPassed()
// =================

boolean ConditionPassed()
    return ConditionHolds(AArch32.CurrentCond());
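
ConditionHolds() (defined elsewhere in this library) evaluates the standard AArch32 condition tests against the PSTATE.<N,Z,C,V> flags. A minimal Python model of that evaluation, for illustration only:

    # Hypothetical model of ConditionHolds() for a 4-bit condition field.
    # cond '1110' is AL; '1111' also behaves as always-true.
    def condition_holds(cond, n, z, c, v):
        result = {
            0b000: z == 1,             # EQ/NE
            0b001: c == 1,             # CS/CC
            0b010: n == 1,             # MI/PL
            0b011: v == 1,             # VS/VC
            0b100: c == 1 and z == 0,  # HI/LS
            0b101: n == v,             # GE/LT
            0b110: n == v and z == 0,  # GT/LE
            0b111: True,               # AL
        }[cond >> 1]
        if cond & 1 and cond != 0b1111:  # odd encodings invert the test
            result = not result
        return result

    assert condition_holds(0b0000, n=0, z=1, c=0, v=0)      # EQ passes when Z set
    assert not condition_holds(0b0001, n=0, z=1, c=0, v=0)  # NE fails when Z set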

Library pseudocode for aarch32/functions/system/CurrentCond

bits(4) AArch32.CurrentCond();

Library pseudocode for aarch32/functions/system/InITBlock

// InITBlock()
// ===========

boolean InITBlock()
    if CurrentInstrSet() == InstrSet_T32 then
        return PSTATE.IT<3:0> != '0000';
    else
        return FALSE;

Library pseudocode for aarch32/functions/system/LastInITBlock

// LastInITBlock()
// ===============

boolean LastInITBlock()
    return (PSTATE.IT<3:0> == '1000');
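
PSTATE.IT<7:5> holds the IT block's base condition and PSTATE.IT<4:0> a mask that is shifted left as each instruction in the block completes (ITAdvance, defined elsewhere in this library). A minimal Python model of the two tests above and of that advance step, for illustration only:

    # Hypothetical model of the IT-block state tests.
    def in_it_block(it):       return (it & 0b1111) != 0
    def last_in_it_block(it):  return (it & 0b1111) == 0b1000

    def it_advance(it):
        # ITAdvance(): clear when IT<2:0> is '000', else shift IT<4:0> left.
        if (it & 0b111) == 0:
            return 0
        return (it & 0b11100000) | ((it << 1) & 0b11111)

    it = 0b00101100                       # an IT block with an instruction remaining
    assert in_it_block(it) and not last_in_it_block(it)
    assert last_in_it_block(it_advance(it))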

Library pseudocode for aarch32/functions/system/SPSRWriteByInstr

// SPSRWriteByInstr()
// ==================

SPSRWriteByInstr(bits(32) value, bits(4) bytemask)

    new_spsr = SPSR[];

    if bytemask<3> == '1' then
        new_spsr<31:24> = value<31:24>;  // N,Z,C,V,Q flags, IT[1:0],J bits

    if bytemask<2> == '1' then
        new_spsr<23:16> = value<23:16>;  // IL bit, GE[3:0] flags

    if bytemask<1> == '1' then
        new_spsr<15:8> = value<15:8>;    // IT[7:2] bits, E bit, A interrupt mask

    if bytemask<0> == '1' then
        new_spsr<7:0> = value<7:0>;      // I,F interrupt masks, T bit, Mode bits

    SPSR[] = new_spsr;                   // UNPREDICTABLE if User or System mode

    return;

Library pseudocode for aarch32/functions/system/SPSRaccessValid

// SPSRaccessValid()
// =================
// Checks for MRS (Banked register) or MSR (Banked register) accesses to the SPSRs
// that are UNPREDICTABLE

SPSRaccessValid(bits(5) SYSm, bits(5) mode)
    case SYSm of
        when '01110'                              // SPSR_fiq
            if mode == M32_FIQ then UNPREDICTABLE;
        when '10000'                              // SPSR_irq
            if mode == M32_IRQ then UNPREDICTABLE;
        when '10010'                              // SPSR_svc
            if mode == M32_Svc then UNPREDICTABLE;
        when '10100'                              // SPSR_abt
            if mode == M32_Abort then UNPREDICTABLE;
        when '10110'                              // SPSR_und
            if mode == M32_Undef then UNPREDICTABLE;
        when '11100'                              // SPSR_mon
            if !HaveEL(EL3) || mode == M32_Monitor || !IsSecure() then UNPREDICTABLE;
        when '11110'                              // SPSR_hyp
            if !HaveEL(EL2) || mode != M32_Monitor then UNPREDICTABLE;
        otherwise
            UNPREDICTABLE;

    return;

Library pseudocode for aarch32/functions/system/SelectInstrSet

// SelectInstrSet()
// ================

SelectInstrSet(InstrSet iset)
    assert CurrentInstrSet() IN {InstrSet_A32, InstrSet_T32};
    assert iset IN {InstrSet_A32, InstrSet_T32};

    PSTATE.T = if iset == InstrSet_A32 then '0' else '1';

    return;

Library pseudocode for aarch32/functions/v6simd/Sat

// Sat()
// =====

bits(N) Sat(integer i, integer N, boolean unsigned)
    result = if unsigned then UnsignedSat(i, N) else SignedSat(i, N);
    return result;

Library pseudocode for aarch32/functions/v6simd/SignedSat

// SignedSat()
// ===========

bits(N) SignedSat(integer i, integer N)
    (result, -) = SignedSatQ(i, N);
    return result;

Library pseudocode for aarch32/functions/v6simd/UnsignedSat

// UnsignedSat()
// =============

bits(N) UnsignedSat(integer i, integer N)
    (result, -) = UnsignedSatQ(i, N);
    return result;
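
SignedSatQ and UnsignedSatQ (defined elsewhere in this library) clamp a value to the N-bit representable range and also return a saturation flag, which the wrappers above discard. The clamping itself is simple to model in Python (illustration only, not part of the pseudocode library):

    # Hypothetical model of the saturation arithmetic.
    def signed_sat(i, n):
        lo, hi = -(1 << (n - 1)), (1 << (n - 1)) - 1
        return max(lo, min(hi, i))

    def unsigned_sat(i, n):
        return max(0, min((1 << n) - 1, i))

    assert signed_sat(200, 8) == 127   # clamps to 2^7 - 1
    assert unsigned_sat(-5, 8) == 0    # clamps to zero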

Library pseudocode for aarch32/translation/attrs/AArch32.CombineS1S2Desc

// AArch32.CombineS1S2Desc()
// =========================
// Combines the address descriptors from stage 1 and stage 2

AddressDescriptor AArch32.CombineS1S2Desc(AddressDescriptor s1desc, AddressDescriptor s2desc,
                                          AccType s2acctype)

    AddressDescriptor result;

    result.paddress = s2desc.paddress;

    apply_force_writeback = HaveStage2MemAttrControl() && HCR_EL2.FWB == '1';

    if IsFault(s1desc) || IsFault(s2desc) then
        result = if IsFault(s1desc) then s1desc else s2desc;
    else
        result.fault = AArch32.NoFault();
        if s2desc.memattrs.memtype == MemType_Device || (
              (apply_force_writeback && s1desc.memattrs.memtype == MemType_Device &&
               s2desc.memattrs.inner.attrs != '10') ||
              (!apply_force_writeback && s1desc.memattrs.memtype == MemType_Device)
            ) then
            result.memattrs.memtype = MemType_Device;
            if s1desc.memattrs.memtype == MemType_Normal then
                result.memattrs.device = s2desc.memattrs.device;
            elsif s2desc.memattrs.memtype == MemType_Normal then
                result.memattrs.device = s1desc.memattrs.device;
            else                    // Both Device
                result.memattrs.device = CombineS1S2Device(s1desc.memattrs.device,
                                                           s2desc.memattrs.device);
            result.memattrs.tagged = FALSE;
        else                        // S1 can be either Normal or Device, S2 is Normal.
            result.memattrs.memtype = MemType_Normal;
            result.memattrs.device = DeviceType UNKNOWN;
            result.memattrs.inner = CombineS1S2AttrHints(s1desc.memattrs.inner,
                                                         s2desc.memattrs.inner, s2acctype);
            result.memattrs.outer = CombineS1S2AttrHints(s1desc.memattrs.outer,
                                                         s2desc.memattrs.outer, s2acctype);
            result.memattrs.shareable = (s1desc.memattrs.shareable || s2desc.memattrs.shareable);
            result.memattrs.outershareable = (s1desc.memattrs.outershareable ||
                                              s2desc.memattrs.outershareable);
            result.memattrs.tagged = (s1desc.memattrs.tagged &&
                                      result.memattrs.inner.attrs == MemAttr_WB &&
                                      result.memattrs.inner.hints == MemHint_RWA &&
                                      result.memattrs.outer.attrs == MemAttr_WB &&
                                      result.memattrs.outer.hints == MemHint_RWA);
        result.memattrs = MemAttrDefaults(result.memattrs);

    return result;

Library pseudocode for aarch32/translation/attrs/AArch32.DefaultTEXDecode

// AArch32.DefaultTEXDecode()
// ==========================

MemoryAttributes AArch32.DefaultTEXDecode(bits(3) TEX, bit C, bit B, bit S, AccType acctype)

    MemoryAttributes memattrs;

    // Reserved values map to allocated values
    if (TEX == '001' && C:B == '01') || (TEX == '010' && C:B != '00') || TEX == '011' then
        bits(5) texcb;
        (-, texcb) = ConstrainUnpredictableBits(Unpredictable_RESTEXCB);
        TEX = texcb<4:2>;  C = texcb<1>;  B = texcb<0>;

    case TEX:C:B of
        when '00000'                              // Device-nGnRnE
            memattrs.memtype = MemType_Device;
            memattrs.device = DeviceType_nGnRnE;
        when '00001', '01000'                     // Device-nGnRE
            memattrs.memtype = MemType_Device;
            memattrs.device = DeviceType_nGnRE;
        when '00010', '00011', '00100'            // Write-back or Write-through Read allocate,
                                                  // or Non-cacheable
            memattrs.memtype = MemType_Normal;
            memattrs.inner = ShortConvertAttrsHints(C:B, acctype, FALSE);
            memattrs.outer = ShortConvertAttrsHints(C:B, acctype, FALSE);
            memattrs.shareable = (S == '1');
        when '00110'
            memattrs = MemoryAttributes IMPLEMENTATION_DEFINED;
        when '00111'                              // Write-back Read and Write allocate
            memattrs.memtype = MemType_Normal;
            memattrs.inner = ShortConvertAttrsHints('01', acctype, FALSE);
            memattrs.outer = ShortConvertAttrsHints('01', acctype, FALSE);
            memattrs.shareable = (S == '1');
        when '1xxxx'                              // Cacheable, TEX<1:0> = Outer attrs,
                                                  // {C,B} = Inner attrs
            memattrs.memtype = MemType_Normal;
            memattrs.inner = ShortConvertAttrsHints(C:B, acctype, FALSE);
            memattrs.outer = ShortConvertAttrsHints(TEX<1:0>, acctype, FALSE);
            memattrs.shareable = (S == '1');
        otherwise
            Unreachable();                        // Reserved, handled above

    // transient bits are not supported in this format
    memattrs.inner.transient = FALSE;
    memattrs.outer.transient = FALSE;

    // distinction between inner and outer shareable is not supported in this format
    memattrs.outershareable = memattrs.shareable;
    memattrs.tagged = FALSE;

    return MemAttrDefaults(memattrs);
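
For quick reference, the allocated TEX:C:B encodings above condense to the following table. This is a hypothetical Python summary; the Normal-memory labels assume the usual short-descriptor meaning of ShortConvertAttrsHints, which is defined elsewhere in this library.

    # Condensed view of the decode above (allocated encodings only;
    # reserved values are remapped before this lookup applies).
    TEXCB_DECODE = {
        '00000': 'Device-nGnRnE',
        '00001': 'Device-nGnRE',
        '01000': 'Device-nGnRE',
        '00010': 'Normal, Write-through, Read-allocate',
        '00011': 'Normal, Write-back, Read-allocate',
        '00100': 'Normal, Non-cacheable',
        '00110': 'IMPLEMENTATION_DEFINED',
        '00111': 'Normal, Write-back, Read/Write-allocate',
    }
    # For '1xxxx', TEX<1:0> selects the outer and {C,B} the inner attributes.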

Library pseudocode for aarch32/translation/attrs/AArch32.InstructionDevice

// AArch32.InstructionDevice()
// ===========================
// Instruction fetches from memory marked as Device but not execute-never might generate a
// Permission Fault but are otherwise treated as if from Normal Non-cacheable memory.

AddressDescriptor AArch32.InstructionDevice(AddressDescriptor addrdesc, bits(32) vaddress,
                                            bits(40) ipaddress, integer level, bits(4) domain,
                                            AccType acctype, boolean iswrite, boolean secondstage,
                                            boolean s2fs1walk)

    c = ConstrainUnpredictable(Unpredictable_INSTRDEVICE);
    assert c IN {Constraint_NONE, Constraint_FAULT};

    if c == Constraint_FAULT then
        addrdesc.fault = AArch32.PermissionFault(ipaddress, domain, level, acctype, iswrite,
                                                 secondstage, s2fs1walk);
    else
        addrdesc.memattrs.memtype = MemType_Normal;
        addrdesc.memattrs.inner.attrs = MemAttr_NC;
        addrdesc.memattrs.inner.hints = MemHint_No;
        addrdesc.memattrs.outer = addrdesc.memattrs.inner;
        addrdesc.memattrs.tagged = FALSE;
        addrdesc.memattrs = MemAttrDefaults(addrdesc.memattrs);

    return addrdesc;

Library pseudocode for aarch32/translation/attrs/AArch32.RemappedTEXDecode

// AArch32.RemappedTEXDecode()
// ===========================

MemoryAttributes AArch32.RemappedTEXDecode(bits(3) TEX, bit C, bit B, bit S, AccType acctype)

    MemoryAttributes memattrs;

    region = UInt(TEX<0>:C:B);                    // TEX<2:1> are ignored in this mapping scheme
    if region == 6 then
        memattrs = MemoryAttributes IMPLEMENTATION_DEFINED;
    else
        base = 2 * region;
        attrfield = PRRR<base+1:base>;

        if attrfield == '11' then                 // Reserved, maps to allocated value
            (-, attrfield) = ConstrainUnpredictableBits(Unpredictable_RESPRRR);

        case attrfield of
            when '00'                             // Device-nGnRnE
                memattrs.memtype = MemType_Device;
                memattrs.device = DeviceType_nGnRnE;
            when '01'                             // Device-nGnRE
                memattrs.memtype = MemType_Device;
                memattrs.device = DeviceType_nGnRE;
            when '10'
                memattrs.memtype = MemType_Normal;
                memattrs.inner = ShortConvertAttrsHints(NMRR<base+1:base>, acctype, FALSE);
                memattrs.outer = ShortConvertAttrsHints(NMRR<base+17:base+16>, acctype, FALSE);
                s_bit = if S == '0' then PRRR.NS0 else PRRR.NS1;
                memattrs.shareable = (s_bit == '1');
                memattrs.outershareable = (s_bit == '1' && PRRR<region+24> == '0');
            when '11'
                Unreachable();

    // transient bits are not supported in this format
    memattrs.inner.transient = FALSE;
    memattrs.outer.transient = FALSE;
    memattrs.tagged = FALSE;

    return MemAttrDefaults(memattrs);
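
PRRR and NMRR are arrays of 2-bit fields indexed by memory region, so base = 2 * region selects the field at bits <base+1:base>. A hypothetical Python helper showing the extraction (illustration only):

    # Extract the 2-bit field for a region from a PRRR/NMRR-style register.
    def two_bit_field(reg, region):
        return (reg >> (2 * region)) & 0b11

    prrr = 0b10 << 6                     # region 3 programmed as '10' (Normal memory)
    assert two_bit_field(prrr, 3) == 0b10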

Library pseudocode for aarch32/translation/attrs/AArch32.S1AttrDecode

// AArch32.S1AttrDecode()
// ======================
// Converts the Stage 1 attribute fields, using the MAIR, to orthogonal
// attributes and hints.

MemoryAttributes AArch32.S1AttrDecode(bits(2) SH, bits(3) attr, AccType acctype)

    MemoryAttributes memattrs;

    if PSTATE.EL == EL2 then
        mair = HMAIR1:HMAIR0;
    else
        mair = MAIR1:MAIR0;
    index = 8 * UInt(attr);
    attrfield = mair<index+7:index>;

    memattrs.tagged = FALSE;
    if ((attrfield<7:4> != '0000' && attrfield<7:4> != '1111' && attrfield<3:0> == '0000') ||
        (attrfield<7:4> == '0000' && attrfield<3:0> != 'xx00')) then
        // Reserved, maps to an allocated value
        (-, attrfield) = ConstrainUnpredictableBits(Unpredictable_RESMAIR);
    if !HaveMTEExt() && attrfield<7:4> == '1111' && attrfield<3:0> == '0000' then
        // Reserved, maps to an allocated value
        (-, attrfield) = ConstrainUnpredictableBits(Unpredictable_RESMAIR);

    if attrfield<7:4> == '0000' then              // Device
        memattrs.memtype = MemType_Device;
        case attrfield<3:0> of
            when '0000'  memattrs.device = DeviceType_nGnRnE;
            when '0100'  memattrs.device = DeviceType_nGnRE;
            when '1000'  memattrs.device = DeviceType_nGRE;
            when '1100'  memattrs.device = DeviceType_GRE;
            otherwise    Unreachable();           // Reserved, handled above

    elsif attrfield<3:0> != '0000' then           // Normal
        memattrs.memtype = MemType_Normal;
        memattrs.outer = LongConvertAttrsHints(attrfield<7:4>, acctype);
        memattrs.inner = LongConvertAttrsHints(attrfield<3:0>, acctype);
        memattrs.shareable = SH<1> == '1';
        memattrs.outershareable = SH == '10';
    elsif HaveMTEExt() && attrfield == '11110000' then  // Normal, Tagged WB-RWA
        memattrs.memtype = MemType_Normal;
        memattrs.outer = LongConvertAttrsHints('1111', acctype);  // WB_RWA
        memattrs.inner = LongConvertAttrsHints('1111', acctype);  // WB_RWA
        memattrs.shareable = SH<1> == '1';
        memattrs.outershareable = SH == '10';
        memattrs.tagged = TRUE;
    else
        Unreachable();                            // Reserved, handled above

    return MemAttrDefaults(memattrs);
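
The attr index selects one byte of the concatenated (H)MAIR registers, and a high nibble of '0000' marks a Device type. A hypothetical Python illustration of that indexing (not part of the pseudocode library):

    # Select an 8-bit Attr field from a 64-bit MAIR value and classify it.
    def mair_field(mair64, attr):
        return (mair64 >> (8 * attr)) & 0xFF

    def is_device(attrfield):
        return (attrfield >> 4) == 0b0000

    mair = 0x000000000000FF04            # Attr0 = Device-nGnRE, Attr1 = WB-RWA Normal
    assert is_device(mair_field(mair, 0))
    assert not is_device(mair_field(mair, 1))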

Library pseudocode for aarch32/translation/attrs/AArch32.TranslateAddressS1Off

// AArch32.TranslateAddressS1Off()
// ===============================
// Called for stage 1 translations when translation is disabled to supply a default translation.
// Note that there are additional constraints on instruction prefetching that are not described in
// this pseudocode.

TLBRecord AArch32.TranslateAddressS1Off(bits(32) vaddress, AccType acctype, boolean iswrite)
    assert ELUsingAArch32(S1TranslationRegime());

    TLBRecord result;

    default_cacheable = (HasS2Translation() &&
                         ((if ELUsingAArch32(EL2) then HCR.DC else HCR_EL2.DC) == '1'));

    if default_cacheable then
        // Use default cacheable settings
        result.addrdesc.memattrs.memtype = MemType_Normal;
        result.addrdesc.memattrs.inner.attrs = MemAttr_WB;     // Write-back
        result.addrdesc.memattrs.inner.hints = MemHint_RWA;
        result.addrdesc.memattrs.shareable = FALSE;
        result.addrdesc.memattrs.outershareable = FALSE;
        result.addrdesc.memattrs.tagged = HCR_EL2.DCT == '1';
    elsif acctype != AccType_IFETCH then
        // Treat data as Device
        result.addrdesc.memattrs.memtype = MemType_Device;
        result.addrdesc.memattrs.device = DeviceType_nGnRnE;
        result.addrdesc.memattrs.inner = MemAttrHints UNKNOWN;
        result.addrdesc.memattrs.tagged = FALSE;
    else
        // Instruction cacheability controlled by SCTLR/HSCTLR.I
        if PSTATE.EL == EL2 then
            cacheable = HSCTLR.I == '1';
        else
            cacheable = SCTLR.I == '1';
        result.addrdesc.memattrs.memtype = MemType_Normal;
        if cacheable then
            result.addrdesc.memattrs.inner.attrs = MemAttr_WT;
            result.addrdesc.memattrs.inner.hints = MemHint_RA;
        else
            result.addrdesc.memattrs.inner.attrs = MemAttr_NC;
            result.addrdesc.memattrs.inner.hints = MemHint_No;
        result.addrdesc.memattrs.shareable = TRUE;
        result.addrdesc.memattrs.outershareable = TRUE;
        result.addrdesc.memattrs.tagged = FALSE;

    result.addrdesc.memattrs.outer = result.addrdesc.memattrs.inner;
    result.addrdesc.memattrs = MemAttrDefaults(result.addrdesc.memattrs);

    result.perms.ap = bits(3) UNKNOWN;
    result.perms.xn = '0';
    result.perms.pxn = '0';

    result.nG = bit UNKNOWN;
    result.contiguous = boolean UNKNOWN;
    result.domain = bits(4) UNKNOWN;
    result.level = integer UNKNOWN;
    result.blocksize = integer UNKNOWN;
    result.addrdesc.paddress.address = ZeroExtend(vaddress);
    result.addrdesc.paddress.NS = if IsSecure() then '0' else '1';
    result.addrdesc.fault = AArch32.NoFault();
    result.descupdate.AF = FALSE;
    result.descupdate.AP = FALSE;
    result.descupdate.descaddr = result.addrdesc;
    return result;

Library pseudocode for aarch32/translation/checks/AArch32.AccessIsPrivileged

// AArch32.AccessIsPrivileged()
// ============================

boolean AArch32.AccessIsPrivileged(AccType acctype)

    el = AArch32.AccessUsesEL(acctype);

    if el == EL0 then
        ispriv = FALSE;
    elsif el != EL1 then
        ispriv = TRUE;
    else
        ispriv = (acctype != AccType_UNPRIV);

    return ispriv;

Library pseudocode for aarch32/translation/checks/AArch32.AccessUsesEL

// AArch32.AccessUsesEL()
// ======================
// Returns the Exception Level of the regime that will manage the translation for a given access type.

bits(2) AArch32.AccessUsesEL(AccType acctype)
    if acctype == AccType_UNPRIV then
        return EL0;
    else
        return PSTATE.EL;

Library pseudocode for aarch32/translation/checks/AArch32.CheckDomain

// AArch32.CheckDomain()
// =====================

(boolean, FaultRecord) AArch32.CheckDomain(bits(4) domain, bits(32) vaddress, integer level,
                                           AccType acctype, boolean iswrite)

    index = 2 * UInt(domain);
    attrfield = DACR<index+1:index>;

    if attrfield == '10' then          // Reserved, maps to an allocated value
        // Reserved value maps to an allocated value
        (-, attrfield) = ConstrainUnpredictableBits(Unpredictable_RESDACR);

    if attrfield == '00' then
        fault = AArch32.DomainFault(domain, level, acctype, iswrite);
    else
        fault = AArch32.NoFault();

    permissioncheck = (attrfield == '01');

    return (permissioncheck, fault);

Library pseudocode for aarch32/translation/checks/AArch32.CheckPermission

// AArch32.CheckPermission()
// =========================
// Function used for permission checking from AArch32 stage 1 translations

FaultRecord AArch32.CheckPermission(Permissions perms, bits(32) vaddress, integer level,
                                    bits(4) domain, bit NS, AccType acctype, boolean iswrite)
    assert ELUsingAArch32(S1TranslationRegime());

    if PSTATE.EL != EL2 then
        wxn = SCTLR.WXN == '1';
        if TTBCR.EAE == '1' || SCTLR.AFE == '1' || perms.ap<0> == '1' then
            priv_r = TRUE;
            priv_w = perms.ap<2> == '0';
            user_r = perms.ap<1> == '1';
            user_w = perms.ap<2:1> == '01';
        else
            priv_r = perms.ap<2:1> != '00';
            priv_w = perms.ap<2:1> == '01';
            user_r = perms.ap<1> == '1';
            user_w = FALSE;
        uwxn = SCTLR.UWXN == '1';

        ispriv = AArch32.AccessIsPrivileged(acctype);

        pan = if HavePANExt() then PSTATE.PAN else '0';
        is_ldst   = !(acctype IN {AccType_DC, AccType_DC_UNPRIV, AccType_AT, AccType_IFETCH});
        is_ats1xp = (acctype == AccType_AT && AArch32.ExecutingATS1xPInstr());
        if pan == '1' && user_r && ispriv && (is_ldst || is_ats1xp) then
            priv_r = FALSE;
            priv_w = FALSE;

        user_xn = !user_r || perms.xn == '1' || (user_w && wxn);
        priv_xn = (!priv_r || perms.xn == '1' || perms.pxn == '1' ||
                   (priv_w && wxn) || (user_w && uwxn));

        if ispriv then
            (r, w, xn) = (priv_r, priv_w, priv_xn);
        else
            (r, w, xn) = (user_r, user_w, user_xn);
    else
        // Access from EL2
        wxn = HSCTLR.WXN == '1';
        r = TRUE;
        w = perms.ap<2> == '0';
        xn = perms.xn == '1' || (w && wxn);

    // Restriction on Secure instruction fetch
    if HaveEL(EL3) && IsSecure() && NS == '1' then
        secure_instr_fetch = if ELUsingAArch32(EL3) then SCR.SIF else SCR_EL3.SIF;
        if secure_instr_fetch == '1' then xn = TRUE;

    if acctype == AccType_IFETCH then
        fail = xn;
        failedread = TRUE;
    elsif acctype IN { AccType_ATOMICRW, AccType_ORDEREDRW, AccType_ORDEREDATOMICRW } then
        fail = !r || !w;
        failedread = !r;
    elsif acctype == AccType_DC then
        // DC maintenance instructions operating by VA, cannot fault from stage 1 translation.
        fail = FALSE;
    elsif iswrite then
        fail = !w;
        failedread = FALSE;
    else
        fail = !r;
        failedread = TRUE;

    if fail then
        secondstage = FALSE;
        s2fs1walk = FALSE;
        ipaddress = bits(40) UNKNOWN;
        return AArch32.PermissionFault(ipaddress, domain, level, acctype,
                                       !failedread, secondstage, s2fs1walk);
    else
        return AArch32.NoFault();

Library pseudocode for aarch32/translation/checks/AArch32.CheckS2Permission

// AArch32.CheckS2Permission()
// ===========================
// Function used for permission checking from AArch32 stage 2 translations

FaultRecord AArch32.CheckS2Permission(Permissions perms, bits(32) vaddress, bits(40) ipaddress,
                                      integer level, AccType acctype, boolean iswrite,
                                      boolean s2fs1walk)

    assert HaveEL(EL2) && !IsSecure() && ELUsingAArch32(EL2) && HasS2Translation();

    r = perms.ap<1> == '1';
    w = perms.ap<2> == '1';
    if HaveExtendedExecuteNeverExt() then
        case perms.xn:perms.xxn of
            when '00'  xn = !r;
            when '01'  xn = !r || PSTATE.EL == EL1;
            when '10'  xn = TRUE;
            when '11'  xn = !r || PSTATE.EL == EL0;
    else
        xn = !r || perms.xn == '1';
    // Stage 1 walk is checked as a read, regardless of the original type
    if acctype == AccType_IFETCH && !s2fs1walk then
        fail = xn;
        failedread = TRUE;
    elsif (acctype IN { AccType_ATOMICRW, AccType_ORDEREDRW, AccType_ORDEREDATOMICRW }) && !s2fs1walk then
        fail = !r || !w;
        failedread = !r;
    elsif acctype == AccType_DC && !s2fs1walk then
        // DC maintenance instructions operating by VA, do not generate Permission faults
        // from stage 2 translation, other than from stage 1 translation table walk.
        fail = FALSE;
    elsif iswrite && !s2fs1walk then
        fail = !w;
        failedread = FALSE;
    else
        fail = !r;
        failedread = !iswrite;

    if fail then
        domain = bits(4) UNKNOWN;
        secondstage = TRUE;
        return AArch32.PermissionFault(ipaddress, domain, level, acctype,
                                       !failedread, secondstage, s2fs1walk);
    else
        return AArch32.NoFault();

Library pseudocode for aarch32/translation/debug/AArch32.CheckBreakpoint

// AArch32.CheckBreakpoint()
// =========================
// Called before executing the instruction of length "size" bytes at "vaddress" in an AArch32
// translation regime, when either debug exceptions are enabled, or halting debug is enabled
// and halting is allowed.

FaultRecord AArch32.CheckBreakpoint(bits(32) vaddress, integer size)
    assert ELUsingAArch32(S1TranslationRegime());
    assert size IN {2,4};

    match = FALSE;
    mismatch = FALSE;

    for i = 0 to UInt(DBGDIDR.BRPs)
        (match_i, mismatch_i) = AArch32.BreakpointMatch(i, vaddress, size);
        match = match || match_i;
        mismatch = mismatch || mismatch_i;

    if match && HaltOnBreakpointOrWatchpoint() then
        reason = DebugHalt_Breakpoint;
        Halt(reason);
    elsif (match || mismatch) then
        acctype = AccType_IFETCH;
        iswrite = FALSE;
        debugmoe = DebugException_Breakpoint;
        return AArch32.DebugFault(acctype, iswrite, debugmoe);
    else
        return AArch32.NoFault();

Library pseudocode for aarch32/translation/debug/AArch32.CheckDebug

// AArch32.CheckDebug()
// ====================
// Called on each access to check for a debug exception or entry to Debug state.

FaultRecord AArch32.CheckDebug(bits(32) vaddress, AccType acctype, boolean iswrite, integer size)

    FaultRecord fault = AArch32.NoFault();
    d_side = (acctype != AccType_IFETCH);
    generate_exception = AArch32.GenerateDebugExceptions() && DBGDSCRext.MDBGen == '1';
    halt = HaltOnBreakpointOrWatchpoint();
    // Relative priority of Vector Catch and Breakpoint exceptions not defined in the architecture
    vector_catch_first = ConstrainUnpredictableBool(Unpredictable_BPVECTORCATCHPRI);

    if !d_side && vector_catch_first && generate_exception then
        fault = AArch32.CheckVectorCatch(vaddress, size);

    if fault.statuscode == Fault_None && (generate_exception || halt) then
        if d_side then
            fault = AArch32.CheckWatchpoint(vaddress, acctype, iswrite, size);
        else
            fault = AArch32.CheckBreakpoint(vaddress, size);

    if fault.statuscode == Fault_None && !d_side && !vector_catch_first && generate_exception then
        return AArch32.CheckVectorCatch(vaddress, size);

    return fault;

Library pseudocode for aarch32/translation/debug/AArch32.CheckVectorCatch

// AArch32.CheckVectorCatch()
// ==========================
// Called before executing the instruction of length "size" bytes at "vaddress" in an AArch32
// translation regime, when debug exceptions are enabled.

FaultRecord AArch32.CheckVectorCatch(bits(32) vaddress, integer size)
    assert ELUsingAArch32(S1TranslationRegime());

    match = AArch32.VCRMatch(vaddress);
    if size == 4 && !match && AArch32.VCRMatch(vaddress + 2) then
        match = ConstrainUnpredictableBool(Unpredictable_VCMATCHHALF);

    if match then
        acctype = AccType_IFETCH;
        iswrite = FALSE;
        debugmoe = DebugException_VectorCatch;
        return AArch32.DebugFault(acctype, iswrite, debugmoe);
    else
        return AArch32.NoFault();

Library pseudocode for aarch32/translation/debug/AArch32.CheckWatchpoint

// AArch32.CheckWatchpoint()
// =========================
// Called before accessing the memory location of "size" bytes at "address",
// when either debug exceptions are enabled for the access, or halting debug
// is enabled and halting is allowed.

FaultRecord AArch32.CheckWatchpoint(bits(32) vaddress, AccType acctype,
                                    boolean iswrite, integer size)
    assert ELUsingAArch32(S1TranslationRegime());

    match = FALSE;
    ispriv = AArch32.AccessIsPrivileged(acctype);

    for i = 0 to UInt(DBGDIDR.WRPs)
        match = match || AArch32.WatchpointMatch(i, vaddress, size, ispriv, iswrite);

    if match && HaltOnBreakpointOrWatchpoint() then
        reason = DebugHalt_Watchpoint;
        EDWAR = vaddress;
        Halt(reason);
    elsif match then
        debugmoe = DebugException_Watchpoint;
        return AArch32.DebugFault(acctype, iswrite, debugmoe);
    else
        return AArch32.NoFault();

Library pseudocode for aarch32/translation/faults/AArch32.AccessFlagFault

// AArch32.AccessFlagFault()
// =========================

FaultRecord AArch32.AccessFlagFault(bits(40) ipaddress, bits(4) domain, integer level,
                                    AccType acctype, boolean iswrite, boolean secondstage,
                                    boolean s2fs1walk)

    extflag = bit UNKNOWN;
    debugmoe = bits(4) UNKNOWN;
    errortype = bits(2) UNKNOWN;
    return AArch32.CreateFaultRecord(Fault_AccessFlag, ipaddress, domain, level, acctype,
                                     iswrite, extflag, debugmoe, errortype, secondstage, s2fs1walk);

Library pseudocode for aarch32/translation/faults/AArch32.AddressSizeFault

// AArch32.AddressSizeFault()
// ==========================

FaultRecord AArch32.AddressSizeFault(bits(40) ipaddress, bits(4) domain, integer level,
                                     AccType acctype, boolean iswrite, boolean secondstage,
                                     boolean s2fs1walk)

    extflag = bit UNKNOWN;
    debugmoe = bits(4) UNKNOWN;
    errortype = bits(2) UNKNOWN;
    return AArch32.CreateFaultRecord(Fault_AddressSize, ipaddress, domain, level, acctype,
                                     iswrite, extflag, debugmoe, errortype, secondstage, s2fs1walk);

Library pseudocode for aarch32/translation/faults/AArch32.AlignmentFault

// AArch32.AlignmentFault()
// ========================

FaultRecord AArch32.AlignmentFault(AccType acctype, boolean iswrite, boolean secondstage)

    ipaddress = bits(40) UNKNOWN;
    domain = bits(4) UNKNOWN;
    level = integer UNKNOWN;
    extflag = bit UNKNOWN;
    debugmoe = bits(4) UNKNOWN;
    errortype = bits(2) UNKNOWN;
    s2fs1walk = boolean UNKNOWN;
    return AArch32.CreateFaultRecord(Fault_Alignment, ipaddress, domain, level, acctype,
                                     iswrite, extflag, debugmoe, errortype, secondstage, s2fs1walk);

Library pseudocode for aarch32/translation/faults/AArch32.AsynchExternalAbort

// AArch32.AsynchExternalAbort()
// =============================
// Wrapper function for asynchronous external aborts

FaultRecord AArch32.AsynchExternalAbort(boolean parity, bits(2) errortype, bit extflag)

    faulttype = if parity then Fault_AsyncParity else Fault_AsyncExternal;
    ipaddress = bits(40) UNKNOWN;
    domain = bits(4) UNKNOWN;
    level = integer UNKNOWN;
    acctype = AccType_NORMAL;
    iswrite = boolean UNKNOWN;
    debugmoe = bits(4) UNKNOWN;
    secondstage = FALSE;
    s2fs1walk = FALSE;
    return AArch32.CreateFaultRecord(faulttype, ipaddress, domain, level, acctype,
                                     iswrite, extflag, debugmoe, errortype, secondstage, s2fs1walk);

Library pseudocode for aarch32/translation/faults/AArch32.DebugFault

// AArch32.DebugFault()
// ====================

FaultRecord AArch32.DebugFault(AccType acctype, boolean iswrite, bits(4) debugmoe)

    ipaddress = bits(40) UNKNOWN;
    domain = bits(4) UNKNOWN;
    errortype = bits(2) UNKNOWN;
    level = integer UNKNOWN;
    extflag = bit UNKNOWN;
    secondstage = FALSE;
    s2fs1walk = FALSE;
    return AArch32.CreateFaultRecord(Fault_Debug, ipaddress, domain, level, acctype,
                                     iswrite, extflag, debugmoe, errortype, secondstage, s2fs1walk);

Library pseudocode for aarch32/translation/faults/AArch32.DomainFault

// AArch32.DomainFault()
// =====================

FaultRecord AArch32.DomainFault(bits(4) domain, integer level, AccType acctype, boolean iswrite)

    ipaddress = bits(40) UNKNOWN;
    extflag = bit UNKNOWN;
    debugmoe = bits(4) UNKNOWN;
    errortype = bits(2) UNKNOWN;
    secondstage = FALSE;
    s2fs1walk = FALSE;
    return AArch32.CreateFaultRecord(Fault_Domain, ipaddress, domain, level, acctype,
                                     iswrite, extflag, debugmoe, errortype, secondstage, s2fs1walk);

Library pseudocode for aarch32/translation/faults/AArch32.NoFault

// AArch32.NoFault()
// =================

FaultRecord AArch32.NoFault()

    ipaddress = bits(40) UNKNOWN;
    domain = bits(4) UNKNOWN;
    level = integer UNKNOWN;
    acctype = AccType_NORMAL;
    iswrite = boolean UNKNOWN;
    extflag = bit UNKNOWN;
    debugmoe = bits(4) UNKNOWN;
    errortype = bits(2) UNKNOWN;
    secondstage = FALSE;
    s2fs1walk = FALSE;
    return AArch32.CreateFaultRecord(Fault_None, ipaddress, domain, level, acctype,
                                     iswrite, extflag, debugmoe, errortype, secondstage, s2fs1walk);

Library pseudocode for aarch32/translation/faults/AArch32.PermissionFault

// AArch32.PermissionFault()
// =========================

FaultRecord AArch32.PermissionFault(bits(40) ipaddress, bits(4) domain, integer level,
                                    AccType acctype, boolean iswrite, boolean secondstage,
                                    boolean s2fs1walk)

    extflag = bit UNKNOWN;
    debugmoe = bits(4) UNKNOWN;
    errortype = bits(2) UNKNOWN;
    return AArch32.CreateFaultRecord(Fault_Permission, ipaddress, domain, level, acctype,
                                     iswrite, extflag, debugmoe, errortype, secondstage, s2fs1walk);

Library pseudocode for aarch32/translation/translationfaults/AArch32.TranslationFault

// AArch32.TranslationFault()
// ==========================

FaultRecord AArch32.TranslationFault(bits(40) ipaddress, bits(4) domain, integer level,
                                     AccType acctype, boolean iswrite, boolean secondstage,
                                     boolean s2fs1walk)

    extflag = bit UNKNOWN;
    debugmoe = bits(4) UNKNOWN;
    errortype = bits(2) UNKNOWN;
    return AArch32.CreateFaultRecord(Fault_Translation, ipaddress, domain, level, acctype, iswrite,
                                     extflag, debugmoe, errortype, secondstage, s2fs1walk);
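The three fault constructors above differ only in the Fault type they pass to AArch32.CreateFaultRecord and in which fields the caller supplies. As an informal illustration only (not library pseudocode), a minimal C sketch of that shared pattern; the type and function names here are hypothetical stand-ins, and the pseudocode's UNKNOWN fields are modelled as zero-initialised members a caller must not rely on:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef enum { Fault_None, Fault_Translation, Fault_Permission } FaultType;

    typedef struct {
        FaultType type;
        uint64_t  ipaddress;   /* bits(40) in the pseudocode */
        uint8_t   domain;      /* bits(4) */
        int       level;
        bool      secondstage;
        bool      s2fs1walk;
    } FaultRecord;

    /* Common constructor: mirrors the role of AArch32.CreateFaultRecord for
     * the fields shown here. */
    static FaultRecord create_fault_record(FaultType type, uint64_t ipaddress,
                                           uint8_t domain, int level,
                                           bool secondstage, bool s2fs1walk) {
        FaultRecord f = { type, ipaddress, domain, level, secondstage, s2fs1walk };
        return f;
    }

    /* AArch32.NoFault: everything other than the fault type is UNKNOWN or fixed. */
    static FaultRecord no_fault(void) {
        return create_fault_record(Fault_None, 0, 0, 0, false, false);
    }

    /* AArch32.TranslationFault keeps the caller's walk state. */
    static FaultRecord translation_fault(uint64_t ipaddress, uint8_t domain, int level,
                                         bool secondstage, bool s2fs1walk) {
        return create_fault_record(Fault_Translation, ipaddress, domain, level,
                                   secondstage, s2fs1walk);
    }

    int main(void) {
        FaultRecord f = translation_fault(0x1234, 0, 1, false, false);
        printf("fault type %d at level %d\n", (int)f.type, f.level);
        (void)no_fault();
        return 0;
    }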

Library pseudocode for aarch32/translation/translation/AArch32.FirstStageTranslate

// AArch32.FirstStageTranslate()
// =============================
// Perform a stage 1 translation walk. The function used by Address Translation operations is
// similar except it uses the translation regime specified for the instruction.

AddressDescriptor AArch32.FirstStageTranslate(bits(32) vaddress, AccType acctype, boolean iswrite,
                                              boolean wasaligned, integer size)

    if PSTATE.EL == EL2 then
        s1_enabled = HSCTLR.M == '1';
    elsif EL2Enabled() then
        tge = (if ELUsingAArch32(EL2) then HCR.TGE else HCR_EL2.TGE);
        dc = (if ELUsingAArch32(EL2) then HCR.DC else HCR_EL2.DC);
        s1_enabled = tge == '0' && dc == '0' && SCTLR.M == '1';
    else
        s1_enabled = SCTLR.M == '1';

    TLBRecord S1;
    S1.addrdesc.fault = AArch32.NoFault();
    ipaddress = bits(40) UNKNOWN;
    secondstage = FALSE;
    s2fs1walk = FALSE;

    if s1_enabled then                          // First stage enabled
        use_long_descriptor_format = PSTATE.EL == EL2 || TTBCR.EAE == '1';
        if use_long_descriptor_format then
            S1 = AArch32.TranslationTableWalkLD(ipaddress, vaddress, acctype, iswrite,
                                                secondstage, s2fs1walk, size);
            permissioncheck = TRUE;  domaincheck = FALSE;
        else
            S1 = AArch32.TranslationTableWalkSD(vaddress, acctype, iswrite, size);
            permissioncheck = TRUE;  domaincheck = TRUE;
    else
        S1 = AArch32.TranslateAddressS1Off(vaddress, acctype, iswrite);
        permissioncheck = FALSE;  domaincheck = FALSE;
        InGuardedPage = FALSE;  // No memory is guarded when stage 1 address translation is disabled

    if (!IsFault(S1.addrdesc) && UsingAArch32() && HaveTrapLoadStoreMultipleDeviceExt() &&
        AArch32.ExecutingLSMInstr()) then
        if (S1.addrdesc.memattrs.memtype == MemType_Device &&
            S1.addrdesc.memattrs.device != DeviceType_GRE) then
            nTLSMD = if S1TranslationRegime() == EL2 then HSCTLR.nTLSMD else SCTLR.nTLSMD;
            if nTLSMD == '0' then
                S1.addrdesc.fault = AArch32.AlignmentFault(acctype, iswrite, secondstage);

    // Check for unaligned data accesses to Device memory
    if (((!wasaligned && acctype != AccType_IFETCH) || (acctype == AccType_DCZVA)) &&
        !IsFault(S1.addrdesc) && S1.addrdesc.memattrs.memtype == MemType_Device) then
        S1.addrdesc.fault = AArch32.AlignmentFault(acctype, iswrite, secondstage);

    if !IsFault(S1.addrdesc) && domaincheck && !(acctype IN {AccType_DC, AccType_DC_UNPRIV, AccType_IC}) then
        (permissioncheck, abort) = AArch32.CheckDomain(S1.domain, vaddress, S1.level, acctype, iswrite);
        S1.addrdesc.fault = abort;

    if !IsFault(S1.addrdesc) && permissioncheck then
        S1.addrdesc.fault = AArch32.CheckPermission(S1.perms, vaddress, S1.level, S1.domain,
                                                    S1.addrdesc.paddress.NS, acctype, iswrite);

    // Check for instruction fetches from Device memory not marked as execute-never. If there has
    // not been a Permission Fault then the memory is not marked execute-never.
    if (!IsFault(S1.addrdesc) && S1.addrdesc.memattrs.memtype == MemType_Device &&
        acctype == AccType_IFETCH) then
        S1.addrdesc = AArch32.InstructionDevice(S1.addrdesc, vaddress, ipaddress, S1.level,
                                                S1.domain, acctype, iswrite, secondstage, s2fs1walk);

    return S1.addrdesc;
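The s1_enabled computation at the top of this function is the part most implementations of a software MMU model get subtly wrong: below EL2, a hypervisor can force stage 1 off with HCR.TGE or HCR.DC even when the guest's SCTLR.M is set. A hedged C sketch of just that decision, with register values passed in as plain booleans and hypothetical helper names:

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct {
        bool at_el2;        /* PSTATE.EL == EL2 */
        bool el2_enabled;   /* EL2Enabled() */
        bool hcr_tge;       /* HCR.TGE or HCR_EL2.TGE, per ELUsingAArch32(EL2) */
        bool hcr_dc;        /* HCR.DC  or HCR_EL2.DC */
        bool hsctlr_m;      /* HSCTLR.M */
        bool sctlr_m;       /* SCTLR.M */
    } S1State;

    /* Stage 1 is controlled by HSCTLR.M at EL2; below EL2 it is additionally
     * forced off when the hypervisor sets HCR.TGE or HCR.DC. */
    static bool stage1_enabled(const S1State *s) {
        if (s->at_el2)
            return s->hsctlr_m;
        if (s->el2_enabled)
            return !s->hcr_tge && !s->hcr_dc && s->sctlr_m;
        return s->sctlr_m;
    }

    int main(void) {
        S1State s = { .el2_enabled = true, .hcr_dc = true, .sctlr_m = true };
        /* HCR.DC == 1 forces stage 1 off even though SCTLR.M == 1. */
        printf("stage 1 enabled: %d\n", stage1_enabled(&s));
        return 0;
    }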

Library pseudocode for aarch32/translation/translation/AArch32.FullTranslate

// AArch32.FullTranslate()
// =======================
// Perform both stage 1 and stage 2 translation walks for the current translation regime. The
// function used by Address Translation operations is similar except it uses the translation
// regime specified for the instruction.

AddressDescriptor AArch32.FullTranslate(bits(32) vaddress, AccType acctype, boolean iswrite,
                                        boolean wasaligned, integer size)

    // First Stage Translation
    S1 = AArch32.FirstStageTranslate(vaddress, acctype, iswrite, wasaligned, size);
    if !IsFault(S1) && !(HaveNV2Ext() && acctype == AccType_NV2REGISTER) && HasS2Translation() then
        s2fs1walk = FALSE;
        result = AArch32.SecondStageTranslate(S1, vaddress, acctype, iswrite, wasaligned,
                                              s2fs1walk, size);
    else
        result = S1;

    return result;
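As an informal illustration of the gating in this function (stage 2 runs only when stage 1 produced no fault, a second stage exists, and the access is not an NV2 register access), a compact C sketch with hypothetical stand-in types; the stage functions here are dummies, not models of the real walks:

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct { bool fault; unsigned long long pa; } AddrDesc;

    static AddrDesc stage1(unsigned vaddr)          { AddrDesc d = { false, vaddr }; return d; }
    static AddrDesc stage2(AddrDesc s1, unsigned v) { (void)v; s1.pa |= 0x80000000ull; return s1; }

    static AddrDesc full_translate(unsigned vaddress, bool has_s2, bool nv2_register_access) {
        AddrDesc s1 = stage1(vaddress);
        if (!s1.fault && !nv2_register_access && has_s2)
            return stage2(s1, vaddress);   /* s2fs1walk = FALSE for the final access */
        return s1;                         /* faulting or single-stage result passes through */
    }

    int main(void) {
        AddrDesc d = full_translate(0x1000, true, false);
        printf("pa=0x%llx fault=%d\n", d.pa, d.fault);
        return 0;
    }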

Library pseudocode for aarch32/translation/translation/AArch32.SecondStageTranslate

// AArch32.SecondStageTranslate()
// ==============================
// Perform a stage 2 translation walk. The function used by Address Translation operations is
// similar except it uses the translation regime specified for the instruction.

AddressDescriptor AArch32.SecondStageTranslate(AddressDescriptor S1, bits(32) vaddress,
                                               AccType acctype, boolean iswrite, boolean wasaligned,
                                               boolean s2fs1walk, integer size)

    assert HasS2Translation();
    assert IsZero(S1.paddress.address<47:40>);
    hwupdatewalk = FALSE;
    if !ELUsingAArch32(EL2) then
        return AArch64.SecondStageTranslate(S1, ZeroExtend(vaddress, 64), acctype, iswrite,
                                            wasaligned, s2fs1walk, size, hwupdatewalk);

    s2_enabled = HCR.VM == '1' || HCR.DC == '1';
    secondstage = TRUE;

    if s2_enabled then                          // Second stage enabled
        ipaddress = S1.paddress.address<39:0>;
        S2 = AArch32.TranslationTableWalkLD(ipaddress, vaddress, acctype, iswrite, secondstage,
                                            s2fs1walk, size);

        // Check for unaligned data accesses to Device memory
        if (((!wasaligned && acctype != AccType_IFETCH) || (acctype == AccType_DCZVA)) &&
            S2.addrdesc.memattrs.memtype == MemType_Device && !IsFault(S2.addrdesc)) then
            S2.addrdesc.fault = AArch32.AlignmentFault(acctype, iswrite, secondstage);

        // Check for permissions on Stage2 translations
        if !IsFault(S2.addrdesc) then
            S2.addrdesc.fault = AArch32.CheckS2Permission(S2.perms, vaddress, ipaddress, S2.level,
                                                          acctype, iswrite, s2fs1walk);

        // Check for instruction fetches from Device memory not marked as execute-never. As there
        // has not been a Permission Fault then the memory is not marked execute-never.
        if (!s2fs1walk && !IsFault(S2.addrdesc) && S2.addrdesc.memattrs.memtype == MemType_Device &&
            acctype == AccType_IFETCH) then
            domain = bits(4) UNKNOWN;
            S2.addrdesc = AArch32.InstructionDevice(S2.addrdesc, vaddress, ipaddress, S2.level,
                                                    domain, acctype, iswrite, secondstage,
                                                    s2fs1walk);

        // Check for protected table walk.
        if (s2fs1walk && !IsFault(S2.addrdesc) && S2.addrdesc.memattrs.memtype == MemType_Device) then
            if HCR.PTW == '1' then
                domain = bits(4) UNKNOWN;
                S2.addrdesc.fault = AArch32.PermissionFault(ipaddress, domain, S2.level, acctype,
                                                            iswrite, secondstage, s2fs1walk);
            else
                // Translation table walk occurs as Normal Non-cacheable memory.
                S2.addrdesc.memattrs.memtype = MemType_Normal;
                S2.addrdesc.memattrs.inner.attrs = MemAttr_NC;
                S2.addrdesc.memattrs.outer.attrs = MemAttr_NC;
                S2.addrdesc.memattrs.shareable = TRUE;
                S2.addrdesc.memattrs.outershareable = TRUE;

        if s2fs1walk then
            result = AArch32.CombineS1S2Desc(S1, S2.addrdesc, AccType_PTW);
        else
            result = AArch32.CombineS1S2Desc(S1, S2.addrdesc, acctype);
    else
        result = S1;

    return result;
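The protected-table-walk rule above is easy to misread: it applies only to a stage 1 walk access (s2fs1walk) whose stage 2 mapping resolved to Device memory without already faulting, and HCR.PTW then decides between a stage 2 Permission fault and downgrading the walk to Normal Non-cacheable. A rough C illustration of just that choice, using simplified hypothetical types:

    #include <stdbool.h>
    #include <stdio.h>

    typedef enum { Mem_Normal, Mem_Device } MemType;

    typedef struct {
        MemType memtype;
        bool    cacheable;    /* stands in for the inner/outer MemAttr_NC downgrade */
        bool    perm_fault;
    } S2Result;

    static void apply_ptw_rule(S2Result *s2, bool s2fs1walk, bool hcr_ptw) {
        /* The rule applies only to non-faulting stage 1 walk accesses that hit
         * Device memory at stage 2. */
        if (!s2fs1walk || s2->memtype != Mem_Device || s2->perm_fault)
            return;
        if (hcr_ptw) {
            s2->perm_fault = true;       /* stage 2 Permission fault on the walk */
        } else {
            s2->memtype = Mem_Normal;    /* walk performed as Normal Non-cacheable */
            s2->cacheable = false;
        }
    }

    int main(void) {
        S2Result r = { Mem_Device, false, false };
        apply_ptw_rule(&r, true, false);
        printf("memtype=%s perm_fault=%d\n",
               r.memtype == Mem_Normal ? "Normal" : "Device", r.perm_fault);
        return 0;
    }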

Library pseudocode for aarch32/translation/translation/AArch32.SecondStageWalk

// AArch32.SecondStageWalk()
// =========================
// Perform a stage 2 translation on a stage 1 page table walk access.

AddressDescriptor AArch32.SecondStageWalk(AddressDescriptor S1, bits(32) vaddress, AccType acctype,
                                          boolean iswrite, integer size)

    assert HasS2Translation();

    s2fs1walk = TRUE;
    wasaligned = TRUE;
    return AArch32.SecondStageTranslate(S1, vaddress, acctype, iswrite, wasaligned, s2fs1walk,
                                        size);

Library pseudocode for aarch32/translation/translation/AArch32.TranslateAddress

// AArch32.TranslateAddress()
// ==========================
// Main entry point for translating an address

AddressDescriptor AArch32.TranslateAddress(bits(32) vaddress, AccType acctype, boolean iswrite,
                                           boolean wasaligned, integer size)

    if !ELUsingAArch32(S1TranslationRegime()) then
        return AArch64.TranslateAddress(ZeroExtend(vaddress, 64), acctype, iswrite, wasaligned,
                                        size);
    result = AArch32.FullTranslate(vaddress, acctype, iswrite, wasaligned, size);

    if !(acctype IN {AccType_PTW, AccType_IC, AccType_AT}) && !IsFault(result) then
        result.fault = AArch32.CheckDebug(vaddress, acctype, iswrite, size);

    // Update virtual address for abort functions
    result.vaddress = ZeroExtend(vaddress);

    return result;
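Note the access types exempted from the debug check: table walks, instruction-cache maintenance, and address-translation operations never generate watchpoint or breakpoint debug events here. A hedged C sketch of that gating, with an enum mirroring the AccType names used above:

    #include <stdbool.h>
    #include <stdio.h>

    typedef enum { AccType_NORMAL, AccType_IFETCH, AccType_PTW, AccType_IC, AccType_AT } AccType;

    /* Debug checks run only for ordinary, non-faulting accesses. */
    static bool needs_debug_check(AccType acctype, bool faulted) {
        return !(acctype == AccType_PTW || acctype == AccType_IC || acctype == AccType_AT)
               && !faulted;
    }

    int main(void) {
        printf("normal load: %d\n", needs_debug_check(AccType_NORMAL, false));
        printf("table walk : %d\n", needs_debug_check(AccType_PTW, false));
        return 0;
    }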

Library pseudocode for aarch32/translation/walk/AArch32.TranslationTableWalkLD

// AArch32.TranslationTableWalkLD()
// ================================
// Returns a result of a translation table walk using the Long-descriptor format
//
// Implementations might cache information from memory in any number of non-coherent TLB
// caching structures, and so avoid memory accesses that have been expressed in this
// pseudocode. The use of such TLBs is not expressed in this pseudocode.

TLBRecord AArch32.TranslationTableWalkLD(bits(40) ipaddress, bits(32) vaddress, AccType acctype,
                                         boolean iswrite, boolean secondstage, boolean s2fs1walk,
                                         integer size)
    if !secondstage then
        assert ELUsingAArch32(S1TranslationRegime());
    else
        assert HaveEL(EL2) && !IsSecure() && ELUsingAArch32(EL2) && HasS2Translation();

    TLBRecord         result;
    AddressDescriptor descaddr;
    bits(64)          baseregister;
    bits(40)          inputaddr;  // Input Address is 'vaddress' for stage 1, 'ipaddress' for stage 2
    bit               nswalk;     // Stage 2 translation table walks are to Secure or to Non-secure PA space

    result.descupdate.AF = FALSE;
    result.descupdate.AP = FALSE;

    domain = bits(4) UNKNOWN;

    descaddr.memattrs.memtype = MemType_Normal;

    // Fixed parameters for the page table walk:
    //  grainsize = Log2(Size of Table)         - Size of Table is 4KB in AArch32
    //  stride = Log2(Address per Level)        - Bits of address consumed at each level
    constant integer grainsize = 12;                   // Log2(4KB page size)
    constant integer stride = grainsize - 3;           // Log2(page size / 8 bytes)

    // Derived parameters for the page table walk:
    //  inputsize = Log2(Size of Input Address) - Input Address size in bits
    //  level = Level to start walk from
    // This means that the number of levels after start level = 3-level

    if !secondstage then
        // First stage translation
        inputaddr = ZeroExtend(vaddress);
        el = AArch32.AccessUsesEL(acctype);
        isprivileged = AArch32.AccessIsPrivileged(acctype);
        if el == EL2 then
            inputsize = 32 - UInt(HTCR.T0SZ);
            basefound = inputsize == 32 || IsZero(inputaddr<31:inputsize>);
            disabled = FALSE;
            baseregister = HTTBR;
            descaddr.memattrs = WalkAttrDecode(HTCR.SH0, HTCR.ORGN0, HTCR.IRGN0, secondstage);
            reversedescriptors = HSCTLR.EE == '1';
            lookupsecure = FALSE;
            singlepriv = TRUE;
            hierattrsdisabled = AArch32.HaveHPDExt() && HTCR.HPD == '1';
        else
            basefound = FALSE;
            disabled = FALSE;
            t0size = UInt(TTBCR.T0SZ);
            if t0size == 0 || IsZero(inputaddr<31:(32-t0size)>) then
                inputsize = 32 - t0size;
                basefound = TRUE;
                baseregister = TTBR0;
                descaddr.memattrs = WalkAttrDecode(TTBCR.SH0, TTBCR.ORGN0, TTBCR.IRGN0, secondstage);
                hierattrsdisabled = AArch32.HaveHPDExt() && TTBCR.T2E == '1' && TTBCR2.HPD0 == '1';
            t1size = UInt(TTBCR.T1SZ);
            if (t1size == 0 && !basefound) || (t1size > 0 && IsOnes(inputaddr<31:(32-t1size)>)) then
                inputsize = 32 - t1size;
                basefound = TRUE;
                baseregister = TTBR1;
                descaddr.memattrs = WalkAttrDecode(TTBCR.SH1, TTBCR.ORGN1, TTBCR.IRGN1, secondstage);
                hierattrsdisabled = AArch32.HaveHPDExt() && TTBCR.T2E == '1' && TTBCR2.HPD1 == '1';
            reversedescriptors = SCTLR.EE == '1';
            lookupsecure = IsSecure();
            singlepriv = FALSE;
        // The starting level is the number of strides needed to consume the input address
        level = 4 - (1 + ((inputsize - grainsize - 1) DIV stride));
    else
        // Second stage translation
        inputaddr = ipaddress;
        inputsize = 32 - SInt(VTCR.T0SZ);
        // VTCR.S must match VTCR.T0SZ[3]
        if VTCR.S != VTCR.T0SZ<3> then
            (-, inputsize) = ConstrainUnpredictableInteger(32-7, 32+8, Unpredictable_RESVTCRS);
        basefound = inputsize == 40 || IsZero(inputaddr<39:inputsize>);
        disabled = FALSE;
        descaddr.memattrs = WalkAttrDecode(VTCR.SH0, VTCR.ORGN0, VTCR.IRGN0, secondstage);
        reversedescriptors = HSCTLR.EE == '1';
        singlepriv = TRUE;

        lookupsecure = FALSE;
        baseregister = VTTBR;
        startlevel = UInt(VTCR.SL0);
        level = 2 - startlevel;
        if level <= 0 then basefound = FALSE;

        // Number of entries in the starting level table =
        //     (Size of Input Address)/((Address per level)^(Num levels remaining)*(Size of Table))
        startsizecheck = inputsize - ((3 - level)*stride + grainsize);  // Log2(Num of entries)

        // Check for starting level table with fewer than 2 entries or longer than 16 pages.
        // Lower bound check is:  startsizecheck < Log2(2 entries)
        //   That is, VTCR.SL0 == '00' and SInt(VTCR.T0SZ) > 1, Size of Input Address < 2^31 bytes
        // Upper bound check is:  startsizecheck > Log2(pagesize/8*16)
        //   That is, VTCR.SL0 == '01' and SInt(VTCR.T0SZ) < -2, Size of Input Address > 2^34 bytes
        if startsizecheck < 1 || startsizecheck > stride + 4 then basefound = FALSE;

    if !basefound || disabled then
        level = 1;  // AArch64 reports this as a level 0 fault
        result.addrdesc.fault = AArch32.TranslationFault(ipaddress, domain, level, acctype,
                                                         iswrite, secondstage, s2fs1walk);
        return result;

    if !IsZero(baseregister<47:40>) then
        level = 0;
        result.addrdesc.fault = AArch32.AddressSizeFault(ipaddress, domain, level, acctype,
                                                         iswrite, secondstage, s2fs1walk);
        return result;

    // Bottom bound of the Base address is:
    //     Log2(8 bytes per entry)+Log2(Number of entries in starting level table)
    // Number of entries in starting level table =
    //     (Size of Input Address)/((Address per level)^(Num levels remaining)*(Size of Table))
    baselowerbound = 3 + inputsize - ((3-level)*stride + grainsize);  // Log2(Num of entries*8)
    baseaddress = baseregister<39:baselowerbound>:Zeros(baselowerbound);

    ns_table = if lookupsecure then '0' else '1';
    ap_table = '00';
    xn_table = '0';
    pxn_table = '0';

    addrselecttop = inputsize - 1;

    repeat
        addrselectbottom = (3-level)*stride + grainsize;

        bits(40) index = ZeroExtend(inputaddr<addrselecttop:addrselectbottom>:'000');
        descaddr.paddress.address = ZeroExtend(baseaddress OR index);
        descaddr.paddress.NS = ns_table;

        // If there are two stages of translation, then the first stage table walk addresses
        // are themselves subject to translation
        if secondstage || !HasS2Translation() || (HaveNV2Ext() && acctype == AccType_NV2REGISTER) then
            descaddr2 = descaddr;
        else
            descaddr2 = AArch32.SecondStageWalk(descaddr, vaddress, acctype, iswrite, 8);
            // Check for a fault on the stage 2 walk
            if IsFault(descaddr2) then
                result.addrdesc.fault = descaddr2.fault;
                return result;

        // Update virtual address for abort functions
        descaddr2.vaddress = ZeroExtend(vaddress);

        accdesc = CreateAccessDescriptorPTW(acctype, secondstage, s2fs1walk, level);
        desc = _Mem[descaddr2, 8, accdesc];

        if reversedescriptors then desc = BigEndianReverse(desc);

        if desc<0> == '0' || (desc<1:0> == '01' && level == 3) then
            // Fault (00), Reserved (10), or Block (01) at level 3.
            result.addrdesc.fault = AArch32.TranslationFault(ipaddress, domain, level, acctype,
                                                             iswrite, secondstage, s2fs1walk);
            return result;

        // Valid Block, Page, or Table entry
        if desc<1:0> == '01' || level == 3 then            // Block (01) or Page (11)
            blocktranslate = TRUE;
        else                                               // Table (11)
            if !IsZero(desc<47:40>) then
                result.addrdesc.fault = AArch32.AddressSizeFault(ipaddress, domain, level,
                                                                 acctype, iswrite, secondstage,
                                                                 s2fs1walk);
                return result;

            baseaddress = desc<39:grainsize>:Zeros(grainsize);
            if !secondstage then
                // Unpack the upper and lower table attributes
                ns_table = ns_table OR desc<63>;
            if !secondstage && !hierattrsdisabled then
                ap_table<1> = ap_table<1> OR desc<62>;         // read-only
                xn_table = xn_table OR desc<60>;
                // pxn_table and ap_table[0] apply only in EL1&0 translation regimes
                if !singlepriv then
                    pxn_table = pxn_table OR desc<59>;
                    ap_table<0> = ap_table<0> OR desc<61>;     // privileged

            level = level + 1;
            addrselecttop = addrselectbottom - 1;
            blocktranslate = FALSE;
    until blocktranslate;

    // Unpack the descriptor into address and upper and lower block attributes
    outputaddress = desc<39:addrselectbottom>:inputaddr<addrselectbottom-1:0>;

    // Check the output address is inside the supported range
    if !IsZero(desc<47:40>) then
        result.addrdesc.fault = AArch32.AddressSizeFault(ipaddress, domain, level, acctype,
                                                         iswrite, secondstage, s2fs1walk);
        return result;

    // Check the access flag
    if desc<10> == '0' then
        result.addrdesc.fault = AArch32.AccessFlagFault(ipaddress, domain, level, acctype,
                                                        iswrite, secondstage, s2fs1walk);
        return result;

    xn = desc<54>;                                         // Bit[54] of the block/page descriptor holds UXN
    pxn = desc<53>;                                        // Bit[53] of the block/page descriptor holds PXN
    ap = desc<7:6>:'1';                                    // Bits[7:6] of the block/page descriptor hold AP[2:1]
    contiguousbit = desc<52>;
    nG = desc<11>;
    sh = desc<9:8>;
    memattr = desc<5:2>;                                   // AttrIndx and NS bit in stage 1

    result.domain = bits(4) UNKNOWN;                       // Domains not used
    result.level = level;
    result.blocksize = 2^((3-level)*stride + grainsize);

    // Stage 1 translation regimes also inherit attributes from the tables
    if !secondstage then
        result.perms.xn = xn OR xn_table;
        result.perms.ap<2> = ap<2> OR ap_table<1>;         // Force read-only
        // PXN, nG and AP[1] apply only in EL1&0 stage 1 translation regimes
        if !singlepriv then
            result.perms.ap<1> = ap<1> AND NOT(ap_table<0>);  // Force privileged only
            result.perms.pxn = pxn OR pxn_table;
            // Pages from Non-secure tables are marked non-global in Secure EL1&0
            if IsSecure() then
                result.nG = nG OR ns_table;
            else
                result.nG = nG;
        else
            result.perms.ap<1> = '1';
            result.perms.pxn = '0';
            result.nG = '0';
        result.GP = desc<50>;                              // Stage 1 block or pages might be guarded
        result.perms.ap<0> = '1';
        result.addrdesc.memattrs = AArch32.S1AttrDecode(sh, memattr<2:0>, acctype);
        result.addrdesc.paddress.NS = memattr<3> OR ns_table;
    else
        result.perms.ap<2:1> = ap<2:1>;
        result.perms.ap<0> = '1';
        result.perms.xn = xn;
        if HaveExtendedExecuteNeverExt() then result.perms.xxn = desc<53>;
        result.perms.pxn = '0';
        result.nG = '0';
        if s2fs1walk then
            result.addrdesc.memattrs = S2AttrDecode(sh, memattr, AccType_PTW);
        else
            result.addrdesc.memattrs = S2AttrDecode(sh, memattr, acctype);
        result.addrdesc.paddress.NS = '1';

    result.addrdesc.paddress.address = ZeroExtend(outputaddress);
    result.addrdesc.fault = AArch32.NoFault();
    result.contiguous = contiguousbit == '1';
    if HaveCommonNotPrivateTransExt() then result.CnP = baseregister<0>;

    return result;
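The start-level formula above follows directly from the fixed AArch32 long-descriptor parameters: a 4KB table gives grainsize = 12, each level consumes stride = 9 address bits, and level = 4 - (1 + ((inputsize - grainsize - 1) DIV stride)). A short C program working this arithmetic for the stage 1 input sizes (TTBCR.T0SZ of 0..7 gives inputsize 32 down to 25), offered purely as a worked example:

    #include <stdio.h>

    int main(void) {
        const int grainsize = 12;          /* Log2(4KB page size) */
        const int stride = grainsize - 3;  /* 9 address bits consumed per level */

        /* e.g. TTBCR.T0SZ = 0 gives inputsize 32, which starts at level 1 */
        for (int inputsize = 25; inputsize <= 32; inputsize++) {
            int level = 4 - (1 + ((inputsize - grainsize - 1) / stride));
            printf("inputsize=%2d -> start level %d\n", inputsize, level);
        }
        return 0;
    }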

Library pseudocode for aarch32/translation/walk/AArch32.TranslationTableWalkSD

// AArch32.TranslationTableWalkSD()
// ================================
// Returns a result of a translation table walk using the Short-descriptor format
//
// Implementations might cache information from memory in any number of non-coherent TLB
// caching structures, and so avoid memory accesses that have been expressed in this
// pseudocode. The use of such TLBs is not expressed in this pseudocode.

TLBRecord AArch32.TranslationTableWalkSD(bits(32) vaddress, AccType acctype, boolean iswrite,
                                         integer size)
    assert ELUsingAArch32(S1TranslationRegime());

    // This is only called when address translation is enabled
    TLBRecord         result;
    AddressDescriptor l1descaddr;
    AddressDescriptor l2descaddr;
    bits(40)          outputaddress;

    // Variables for Abort functions
    ipaddress = bits(40) UNKNOWN;
    secondstage = FALSE;
    s2fs1walk = FALSE;
    NS = bit UNKNOWN;

    // Default setting of the domain and level.
    domain = bits(4) UNKNOWN;
    level = 1;

    // Determine correct Translation Table Base Register to use.
    bits(64) ttbr;
    n = UInt(TTBCR.N);
    if n == 0 || IsZero(vaddress<31:(32-n)>) then
        ttbr = TTBR0;
        disabled = (TTBCR.PD0 == '1');
    else
        ttbr = TTBR1;
        disabled = (TTBCR.PD1 == '1');
        n = 0;  // TTBR1 translation always works like N=0 TTBR0 translation

    // Check if Translation table walk disabled for translations with this Base register.
    if disabled then
        result.addrdesc.fault = AArch32.TranslationFault(ipaddress, domain, level, acctype,
                                                         iswrite, secondstage, s2fs1walk);
        return result;

    // Obtain descriptor from initial lookup.
    l1descaddr.paddress.address = ZeroExtend(ttbr<31:14-n>:vaddress<31-n:20>:'00');
    l1descaddr.paddress.NS = if IsSecure() then '0' else '1';
    IRGN = ttbr<0>:ttbr<6>;             // TTBR.IRGN
    RGN = ttbr<4:3>;                    // TTBR.RGN
    SH = ttbr<1>:ttbr<5>;               // TTBR.S:TTBR.NOS
    l1descaddr.memattrs = WalkAttrDecode(SH, RGN, IRGN, secondstage);

    if !HaveEL(EL2) || (IsSecure() && !IsSecureEL2Enabled()) then
        // if only 1 stage of translation
        l1descaddr2 = l1descaddr;
    else
        l1descaddr2 = AArch32.SecondStageWalk(l1descaddr, vaddress, acctype, iswrite, 4);
        // Check for a fault on the stage 2 walk
        if IsFault(l1descaddr2) then
            result.addrdesc.fault = l1descaddr2.fault;
            return result;

    // Update virtual address for abort functions
    l1descaddr2.vaddress = ZeroExtend(vaddress);

    accdesc = CreateAccessDescriptorPTW(acctype, secondstage, s2fs1walk, level);
    l1desc = _Mem[l1descaddr2, 4, accdesc];

    if SCTLR.EE == '1' then l1desc = BigEndianReverse(l1desc);

    // Process descriptor from initial lookup.
    case l1desc<1:0> of
        when '00'                                      // Fault, Reserved
            result.addrdesc.fault = AArch32.TranslationFault(ipaddress, domain, level, acctype,
                                                             iswrite, secondstage, s2fs1walk);
            return result;

        when '01'                                      // Large page or Small page
            domain = l1desc<8:5>;
            level = 2;
            pxn = l1desc<2>;
            NS = l1desc<3>;

            // Obtain descriptor from level 2 lookup.
            l2descaddr.paddress.address = ZeroExtend(l1desc<31:10>:vaddress<19:12>:'00');
            l2descaddr.paddress.NS = if IsSecure() then '0' else '1';
            l2descaddr.memattrs = l1descaddr.memattrs;

            if !HaveEL(EL2) || (IsSecure() && !IsSecureEL2Enabled()) then
                // if only 1 stage of translation
                l2descaddr2 = l2descaddr;
            else
                l2descaddr2 = AArch32.SecondStageWalk(l2descaddr, vaddress, acctype, iswrite, 4);
                // Check for a fault on the stage 2 walk
                if IsFault(l2descaddr2) then
                    result.addrdesc.fault = l2descaddr2.fault;
                    return result;

            // Update virtual address for abort functions
            l2descaddr2.vaddress = ZeroExtend(vaddress);

            accdesc = CreateAccessDescriptorPTW(acctype, secondstage, s2fs1walk, level);
            l2desc = _Mem[l2descaddr2, 4, accdesc];

            if SCTLR.EE == '1' then l2desc = BigEndianReverse(l2desc);

            // Process descriptor from level 2 lookup.
            if l2desc<1:0> == '00' then
                result.addrdesc.fault = AArch32.TranslationFault(ipaddress, domain, level,
                                                                 acctype, iswrite, secondstage,
                                                                 s2fs1walk);
                return result;

            nG = l2desc<11>;
            S = l2desc<10>;
            ap = l2desc<9,5:4>;

            if SCTLR.AFE == '1' && l2desc<4> == '0' then
                // Armv8 VMSAv8-32 does not support hardware management of the Access flag.
                result.addrdesc.fault = AArch32.AccessFlagFault(ipaddress, domain, level, acctype,
                                                                iswrite, secondstage, s2fs1walk);
                return result;

            if l2desc<1> == '0' then                   // Large page
                xn = l2desc<15>;
                tex = l2desc<14:12>;
                c = l2desc<3>;
                b = l2desc<2>;
                blocksize = 64;
                outputaddress = ZeroExtend(l2desc<31:16>:vaddress<15:0>);
            else                                       // Small page
                tex = l2desc<8:6>;
                c = l2desc<3>;
                b = l2desc<2>;
                xn = l2desc<0>;
                blocksize = 4;
                outputaddress = ZeroExtend(l2desc<31:12>:vaddress<11:0>);

        when '1x'                                      // Section or Supersection
            NS = l1desc<19>;
            nG = l1desc<17>;
            S = l1desc<16>;
            ap = l1desc<15,11:10>;
            tex = l1desc<14:12>;
            xn = l1desc<4>;
            c = l1desc<3>;
            b = l1desc<2>;
            pxn = l1desc<0>;
            level = 1;

            if SCTLR.AFE == '1' && l1desc<10> == '0' then
                // Armv8 VMSAv8-32 does not support hardware management of the Access flag.
                result.addrdesc.fault = AArch32.AccessFlagFault(ipaddress, domain, level, acctype,
                                                                iswrite, secondstage, s2fs1walk);
                return result;

            if l1desc<18> == '0' then                  // Section
                domain = l1desc<8:5>;
                blocksize = 1024;
                outputaddress = ZeroExtend(l1desc<31:20>:vaddress<19:0>);
            else                                       // Supersection
                domain = '0000';
                blocksize = 16384;
                outputaddress = l1desc<8:5>:l1desc<23:20>:l1desc<31:24>:vaddress<23:0>;

    // Decode the TEX, C, B and S bits to produce the TLBRecord's memory attributes
    if SCTLR.TRE == '0' then
        if RemapRegsHaveResetValues() then
            result.addrdesc.memattrs = AArch32.DefaultTEXDecode(tex, c, b, S, acctype);
        else
            result.addrdesc.memattrs = MemoryAttributes IMPLEMENTATION_DEFINED;
    else
        result.addrdesc.memattrs = AArch32.RemappedTEXDecode(tex, c, b, S, acctype);

    // Set the rest of the TLBRecord, try to add it to the TLB, and return it.
    result.perms.ap = ap;
    result.perms.xn = xn;
    result.perms.pxn = pxn;
    result.nG = nG;
    result.domain = domain;
    result.level = level;
    result.blocksize = blocksize;
    result.addrdesc.paddress.address = ZeroExtend(outputaddress);
    result.addrdesc.paddress.NS = if IsSecure() then NS else '1';
    result.addrdesc.fault = AArch32.NoFault();

    return result;
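To make the '1x' (Section) decode above concrete, here is a small C sketch extracting the same fields from a level 1 Section descriptor. Bit positions follow the pseudocode; the struct is a hypothetical container, and Supersections and the TEX/C/B attribute decode are deliberately omitted:

    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        uint8_t  domain;   /* l1desc<8:5> */
        uint8_t  ap;       /* l1desc<15,11:10> -> AP[2:0] */
        uint8_t  xn, pxn, ng, s;
        uint32_t pa;       /* output physical address */
    } Section;

    static Section decode_section(uint32_t l1desc, uint32_t vaddress) {
        Section sec;
        sec.domain = (l1desc >> 5) & 0xF;
        sec.ap     = (uint8_t)((((l1desc >> 15) & 1) << 2) | ((l1desc >> 10) & 3));
        sec.xn     = (l1desc >> 4) & 1;
        sec.pxn    = l1desc & 1;
        sec.ng     = (l1desc >> 17) & 1;
        sec.s      = (l1desc >> 16) & 1;
        /* 1MB Section: PA = l1desc<31:20> : vaddress<19:0> */
        sec.pa     = (l1desc & 0xFFF00000u) | (vaddress & 0x000FFFFFu);
        return sec;
    }

    int main(void) {
        /* bits<1:0> = '10' marks a Section; this one maps PA 0x20000000 */
        Section s = decode_section(0x20000C02u, 0x00012345u);
        printf("pa=0x%08x domain=%u ap=%u\n", s.pa, s.domain, s.ap);
        return 0;
    }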

Library pseudocode for aarch32/translation/walk/RemapRegsHaveResetValues

boolean RemapRegsHaveResetValues();

Library pseudocode for aarch64/debug/breakpoint/AArch64.BreakpointMatch

// AArch64.BreakpointMatch()
// =========================
// Breakpoint matching in an AArch64 translation regime.

boolean AArch64.BreakpointMatch(integer n, bits(64) vaddress, AccType acctype, integer size)
    assert !ELUsingAArch32(S1TranslationRegime());
    assert n <= UInt(ID_AA64DFR0_EL1.BRPs);

    enabled = DBGBCR_EL1[n].E == '1';
    ispriv = PSTATE.EL != EL0;
    linked = DBGBCR_EL1[n].BT == '0x01';
    isbreakpnt = TRUE;
    linked_to = FALSE;

    state_match = AArch64.StateMatch(DBGBCR_EL1[n].SSC, DBGBCR_EL1[n].HMC, DBGBCR_EL1[n].PMC,
                                     linked, DBGBCR_EL1[n].LBN, isbreakpnt, acctype, ispriv);
    value_match = AArch64.BreakpointValueMatch(n, vaddress, linked_to);

    if HaveAnyAArch32() && size == 4 then             // Check second halfword
        // If the breakpoint address and BAS of an Address breakpoint match the address of the
        // second halfword of an instruction, but not the address of the first halfword, it is
        // CONSTRAINED UNPREDICTABLE whether or not this breakpoint generates a Breakpoint debug
        // event.
        match_i = AArch64.BreakpointValueMatch(n, vaddress + 2, linked_to);
        if !value_match && match_i then
            value_match = ConstrainUnpredictableBool(Unpredictable_BPMATCHHALF);

    if vaddress<1> == '1' && DBGBCR_EL1[n].BAS == '1111' then
        // The above notwithstanding, if DBGBCR_EL1[n].BAS == '1111', then it is CONSTRAINED
        // UNPREDICTABLE whether or not a Breakpoint debug event is generated for an instruction
        // at the address DBGBVR_EL1[n]+2.
        if value_match then
            value_match = ConstrainUnpredictableBool(Unpredictable_BPMATCHHALF);

    match = value_match && state_match && enabled;

    return match;

Library pseudocode for aarch64/debug/breakpoint/AArch64.BreakpointValueMatch

// AArch64.BreakpointValueMatch() // ============================== boolean AArch64.BreakpointValueMatch(integer n, bits(64) vaddress, boolean linked_to) // "n" is the identity of the breakpoint unit to match against. // "vaddress" is the current instruction address, ignored if linked_to is TRUE and for Context // matching breakpoints. // "linked_to" is TRUE if this is a call from StateMatch for linking. // If a non-existent breakpoint then it is CONSTRAINED UNPREDICTABLE whether this gives // no match or the breakpoint is mapped to another UNKNOWN implemented breakpoint. if n > UInt(ID_AA64DFR0_EL1.BRPs) then (c, n) = ConstrainUnpredictableInteger(0, UInt(ID_AA64DFR0_EL1.BRPs), Unpredictable_BPNOTIMPL); assert c IN {Constraint_DISABLED, Constraint_UNKNOWN}; if c == Constraint_DISABLED then return FALSE; // If this breakpoint is not enabled, it cannot generate a match. (This could also happen on a // call from StateMatch for linking). if DBGBCR_EL1[n].E == '0' then return FALSE; context_aware = (n >= UInt(ID_AA64DFR0_EL1.BRPs) - UInt(ID_AA64DFR0_EL1.CTX_CMPs)); // If BT is set to a reserved type, behaves either as disabled or as a not-reserved type. dbgtype = DBGBCR_EL1[n].BT; if ((dbgtype IN {'011x','11xx'} && !HaveVirtHostExt() && !HaveV82Debug()) || // Context matching dbgtype == '010x' || // Reserved (dbgtype != '0x0x' && !context_aware) || // Context matching (dbgtype == '1xxx' && !HaveEL(EL2))) then // EL2 extension (c, dbgtype) = ConstrainUnpredictableBits(Unpredictable_RESBPTYPE); assert c IN {Constraint_DISABLED, Constraint_UNKNOWN}; if c == Constraint_DISABLED then return FALSE; // Otherwise the value returned by ConstrainUnpredictableBits must be a not-reserved value // Determine what to compare against. match_addr = (dbgtype == '0x0x'); match_vmid = (dbgtype == '10xx'); match_cid = (dbgtype == '001x'); match_cid1 = (dbgtype IN { '101x', 'x11x'}); match_cid2 = (dbgtype == '11xx'); linked = (dbgtype == 'xxx1'); // If this is a call from StateMatch, return FALSE if the breakpoint is not programmed for a // VMID and/or context ID match, or if not context-aware. The above assertions mean that the // code can just test for match_addr == TRUE to confirm all these things. if linked_to && (!linked || match_addr) then return FALSE; // If called from BreakpointMatch return FALSE for Linked context ID and/or VMID matches. if !linked_to && linked && !match_addr then return FALSE; // Do the comparison. if match_addr then byte = UInt(vaddress<1:0>); if HaveAnyAArch32() then // T32 instructions can be executed at EL0 in an AArch64 translation regime. assert byte IN {0,2}; // "vaddress" is halfword aligned byte_select_match = (DBGBCR_EL1[n].BAS<byte> == '1'); else assert byte == 0; // "vaddress" is word aligned byte_select_match = TRUE; // DBGBCR_EL1[n].BAS<byte> is RES1 top = AddrTop(vaddress, TRUE, PSTATE.EL); BVR_match = vaddress<top:2> == DBGBVR_EL1[n]<top:2> && byte_select_match; elsif match_cid then if IsInHost() then BVR_match = (CONTEXTIDR_EL2 == DBGBVR_EL1[n]<31:0>); else BVR_match = (PSTATE.EL IN {EL0, EL1} && CONTEXTIDR_EL1 == DBGBVR_EL1[n]<31:0>); elsif match_cid1 then BVR_match = (PSTATE.EL IN {EL0, EL1} && !IsInHost() && CONTEXTIDR_EL1 == DBGBVR_EL1[n]<31:0>); if match_vmid then if !Have16bitVMID() || VTCR_EL2.VS == '0' then vmid = ZeroExtend(VTTBR_EL2.VMID<7:0>, 16); bvr_vmid = ZeroExtend(DBGBVR_EL1[n]<39:32>, 16); else vmid = VTTBR_EL2.VMID; bvr_vmid = DBGBVR_EL1[n]<47:32>; BXVR_match = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() && !IsInHost() && vmid == bvr_vmid); elsif match_cid2 then BXVR_match = ((HaveVirtHostExt() || HaveV82Debug()) && EL2Enabled() && DBGBVR_EL1[n]<63:32> == CONTEXTIDR_EL2); bvr_match_valid = (match_addr || match_cid || match_cid1); bxvr_match_valid = (match_vmid || match_cid2); match = (!bxvr_match_valid || BXVR_match) && (!bvr_match_valid || BVR_match); return match;
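The reserved-type tests and the match_* assignments above use ASL bit-string patterns such as '0x0x', where 'x' is a don't-care bit. Below is a minimal C sketch of that pattern test over a 4-bit BT field, for readers following the decode; the helper names are illustrative and not part of the architecture pseudocode.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Match a 4-bit value against an ASL-style pattern such as "0x0x",
   written MSB first, where 'x' is a don't-care bit. Illustrative only. */
static bool bt_matches(uint8_t bt, const char *pattern)
{
    for (int i = 0; i < 4; i++) {
        if (pattern[i] == 'x') continue;          /* don't-care bit */
        unsigned bit = (bt >> (3 - i)) & 1u;      /* pattern is MSB-first */
        if (bit != (unsigned)(pattern[i] - '0')) return false;
    }
    return true;
}

int main(void)
{
    uint8_t bt = 0x0;                             /* unlinked address match */
    printf("match_addr=%d linked=%d match_vmid=%d\n",
           bt_matches(bt, "0x0x"), bt_matches(bt, "xxx1"), bt_matches(bt, "10xx"));
    return 0;
}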

Library pseudocode for aarch64/debug/breakpoint/AArch64.StateMatch

// AArch64.StateMatch() // ==================== // Determine whether a breakpoint or watchpoint is enabled in the current mode and state. boolean AArch64.StateMatch(bits(2) SSC, bit HMC, bits(2) PxC, boolean linked, bits(4) LBN, boolean isbreakpnt, AccType acctype, boolean ispriv) // "SSC", "HMC", "PxC" are the control fields from the DBGBCR[n] or DBGWCR[n] register. // "linked" is TRUE if this is a linked breakpoint/watchpoint type. // "LBN" is the linked breakpoint number from the DBGBCR[n] or DBGWCR[n] register. // "isbreakpnt" is TRUE for breakpoints, FALSE for watchpoints. // "ispriv" is valid for watchpoints, and selects between privileged and unprivileged accesses. // If parameters are set to a reserved type, behaves as either disabled or a defined type (c, SSC, HMC, PxC) = CheckValidStateMatch(SSC, HMC, PxC, isbreakpnt); if c == Constraint_DISABLED then return FALSE; // Otherwise the HMC,SSC,PxC values are either valid or the values returned by // CheckValidStateMatch are valid. EL3_match = HaveEL(EL3) && HMC == '1' && SSC<0> == '0'; EL2_match = HaveEL(EL2) && ((HMC == '1' && (SSC:PxC != '1000')) || SSC == '11'); EL1_match = PxC<0> == '1'; EL0_match = PxC<1> == '1'; if HaveNV2Ext() && acctype == AccType_NV2REGISTER && !isbreakpnt then priv_match = EL2_match; elsif !ispriv && !isbreakpnt then priv_match = EL0_match; else case PSTATE.EL of when EL3 priv_match = EL3_match; when EL2 priv_match = EL2_match; when EL1 priv_match = EL1_match; when EL0 priv_match = EL0_match; case SSC of when '00' security_state_match = TRUE; // Both when '01' security_state_match = !IsSecure(); // Non-secure only when '10' security_state_match = IsSecure(); // Secure only when '11' security_state_match = (HMC == '1' || IsSecure()); // HMC=1 -> Both, 0 -> Secure only if linked then // "LBN" must be an enabled context-aware breakpoint unit. If it is not context-aware then // it is CONSTRAINED UNPREDICTABLE whether this gives no match, or LBN is mapped to some // UNKNOWN breakpoint that is context-aware. lbn = UInt(LBN); first_ctx_cmp = (UInt(ID_AA64DFR0_EL1.BRPs) - UInt(ID_AA64DFR0_EL1.CTX_CMPs)); last_ctx_cmp = UInt(ID_AA64DFR0_EL1.BRPs); if (lbn < first_ctx_cmp || lbn > last_ctx_cmp) then (c, lbn) = ConstrainUnpredictableInteger(first_ctx_cmp, last_ctx_cmp, Unpredictable_BPNOTCTXCMP); assert c IN {Constraint_DISABLED, Constraint_NONE, Constraint_UNKNOWN}; case c of when Constraint_DISABLED return FALSE; // Disabled when Constraint_NONE linked = FALSE; // No linking // Otherwise ConstrainUnpredictableInteger returned a context-aware breakpoint if linked then vaddress = bits(64) UNKNOWN; linked_to = TRUE; linked_match = AArch64.BreakpointValueMatch(lbn, vaddress, linked_to); return priv_match && security_state_match && (!linked || linked_match);
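A short C sketch of the SSC security-state decode in the case statement above, assuming a flat model where only the SSC field, the HMC bit, and the current security state are in play; the standalone framing and function name are assumptions of this sketch.

#include <stdbool.h>
#include <stdio.h>

/* Decode DBGBCR/DBGWCR.SSC as in AArch64.StateMatch: which security
   state(s) the breakpoint or watchpoint is allowed to match in. */
static bool security_state_match(unsigned ssc, unsigned hmc, bool is_secure)
{
    switch (ssc & 3u) {
    case 0u: return true;                     /* both states */
    case 1u: return !is_secure;               /* Non-secure only */
    case 2u: return is_secure;                /* Secure only */
    default: return hmc == 1u || is_secure;   /* HMC=1: both, else Secure only */
    }
}

int main(void)
{
    printf("%d\n", security_state_match(1u, 0u, false)); /* NS-only match in NS: 1 */
    return 0;
}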

Library pseudocode for aarch64/debug/enables/AArch64.GenerateDebugExceptions

// AArch64.GenerateDebugExceptions() // ================================= boolean AArch64.GenerateDebugExceptions() return AArch64.GenerateDebugExceptionsFrom(PSTATE.EL, IsSecure(), PSTATE.D);

Library pseudocode for aarch64/debug/enables/AArch64.GenerateDebugExceptionsFrom

// AArch64.GenerateDebugExceptionsFrom() // ===================================== boolean AArch64.GenerateDebugExceptionsFrom(bits(2) from, boolean secure, bit mask) if OSLSR_EL1.OSLK == '1' || DoubleLockStatus() || Halted() then return FALSE; route_to_el2 = HaveEL(EL2) && (!secure || IsSecureEL2Enabled()) && (HCR_EL2.TGE == '1' || MDCR_EL2.TDE == '1'); target = (if route_to_el2 then EL2 else EL1); enabled = !HaveEL(EL3) || !secure || MDCR_EL3.SDD == '0'; if from == target then enabled = enabled && MDSCR_EL1.KDE == '1' && mask == '0'; else enabled = enabled && UInt(target) > UInt(from); return enabled;
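The enable rule above reduces to a small amount of boolean logic once the lock, routing, and Secure-state gates are separated. A host-side C sketch under a reduced model (Secure state with SDD, and the OS lock/halt gates, are folded into flags; Secure EL2 is ignored); the struct and field names are assumptions of this sketch, not architectural names.

#include <stdbool.h>
#include <stdio.h>

struct dbg_cfg {
    bool have_el2, have_el3, secure;
    bool tge, tde;   /* HCR_EL2.TGE, MDCR_EL2.TDE */
    bool sdd;        /* MDCR_EL3.SDD */
    bool kde;        /* MDSCR_EL1.KDE */
};

/* Sketch of GenerateDebugExceptionsFrom: exceptions route to EL2 when TGE or
   TDE is set; from the target EL itself they also need KDE and a clear
   PSTATE.D mask, otherwise the target must be a higher EL than "from". */
static bool generate_debug_exceptions_from(const struct dbg_cfg *c,
                                           int from, bool mask_d)
{
    bool route_to_el2 = c->have_el2 && !c->secure && (c->tge || c->tde);
    int target = route_to_el2 ? 2 : 1;
    bool enabled = !c->have_el3 || !c->secure || !c->sdd;
    if (from == target)
        enabled = enabled && c->kde && !mask_d;
    else
        enabled = enabled && target > from;
    return enabled;
}

int main(void)
{
    struct dbg_cfg c = { true, true, false, false, false, false, true };
    printf("%d\n", generate_debug_exceptions_from(&c, 0, false)); /* EL0: 1 */
    printf("%d\n", generate_debug_exceptions_from(&c, 1, true));  /* EL1, D masked: 0 */
    return 0;
}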

Library pseudocode for aarch64/debug/pmu/AArch64.CheckForPMUOverflow

// AArch64.CheckForPMUOverflow() // ============================= // Signal Performance Monitors overflow IRQ and CTI overflow events boolean AArch64.CheckForPMUOverflow() pmuirq = PMCR_EL0.E == '1' && PMINTENSET_EL1<31> == '1' && PMOVSSET_EL0<31> == '1'; for n = 0 to UInt(PMCR_EL0.N) - 1 if HaveEL(EL2) then E = (if n < UInt(MDCR_EL2.HPMN) then PMCR_EL0.E else MDCR_EL2.HPME); else E = PMCR_EL0.E; if E == '1' && PMINTENSET_EL1<n> == '1' && PMOVSSET_EL0<n> == '1' then pmuirq = TRUE; SetInterruptRequestLevel(InterruptID_PMUIRQ, if pmuirq then HIGH else LOW); CTI_SetEventLevel(CrossTriggerIn_PMUOverflow, if pmuirq then HIGH else LOW); // The request remains set until the condition is cleared. (For example, an interrupt handler // or cross-triggered event handler clears the overflow status flag by writing to PMOVSCLR_EL0.) return pmuirq;
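The loop above splits the counters at MDCR_EL2.HPMN: counters below HPMN follow the guest-visible PMCR_EL0.E, while the remainder follow the EL2-owned MDCR_EL2.HPME. A one-function C sketch of that partition; the standalone framing is an assumption of this sketch.

#include <stdbool.h>
#include <stdio.h>

/* Select the effective enable bit for event counter n, mirroring the
   HPMN partition in AArch64.CheckForPMUOverflow. */
static bool counter_enable_bit(unsigned n, bool have_el2,
                               unsigned hpmn, bool pmcr_e, bool hpme)
{
    if (have_el2)
        return n < hpmn ? pmcr_e : hpme;
    return pmcr_e;
}

int main(void)
{
    /* HPMN=4: counters 0..3 follow PMCR_EL0.E, counters 4.. follow HPME. */
    printf("%d\n", counter_enable_bit(2, true, 4, true, false)); /* 1 */
    printf("%d\n", counter_enable_bit(5, true, 4, true, false)); /* 0 */
    return 0;
}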

Library pseudocode for aarch64/debug/pmu/AArch64.CountEvents

// AArch64.CountEvents() // ===================== // Return TRUE if counter "n" should count its event. For the cycle counter, n == 31. boolean AArch64.CountEvents(integer n) assert n == 31 || n < UInt(PMCR_EL0.N); // Event counting is disabled in Debug state debug = Halted(); // In Non-secure state, some counters are reserved for EL2 if HaveEL(EL2) then E = if n < UInt(MDCR_EL2.HPMN) || n == 31 then PMCR_EL0.E else MDCR_EL2.HPME; else E = PMCR_EL0.E; enabled = E == '1' && PMCNTENSET_EL0<n> == '1'; // Event counting in Secure state is prohibited unless any one of: // * EL3 is not implemented // * EL3 is using AArch64 and MDCR_EL3.SPME == 1 prohibited = HaveEL(EL3) && IsSecure() && MDCR_EL3.SPME == '0'; // Event counting at EL2 is prohibited if all of: // * The HPMD Extension is implemented // * Executing at EL2 // * PMNx is not reserved for EL2 // * MDCR_EL2.HPMD == 1 if !prohibited && HaveEL(EL2) && HaveHPMDExt() && PSTATE.EL == EL2 && (n < UInt(MDCR_EL2.HPMN) || n == 31) then prohibited = (MDCR_EL2.HPMD == '1'); // The IMPLEMENTATION DEFINED authentication interface might override software controls if prohibited && !HaveNoSecurePMUDisableOverride() then prohibited = !ExternalSecureNoninvasiveDebugEnabled(); // For the cycle counter, PMCR_EL0.DP enables counting when otherwise prohibited if prohibited && n == 31 then prohibited = (PMCR_EL0.DP == '1'); // If FEAT_PMUv3p5 is implemented, cycle counting can be prohibited. // This is not overridden by PMCR_EL0.DP. if Havev85PMU() && n == 31 then if HaveEL(EL3) && IsSecure() && MDCR_EL3.SCCD == '1' then prohibited = TRUE; if PSTATE.EL == EL2 && MDCR_EL2.HCCD == '1' then prohibited = TRUE; // Event counting can be filtered by the {P, U, NSK, NSU, NSH, M, SH} bits filter = if n == 31 then PMCCFILTR_EL0[31:0] else PMEVTYPER_EL0[n]<31:0>; P = filter<31>; U = filter<30>; NSK = if HaveEL(EL3) then filter<29> else '0'; NSU = if HaveEL(EL3) then filter<28> else '0'; NSH = if HaveEL(EL2) then filter<27> else '0'; M = if HaveEL(EL3) then filter<26> else '0'; SH = if HaveEL(EL3) && HaveSecureEL2Ext() then filter<24> else '0'; case PSTATE.EL of when EL0 filtered = if IsSecure() then U == '1' else U != NSU; when EL1 filtered = if IsSecure() then P == '1' else P != NSK; when EL2 filtered = (if IsSecure() then NSH == SH else NSH == '0'); when EL3 filtered = (M != P); return !debug && enabled && !prohibited && !filtered;
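The filtering step above is a pure function of the PMEVTYPER/PMCCFILTR bits and the current Exception Level and security state. A C sketch of that decision for a reduced model without Secure EL2 (so the SH bit is omitted); names and framing are assumptions of this sketch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum el { EL0, EL1, EL2, EL3 };

/* Returns true when the event is filtered out (i.e. NOT counted),
   mirroring the P/U/NSK/NSU/NSH/M decode in AArch64.CountEvents. */
static bool count_filtered(uint32_t filter, enum el el, bool secure,
                           bool have_el2, bool have_el3)
{
    unsigned P   = (filter >> 31) & 1u;
    unsigned U   = (filter >> 30) & 1u;
    unsigned NSK = have_el3 ? (filter >> 29) & 1u : 0u;
    unsigned NSU = have_el3 ? (filter >> 28) & 1u : 0u;
    unsigned NSH = have_el2 ? (filter >> 27) & 1u : 0u;
    unsigned M   = have_el3 ? (filter >> 26) & 1u : 0u;

    switch (el) {
    case EL0: return secure ? U == 1u : U != NSU;
    case EL1: return secure ? P == 1u : P != NSK;
    case EL2: return NSH == 0u;   /* Non-secure EL2 only in this reduced model */
    case EL3: return M != P;
    }
    return false;
}

int main(void)
{
    /* filter == 0 at Non-secure EL1: P=0, NSK=0, so the event is counted. */
    printf("%d\n", count_filtered(0u, EL1, false, true, true)); /* 0 */
    return 0;
}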

Library pseudocode for aarch64/debug/statisticalprofiling/CheckProfilingBufferAccess

// CheckProfilingBufferAccess() // ============================ SysRegAccess CheckProfilingBufferAccess() if !HaveStatisticalProfiling() || PSTATE.EL == EL0 || UsingAArch32() then return SysRegAccess_UNDEFINED; if PSTATE.EL == EL1 && EL2Enabled() && MDCR_EL2.E2PB<0> != '1' then return SysRegAccess_TrapToEL2; if HaveEL(EL3) && PSTATE.EL != EL3 && MDCR_EL3.NSPB != SCR_EL3.NS:'1' then return SysRegAccess_TrapToEL3; return SysRegAccess_OK;

Library pseudocode for aarch64/debug/statisticalprofiling/CheckStatisticalProfilingAccess

// CheckStatisticalProfilingAccess() // ================================= SysRegAccess CheckStatisticalProfilingAccess() if !HaveStatisticalProfiling() || PSTATE.EL == EL0 || UsingAArch32() then return SysRegAccess_UNDEFINED; if PSTATE.EL == EL1 && EL2Enabled() && MDCR_EL2.TPMS == '1' then return SysRegAccess_TrapToEL2; if HaveEL(EL3) && PSTATE.EL != EL3 && MDCR_EL3.NSPB != SCR_EL3.NS:'1' then return SysRegAccess_TrapToEL3; return SysRegAccess_OK;

Library pseudocode for aarch64/debug/statisticalprofiling/CollectContextIDR1

// CollectContextIDR1() // ==================== boolean CollectContextIDR1() if !StatisticalProfilingEnabled() then return FALSE; if PSTATE.EL == EL2 then return FALSE; if EL2Enabled() && HCR_EL2.TGE == '1' then return FALSE; return PMSCR_EL1.CX == '1';

Library pseudocode for aarch64/debug/statisticalprofiling/CollectContextIDR2

// CollectContextIDR2() // ==================== boolean CollectContextIDR2() if !StatisticalProfilingEnabled() then return FALSE; if !EL2Enabled() then return FALSE; return PMSCR_EL2.CX == '1';

Library pseudocode for aarch64/debug/statisticalprofiling/CollectPhysicalAddress

// CollectPhysicalAddress() // ======================== boolean CollectPhysicalAddress() if !StatisticalProfilingEnabled() then return FALSE; (secure, el) = ProfilingBufferOwner(); if ((!secure && HaveEL(EL2)) || IsSecureEL2Enabled()) then return PMSCR_EL2.PA == '1' && (el == EL2 || PMSCR_EL1.PA == '1'); else return PMSCR_EL1.PA == '1';

Library pseudocode for aarch64/debug/statisticalprofiling/CollectRecord

// CollectRecord() // =============== boolean CollectRecord(bits(64) events, integer total_latency, OpType optype) assert StatisticalProfilingEnabled(); // Filtering by event if PMSFCR_EL1.FE == '1' && !IsZero(PMSEVFR_EL1) then bits(64) mask = 0xFFFF0000FF00F0AA<63:0>; // Bits [63:48,31:24,15:12,7,5,3,1] if HaveStatisticalProfiling() then mask<11> = '1'; // Alignment flag if HaveSVE() then mask<18:17> = Ones(); // Predicate flags e = events AND mask; m = PMSEVFR_EL1 AND mask; if !IsZero(NOT(e) AND m) then return FALSE; // Filtering by type if PMSFCR_EL1.FT == '1' && !IsZero(PMSFCR_EL1.<B,LD,ST>) then case optype of when OpType_Branch if PMSFCR_EL1.B == '0' then return FALSE; when OpType_Load if PMSFCR_EL1.LD == '0' then return FALSE; when OpType_Store if PMSFCR_EL1.ST == '0' then return FALSE; when OpType_LoadAtomic if PMSFCR_EL1.<LD,ST> == '00' then return FALSE; otherwise return FALSE; // Filtering by latency if PMSFCR_EL1.FL == '1' && !IsZero(PMSLATFR_EL1.MINLAT) then if total_latency < UInt(PMSLATFR_EL1.MINLAT) then return FALSE; // Check for UNPREDICTABLE cases: a filter is enabled but its filter value is zero if ((PMSFCR_EL1.FE == '1' && IsZero(PMSEVFR_EL1)) || (PMSFCR_EL1.FT == '1' && IsZero(PMSFCR_EL1.<B,LD,ST>)) || (PMSFCR_EL1.FL == '1' && IsZero(PMSLATFR_EL1.MINLAT))) then return ConstrainUnpredictableBool(Unpredictable_BADPMSFCR); return TRUE;
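The event filter above passes a record only if every event demanded by PMSEVFR_EL1 (within the architected mask) is present in the record's event bits. A runnable C sketch of just that mask test, using the same constant; the standalone framing is an assumption of this sketch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* A record passes when every required event (m) is present in the
   record's events (e) after masking: NOT(e) AND m must be zero. */
static bool event_filter_pass(uint64_t events, uint64_t pmsevfr, bool have_sve)
{
    uint64_t mask = 0xFFFF0000FF00F0AAull; /* bits [63:48,31:24,15:12,7,5,3,1] */
    mask |= 1ull << 11;                    /* alignment flag */
    if (have_sve)
        mask |= 3ull << 17;                /* predicate flags, bits [18:17] */
    uint64_t e = events & mask;
    uint64_t m = pmsevfr & mask;
    return (~e & m) == 0;
}

int main(void)
{
    /* Require event bit 1: a record with bit 1 set passes, one without fails. */
    printf("%d\n", event_filter_pass(1ull << 1, 1ull << 1, false)); /* 1 */
    printf("%d\n", event_filter_pass(0ull, 1ull << 1, false));      /* 0 */
    return 0;
}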

Library pseudocode for aarch64/debug/statisticalprofiling/CollectTimeStamp

// CollectTimeStamp() // ================== TimeStamp CollectTimeStamp() if !StatisticalProfilingEnabled() then return TimeStamp_None; (secure, el) = ProfilingBufferOwner(); if el == EL2 then if PMSCR_EL2.TS == '0' then return TimeStamp_None; else if PMSCR_EL1.TS == '0' then return TimeStamp_None; if EL2Enabled() then case PMSCR_EL2.PCT of when '00' return TimeStamp_Virtual; when '01' if el == EL2 then return TimeStamp_Physical; when '11' if (el == EL2 || PMSCR_EL1.PCT != '00') && HaveECVExt() then return TimeStamp_OffsetPhysical; otherwise Unreachable(); case PMSCR_EL1.PCT of when '00' return TimeStamp_Virtual; when '01' return TimeStamp_Physical; when '11' if HaveECVExt() then return TimeStamp_OffsetPhysical; otherwise Unreachable();

Library pseudocode for aarch64/debug/statisticalprofiling/OpType

enumeration OpType { OpType_Load, // Any memory-read operation other than atomics, compare-and-swap, and swap OpType_Store, // Any memory-write operation, including atomics without return OpType_LoadAtomic, // Atomics with return, compare-and-swap and swap OpType_Branch, // Software write to the PC OpType_Other // Any other class of operation };

Library pseudocode for aarch64/debug/statisticalprofiling/ProfilingBufferEnabled

// ProfilingBufferEnabled() // ======================== boolean ProfilingBufferEnabled() if !HaveStatisticalProfiling() then return FALSE; (secure, el) = ProfilingBufferOwner(); non_secure_bit = if secure then '0' else '1'; return (!ELUsingAArch32(el) && non_secure_bit == SCR_EL3.NS && PMBLIMITR_EL1.E == '1' && PMBSR_EL1.S == '0');

Library pseudocode for aarch64/debug/statisticalprofiling/ProfilingBufferOwner

// ProfilingBufferOwner() // ====================== (boolean, bits(2)) ProfilingBufferOwner() secure = if HaveEL(EL3) then (MDCR_EL3.NSPB<1> == '0') else IsSecure(); el = if !secure && HaveEL(EL2) && MDCR_EL2.E2PB == '00' then EL2 else EL1; return (secure, el);
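A compact C sketch of the ownership rule above: MDCR_EL3.NSPB<1> selects the owning security state, and within Non-secure state MDCR_EL2.E2PB == '00' hands the buffer to EL2. The struct framing and names are assumptions of this sketch.

#include <stdbool.h>
#include <stdio.h>

struct spe_cfg {
    bool have_el3, have_el2;
    unsigned nspb;   /* MDCR_EL3.NSPB, 2 bits */
    unsigned e2pb;   /* MDCR_EL2.E2PB, 2 bits */
    bool is_secure;  /* current security state when EL3 is absent */
};

/* Mirror of ProfilingBufferOwner: report the owning security state and EL. */
static void profiling_buffer_owner(const struct spe_cfg *c, bool *secure, int *el)
{
    *secure = c->have_el3 ? ((c->nspb & 2u) == 0) : c->is_secure;
    *el = (!*secure && c->have_el2 && c->e2pb == 0) ? 2 : 1;
}

int main(void)
{
    struct spe_cfg c = { true, true, 2u /* NS owns */, 0u /* EL2 owns */, false };
    bool secure; int el;
    profiling_buffer_owner(&c, &secure, &el);
    printf("secure=%d el=EL%d\n", secure, el); /* secure=0 el=EL2 */
    return 0;
}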

Library pseudocode for aarch64/debug/statisticalprofiling/ProfilingSynchronizationBarrier

// Barrier to ensure that all existing profiling data has been formatted, and profiling buffer // addresses have been translated such that writes to the profiling buffer have been initiated. // A following DSB completes when writes to the profiling buffer have completed. ProfilingSynchronizationBarrier();

Library pseudocode for aarch64/debug/statisticalprofiling/StatisticalProfilingEnabled

// StatisticalProfilingEnabled() // ============================= boolean StatisticalProfilingEnabled() if !HaveStatisticalProfiling() || UsingAArch32() || !ProfilingBufferEnabled() then return FALSE; in_host = EL2Enabled() && HCR_EL2.TGE == '1'; (secure, el) = ProfilingBufferOwner(); if UInt(el) < UInt(PSTATE.EL) || secure != IsSecure() || (in_host && el == EL1) then return FALSE; case PSTATE.EL of when EL3 Unreachable(); when EL2 spe_bit = PMSCR_EL2.E2SPE; when EL1 spe_bit = PMSCR_EL1.E1SPE; when EL0 spe_bit = (if in_host then PMSCR_EL2.E0HSPE else PMSCR_EL1.E0SPE); return spe_bit == '1';

Library pseudocode for aarch64/debug/statisticalprofiling/SysRegAccess

enumeration SysRegAccess { SysRegAccess_OK, SysRegAccess_UNDEFINED, SysRegAccess_TrapToEL1, SysRegAccess_TrapToEL2, SysRegAccess_TrapToEL3 };

Library pseudocode for aarch64/debug/statisticalprofiling/TimeStamp

enumeration TimeStamp { TimeStamp_None, // No timestamp TimeStamp_CoreSight, // CoreSight time (IMPLEMENTATION DEFINED) TimeStamp_Physical, // Physical counter value with no offset TimeStamp_OffsetPhysical, // Physical counter value minus CNTPOFF_EL2 TimeStamp_Virtual }; // Physical counter value minus CNTVOFF_EL2

Library pseudocode for aarch64/debug/takeexceptiondbg/AArch64.TakeExceptionInDebugState

// AArch64.TakeExceptionInDebugState() // =================================== // Take an exception in Debug state to an Exception Level using AArch64. AArch64.TakeExceptionInDebugState(bits(2) target_el, ExceptionRecord exception) assert HaveEL(target_el) && !ELUsingAArch32(target_el) && UInt(target_el) >= UInt(PSTATE.EL); sync_errors = HaveIESB() && SCTLR[target_el].IESB == '1'; if HaveDoubleFaultExt() then sync_errors = sync_errors || (SCR_EL3.EA == '1' && SCR_EL3.NMEA == '1' && target_el == EL3); // SCTLR[].IESB might be ignored in Debug state. if !ConstrainUnpredictableBool(Unpredictable_IESBinDebug) then sync_errors = FALSE; SynchronizeContext(); // If coming from AArch32 state, the top parts of the X[] registers might be set to zero from_32 = UsingAArch32(); if from_32 then AArch64.MaybeZeroRegisterUppers(); MaybeZeroSVEUppers(target_el); AArch64.ReportException(exception, target_el); PSTATE.EL = target_el; PSTATE.nRW = '0'; PSTATE.SP = '1'; SPSR[] = bits(32) UNKNOWN; ELR[] = bits(64) UNKNOWN; // PSTATE.{SS,D,A,I,F} are not observable and ignored in Debug state, so behave as if UNKNOWN. PSTATE.<SS,D,A,I,F> = bits(5) UNKNOWN; PSTATE.IL = '0'; if from_32 then // Coming from AArch32 PSTATE.IT = '00000000'; PSTATE.T = '0'; // PSTATE.J is RES0 if (HavePANExt() && (PSTATE.EL == EL1 || (PSTATE.EL == EL2 && ELIsInHost(EL0))) && SCTLR[].SPAN == '0') then PSTATE.PAN = '1'; if HaveUAOExt() then PSTATE.UAO = '0'; if HaveBTIExt() then PSTATE.BTYPE = '00'; if HaveSSBSExt() then PSTATE.SSBS = bit UNKNOWN; if HaveMTEExt() then PSTATE.TCO = '1'; DLR_EL0 = bits(64) UNKNOWN; DSPSR_EL0 = bits(32) UNKNOWN; EDSCR.ERR = '1'; UpdateEDSCRFields(); // Update EDSCR processor state flags. if sync_errors then SynchronizeErrors(); EndOfInstruction();

Library pseudocode for aarch64/debug/watchpoint/AArch64.WatchpointByteMatch

// AArch64.WatchpointByteMatch() // ============================= boolean AArch64.WatchpointByteMatch(integer n, AccType acctype, bits(64) vaddress) el = if HaveNV2Ext() && acctype == AccType_NV2REGISTER then EL2 else PSTATE.EL; top = AddrTop(vaddress, FALSE, el); bottom = if DBGWVR_EL1[n]<2> == '1' then 2 else 3; // Word or doubleword byte_select_match = (DBGWCR_EL1[n].BAS<UInt(vaddress<bottom-1:0>)> != '0'); mask = UInt(DBGWCR_EL1[n].MASK); // If DBGWCR_EL1[n].MASK is a non-zero value and DBGWCR_EL1[n].BAS is not set to '11111111', or // DBGWCR_EL1[n].BAS specifies a non-contiguous set of bytes, behavior is CONSTRAINED // UNPREDICTABLE. if mask > 0 && !IsOnes(DBGWCR_EL1[n].BAS) then byte_select_match = ConstrainUnpredictableBool(Unpredictable_WPMASKANDBAS); else LSB = (DBGWCR_EL1[n].BAS AND NOT(DBGWCR_EL1[n].BAS - 1)); MSB = (DBGWCR_EL1[n].BAS + LSB); if !IsZero(MSB AND (MSB - 1)) then // Not contiguous byte_select_match = ConstrainUnpredictableBool(Unpredictable_WPBASCONTIGUOUS); bottom = 3; // For the whole doubleword // If the address mask is set to a reserved value, the behavior is CONSTRAINED UNPREDICTABLE. if mask > 0 && mask <= 2 then (c, mask) = ConstrainUnpredictableInteger(3, 31, Unpredictable_RESWPMASK); assert c IN {Constraint_DISABLED, Constraint_NONE, Constraint_UNKNOWN}; case c of when Constraint_DISABLED return FALSE; // Disabled when Constraint_NONE mask = 0; // No masking // Otherwise the value returned by ConstrainUnpredictableInteger is a not-reserved value if mask > bottom then WVR_match = (vaddress<top:mask> == DBGWVR_EL1[n]<top:mask>); // If masked bits of DBGWVR_EL1[n] are not zero, the behavior is CONSTRAINED UNPREDICTABLE. if WVR_match && !IsZero(DBGWVR_EL1[n]<mask-1:bottom>) then WVR_match = ConstrainUnpredictableBool(Unpredictable_WPMASKEDBITS); else WVR_match = vaddress<top:bottom> == DBGWVR_EL1[n]<top:bottom>; return WVR_match && byte_select_match;
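The LSB/MSB manipulation above is a standard trick for checking that the BAS byte-select mask is one contiguous run of set bits: adding the lowest set bit to the mask carries out past the run exactly when the run is contiguous. A runnable C sketch of the same test; the helper name is an assumption of this sketch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True when bas is a single contiguous run of set bits (or zero). */
static bool bas_is_contiguous(uint8_t bas)
{
    uint8_t lsb = bas & (uint8_t)(-bas);      /* lowest set bit */
    uint8_t msb = (uint8_t)(bas + lsb);       /* carries out past the run */
    return (msb & (uint8_t)(msb - 1)) == 0;   /* power of two, or zero */
}

int main(void)
{
    printf("%d\n", bas_is_contiguous(0x3C)); /* 00111100 -> 1 (contiguous) */
    printf("%d\n", bas_is_contiguous(0x5A)); /* 01011010 -> 0 (not contiguous) */
    return 0;
}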

Library pseudocode for aarch64/debug/watchpoint/AArch64.WatchpointMatch

// AArch64.WatchpointMatch() // ========================= // Watchpoint matching in an AArch64 translation regime. boolean AArch64.WatchpointMatch(integer n, bits(64) vaddress, integer size, boolean ispriv, AccType acctype, boolean iswrite) assert !ELUsingAArch32(S1TranslationRegime()); assert n <= UInt(ID_AA64DFR0_EL1.WRPs); // "ispriv" is FALSE for LDTR/STTR instructions executed at EL1 and all // load/stores at EL0, TRUE for all other load/stores. "iswrite" is TRUE for stores, FALSE for // loads. enabled = DBGWCR_EL1[n].E == '1'; linked = DBGWCR_EL1[n].WT == '1'; isbreakpnt = FALSE; state_match = AArch64.StateMatch(DBGWCR_EL1[n].SSC, DBGWCR_EL1[n].HMC, DBGWCR_EL1[n].PAC, linked, DBGWCR_EL1[n].LBN, isbreakpnt, acctype, ispriv); ls_match = (DBGWCR_EL1[n].LSC<(if iswrite then 1 else 0)> == '1'); value_match = FALSE; for byte = 0 to size - 1 value_match = value_match || AArch64.WatchpointByteMatch(n, acctype, vaddress + byte); return value_match && state_match && ls_match && enabled;

Library pseudocode for aarch64/exceptions/aborts/AArch64.Abort

// AArch64.Abort() // =============== // Abort and Debug exception handling in an AArch64 translation regime. AArch64.Abort(bits(64) vaddress, FaultRecord fault) if IsDebugException(fault) then if fault.acctype == AccType_IFETCH then if UsingAArch32() && fault.debugmoe == DebugException_VectorCatch then AArch64.VectorCatchException(fault); else AArch64.BreakpointException(fault); else AArch64.WatchpointException(vaddress, fault); elsif fault.acctype == AccType_IFETCH then AArch64.InstructionAbort(vaddress, fault); else AArch64.DataAbort(vaddress, fault);

Library pseudocode for aarch64/exceptions/aborts/AArch64.AbortSyndrome

// AArch64.AbortSyndrome() // ======================= // Creates an exception syndrome record for Abort and Watchpoint exceptions // from an AArch64 translation regime. ExceptionRecord AArch64.AbortSyndrome(Exception exceptype, FaultRecord fault, bits(64) vaddress) exception = ExceptionSyndrome(exceptype); d_side = exceptype IN {Exception_DataAbort, Exception_NV2DataAbort, Exception_Watchpoint, Exception_NV2Watchpoint}; exception.syndrome = AArch64.FaultSyndrome(d_side, fault); exception.vaddress = ZeroExtend(vaddress); if IPAValid(fault) then exception.ipavalid = TRUE; exception.NS = fault.ipaddress.NS; exception.ipaddress = fault.ipaddress.address; else exception.ipavalid = FALSE; return exception;

Library pseudocode for aarch64/exceptions/aborts/AArch64.CheckPCAlignment

// AArch64.CheckPCAlignment() // ========================== AArch64.CheckPCAlignment() bits(64) pc = ThisInstrAddr(); if pc<1:0> != '00' then AArch64.PCAlignmentFault();

Library pseudocode for aarch64/exceptions/aborts/AArch64.DataAbort

// AArch64.DataAbort() // =================== AArch64.DataAbort(bits(64) vaddress, FaultRecord fault) route_to_el3 = HaveEL(EL3) && SCR_EL3.EA == '1' && IsExternalAbort(fault); route_to_el2 = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() && (HCR_EL2.TGE == '1' || (HaveRASExt() && HCR_EL2.TEA == '1' && IsExternalAbort(fault)) || (HaveNV2Ext() && fault.acctype == AccType_NV2REGISTER) || IsSecondStage(fault))); bits(64) preferred_exception_return = ThisInstrAddr(); if (HaveDoubleFaultExt() && (PSTATE.EL == EL3 || route_to_el3) && IsExternalAbort(fault) && SCR_EL3.EASE == '1') then vect_offset = 0x180; else vect_offset = 0x0; if HaveNV2Ext() && fault.acctype == AccType_NV2REGISTER then exception = AArch64.AbortSyndrome(Exception_NV2DataAbort, fault, vaddress); else exception = AArch64.AbortSyndrome(Exception_DataAbort, fault, vaddress); if PSTATE.EL == EL3 || route_to_el3 then AArch64.TakeException(EL3, exception, preferred_exception_return, vect_offset); elsif PSTATE.EL == EL2 || route_to_el2 then AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset); else AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);

Library pseudocode for aarch64/exceptions/aborts/AArch64.EffectiveTCF

// AArch64.EffectiveTCF() // ====================== // Returns the TCF field applied to tag check faults in the given Exception Level. bits(2) AArch64.EffectiveTCF(bits(2) el) bits(2) tcf; if el == EL3 then tcf = SCTLR_EL3.TCF; elsif el == EL2 then tcf = SCTLR_EL2.TCF; elsif el == EL1 then tcf = SCTLR_EL1.TCF; elsif el == EL0 && HCR_EL2.<E2H,TGE> == '11' then tcf = SCTLR_EL2.TCF0; elsif el == EL0 && HCR_EL2.<E2H,TGE> != '11' then tcf = SCTLR_EL1.TCF0; if tcf == '11' then (-,tcf) = ConstrainUnpredictableBits(Unpredictable_RESTCF); return tcf;

Library pseudocode for aarch64/exceptions/aborts/AArch64.InstructionAbort

// AArch64.InstructionAbort() // ========================== AArch64.InstructionAbort(bits(64) vaddress, FaultRecord fault) // External aborts on instruction fetch must be taken synchronously if HaveDoubleFaultExt() then assert fault.statuscode != Fault_AsyncExternal; route_to_el3 = HaveEL(EL3) && SCR_EL3.EA == '1' && IsExternalAbort(fault); route_to_el2 = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() && (HCR_EL2.TGE == '1' || IsSecondStage(fault) || (HaveRASExt() && HCR_EL2.TEA == '1' && IsExternalAbort(fault)))); bits(64) preferred_exception_return = ThisInstrAddr(); if (HaveDoubleFaultExt() && (PSTATE.EL == EL3 || route_to_el3) && IsExternalAbort(fault) && SCR_EL3.EASE == '1') then vect_offset = 0x180; else vect_offset = 0x0; exception = AArch64.AbortSyndrome(Exception_InstructionAbort, fault, vaddress); if PSTATE.EL == EL3 || route_to_el3 then AArch64.TakeException(EL3, exception, preferred_exception_return, vect_offset); elsif PSTATE.EL == EL2 || route_to_el2 then AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset); else AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);

Library pseudocode for aarch64/exceptions/aborts/AArch64.PCAlignmentFault

// AArch64.PCAlignmentFault() // ========================== // Called on unaligned program counter in AArch64 state. AArch64.PCAlignmentFault() bits(64) preferred_exception_return = ThisInstrAddr(); vect_offset = 0x0; exception = ExceptionSyndrome(Exception_PCAlignment); exception.vaddress = ThisInstrAddr(); if UInt(PSTATE.EL) > UInt(EL1) then AArch64.TakeException(PSTATE.EL, exception, preferred_exception_return, vect_offset); elsif EL2Enabled() && HCR_EL2.TGE == '1' then AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset); else AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);

Library pseudocode for aarch64/exceptions/aborts/AArch64.RaiseTagCheckFault

// AArch64.RaiseTagCheckFault() // ============================ // Raise a tag check fault exception. AArch64.RaiseTagCheckFault(bits(64) va, boolean write) bits(2) target_el; bits(64) preferred_exception_return = ThisInstrAddr(); integer vect_offset = 0x0; if PSTATE.EL == EL0 then target_el = if HCR_EL2.TGE == '0' then EL1 else EL2; else target_el = PSTATE.EL; exception = ExceptionSyndrome(Exception_DataAbort); exception.syndrome<5:0> = '010001'; if write then exception.syndrome<6> = '1'; exception.vaddress = bits(4) UNKNOWN : va<59:0>; AArch64.TakeException(target_el, exception, preferred_exception_return, vect_offset);

Library pseudocode for aarch64/exceptions/aborts/AArch64.ReportTagCheckFault

// AArch64.ReportTagCheckFault() // ============================= // Records a tag check fault exception into the appropriate TCFR_ELx. AArch64.ReportTagCheckFault(bits(2) el, bit ttbr) if el == EL3 then assert ttbr == '0'; TFSR_EL3.TF0 = '1'; elsif el == EL2 then if ttbr == '0' then TFSR_EL2.TF0 = '1'; else TFSR_EL2.TF1 = '1'; elsif el == EL1 then if ttbr == '0' then TFSR_EL1.TF0 = '1'; else TFSR_EL1.TF1 = '1'; elsif el == EL0 then if ttbr == '0' then TFSRE0_EL1.TF0 = '1'; else TFSRE0_EL1.TF1 = '1';

Library pseudocode for aarch64/exceptions/aborts/AArch64.SPAlignmentFault

// AArch64.SPAlignmentFault() // ========================== // Called on an unaligned stack pointer in AArch64 state. AArch64.SPAlignmentFault() bits(64) preferred_exception_return = ThisInstrAddr(); vect_offset = 0x0; exception = ExceptionSyndrome(Exception_SPAlignment); if UInt(PSTATE.EL) > UInt(EL1) then AArch64.TakeException(PSTATE.EL, exception, preferred_exception_return, vect_offset); elsif EL2Enabled() && HCR_EL2.TGE == '1' then AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset); else AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);

Library pseudocode for aarch64/exceptions/aborts/AArch64.TagCheckFault

// AArch64.TagCheckFault() // ======================= // Handle a tag check fault condition. AArch64.TagCheckFault(bits(64) vaddress, AccType acctype, boolean iswrite) bits(2) tcf = AArch64.EffectiveTCF(PSTATE.EL); if tcf == '01' then AArch64.RaiseTagCheckFault(vaddress, iswrite); elsif tcf == '10' then AArch64.ReportTagCheckFault(PSTATE.EL, vaddress<55>);
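A small C sketch of the TCF dispatch above: '00' ignores the failed check, '01' raises a synchronous fault, '10' accumulates the fault asynchronously, and the reserved '11' does not reach this point because AArch64.EffectiveTCF constrains it away. The callback names are stand-ins for this sketch, not architectural functions.

#include <stdio.h>

static void raise_fault(void)  { puts("synchronous tag check fault"); }
static void report_fault(void) { puts("asynchronous accumulation"); }

/* Dispatch on a 2-bit effective TCF value, as in AArch64.TagCheckFault. */
static void tag_check_fault(unsigned tcf)
{
    if (tcf == 1u)
        raise_fault();
    else if (tcf == 2u)
        report_fault();
    /* tcf == 0: tag check faults have no effect on the PE */
}

int main(void) { tag_check_fault(1u); return 0; }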

Library pseudocode for aarch64/exceptions/aborts/BranchTargetException

// BranchTargetException // ===================== // Raise branch target exception. AArch64.BranchTargetException(bits(52) vaddress) route_to_el2 = PSTATE.EL == EL0 && EL2Enabled() && HCR_EL2.TGE == '1'; bits(64) preferred_exception_return = ThisInstrAddr(); vect_offset = 0x0; exception = ExceptionSyndrome(Exception_BranchTarget); exception.syndrome<1:0> = PSTATE.BTYPE; exception.syndrome<24:2> = Zeros(); // RES0 if UInt(PSTATE.EL) > UInt(EL1) then AArch64.TakeException(PSTATE.EL, exception, preferred_exception_return, vect_offset); elsif route_to_el2 then AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset); else AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);

Library pseudocode for aarch64/exceptions/asynch/AArch64.TakePhysicalFIQException

// AArch64.TakePhysicalFIQException() // ================================== AArch64.TakePhysicalFIQException() route_to_el3 = HaveEL(EL3) && SCR_EL3.FIQ == '1'; route_to_el2 = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() && (HCR_EL2.TGE == '1' || HCR_EL2.FMO == '1')); bits(64) preferred_exception_return = ThisInstrAddr(); vect_offset = 0x100; exception = ExceptionSyndrome(Exception_FIQ); if route_to_el3 then AArch64.TakeException(EL3, exception, preferred_exception_return, vect_offset); elsif PSTATE.EL == EL2 || route_to_el2 then assert PSTATE.EL != EL3; AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset); else assert PSTATE.EL IN {EL0, EL1}; AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);

Library pseudocode for aarch64/exceptions/asynch/AArch64.TakePhysicalIRQException

// AArch64.TakePhysicalIRQException() // ================================== // Take an enabled physical IRQ exception. AArch64.TakePhysicalIRQException() route_to_el3 = HaveEL(EL3) && SCR_EL3.IRQ == '1'; route_to_el2 = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() && (HCR_EL2.TGE == '1' || HCR_EL2.IMO == '1')); bits(64) preferred_exception_return = ThisInstrAddr(); vect_offset = 0x80; exception = ExceptionSyndrome(Exception_IRQ); if route_to_el3 then AArch64.TakeException(EL3, exception, preferred_exception_return, vect_offset); elsif PSTATE.EL == EL2 || route_to_el2 then assert PSTATE.EL != EL3; AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset); else assert PSTATE.EL IN {EL0, EL1}; AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);

Library pseudocode for aarch64/exceptions/asynch/AArch64.TakePhysicalSErrorException

// AArch64.TakePhysicalSErrorException()
// =====================================

AArch64.TakePhysicalSErrorException(boolean impdef_syndrome, bits(24) syndrome)

    route_to_el3 = HaveEL(EL3) && SCR_EL3.EA == '1';
    route_to_el2 = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() &&
                    (HCR_EL2.TGE == '1' || (!IsInHost() && HCR_EL2.AMO == '1')));
    bits(64) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x180;

    exception = ExceptionSyndrome(Exception_SError);
    exception.syndrome<24>   = if impdef_syndrome then '1' else '0';
    exception.syndrome<23:0> = syndrome;

    ClearPendingPhysicalSError();

    if PSTATE.EL == EL3 || route_to_el3 then
        AArch64.TakeException(EL3, exception, preferred_exception_return, vect_offset);
    elsif PSTATE.EL == EL2 || route_to_el2 then
        AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
    else
        AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);

Library pseudocode for aarch64/exceptions/asynch/AArch64.TakeVirtualFIQException

// AArch64.TakeVirtualFIQException()
// =================================

AArch64.TakeVirtualFIQException()
    assert PSTATE.EL IN {EL0, EL1} && EL2Enabled();
    assert HCR_EL2.TGE == '0' && HCR_EL2.FMO == '1';  // Virtual FIQ enabled if TGE==0 and FMO==1

    bits(64) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x100;
    exception = ExceptionSyndrome(Exception_FIQ);

    AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);

Library pseudocode for aarch64/exceptions/asynch/AArch64.TakeVirtualIRQException

// AArch64.TakeVirtualIRQException()
// =================================

AArch64.TakeVirtualIRQException()
    assert PSTATE.EL IN {EL0, EL1} && EL2Enabled();
    assert HCR_EL2.TGE == '0' && HCR_EL2.IMO == '1';  // Virtual IRQ enabled if TGE==0 and IMO==1

    bits(64) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x80;
    exception = ExceptionSyndrome(Exception_IRQ);

    AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);

Library pseudocode for aarch64/exceptions/asynch/AArch64.TakeVirtualSErrorException

// AArch64.TakeVirtualSErrorException()
// ====================================

AArch64.TakeVirtualSErrorException(boolean impdef_syndrome, bits(24) syndrome)

    assert PSTATE.EL IN {EL0, EL1} && EL2Enabled();
    assert HCR_EL2.TGE == '0' && HCR_EL2.AMO == '1';  // Virtual SError enabled if TGE==0 and AMO==1

    bits(64) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x180;

    exception = ExceptionSyndrome(Exception_SError);
    if HaveRASExt() then
        exception.syndrome<24>   = VSESR_EL2.IDS;
        exception.syndrome<23:0> = VSESR_EL2.ISS;
    else
        exception.syndrome<24> = if impdef_syndrome then '1' else '0';
        if impdef_syndrome then exception.syndrome<23:0> = syndrome;

    ClearPendingVirtualSError();
    AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);

Library pseudocode for aarch64/exceptions/debug/AArch64.BreakpointException

// AArch64.BreakpointException()
// =============================

AArch64.BreakpointException(FaultRecord fault)
    assert PSTATE.EL != EL3;

    route_to_el2 = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() &&
                    (HCR_EL2.TGE == '1' || MDCR_EL2.TDE == '1'));

    bits(64) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x0;

    vaddress = bits(64) UNKNOWN;
    exception = AArch64.AbortSyndrome(Exception_Breakpoint, fault, vaddress);

    if PSTATE.EL == EL2 || route_to_el2 then
        AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
    else
        AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);

Library pseudocode for aarch64/exceptions/debug/AArch64.SoftwareBreakpoint

// AArch64.SoftwareBreakpoint()
// ============================

AArch64.SoftwareBreakpoint(bits(16) immediate)

    route_to_el2 = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() &&
                    (HCR_EL2.TGE == '1' || MDCR_EL2.TDE == '1'));

    bits(64) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x0;

    exception = ExceptionSyndrome(Exception_SoftwareBreakpoint);
    exception.syndrome<15:0> = immediate;

    if UInt(PSTATE.EL) > UInt(EL1) then
        AArch64.TakeException(PSTATE.EL, exception, preferred_exception_return, vect_offset);
    elsif route_to_el2 then
        AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
    else
        AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);

Library pseudocode for aarch64/exceptions/debug/AArch64.SoftwareStepException

// AArch64.SoftwareStepException()
// ===============================

AArch64.SoftwareStepException()
    assert PSTATE.EL != EL3;

    route_to_el2 = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() &&
                    (HCR_EL2.TGE == '1' || MDCR_EL2.TDE == '1'));

    bits(64) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x0;

    exception = ExceptionSyndrome(Exception_SoftwareStep);
    if SoftwareStep_DidNotStep() then
        exception.syndrome<24> = '0';
    else
        exception.syndrome<24> = '1';
        exception.syndrome<6>  = if SoftwareStep_SteppedEX() then '1' else '0';

    if PSTATE.EL == EL2 || route_to_el2 then
        AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
    else
        AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);

Library pseudocode for aarch64/exceptions/debug/AArch64.VectorCatchException

// AArch64.VectorCatchException()
// ==============================
// Vector Catch taken from EL0 or EL1 to EL2. This can only be called when debug exceptions are
// being routed to EL2, as Vector Catch is a legacy debug event.

AArch64.VectorCatchException(FaultRecord fault)
    assert PSTATE.EL != EL2;
    assert EL2Enabled() && (HCR_EL2.TGE == '1' || MDCR_EL2.TDE == '1');

    bits(64) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x0;

    vaddress = bits(64) UNKNOWN;
    exception = AArch64.AbortSyndrome(Exception_VectorCatch, fault, vaddress);

    AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);

Library pseudocode for aarch64/exceptions/debug/AArch64.WatchpointException

// AArch64.WatchpointException()
// =============================

AArch64.WatchpointException(bits(64) vaddress, FaultRecord fault)
    assert PSTATE.EL != EL3;

    route_to_el2 = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() &&
                    (HCR_EL2.TGE == '1' || MDCR_EL2.TDE == '1'));

    bits(64) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x0;

    if HaveNV2Ext() && fault.acctype == AccType_NV2REGISTER then
        exception = AArch64.AbortSyndrome(Exception_NV2Watchpoint, fault, vaddress);
    else
        exception = AArch64.AbortSyndrome(Exception_Watchpoint, fault, vaddress);

    if PSTATE.EL == EL2 || route_to_el2 then
        AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
    else
        AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);

Library pseudocode for aarch64/exceptions/exceptions/AArch64.ExceptionClass

// AArch64.ExceptionClass()
// ========================
// Returns the Exception Class and Instruction Length fields to be reported in ESR

(integer,bit) AArch64.ExceptionClass(Exception exceptype, bits(2) target_el)

    il = if ThisInstrLength() == 32 then '1' else '0';
    from_32 = UsingAArch32();
    assert from_32 || il == '1';                    // AArch64 instructions always 32-bit

    case exceptype of
        when Exception_Uncategorized        ec = 0x00; il = '1';
        when Exception_WFxTrap              ec = 0x01;
        when Exception_CP15RTTrap           ec = 0x03; assert from_32;
        when Exception_CP15RRTTrap          ec = 0x04; assert from_32;
        when Exception_CP14RTTrap           ec = 0x05; assert from_32;
        when Exception_CP14DTTrap           ec = 0x06; assert from_32;
        when Exception_AdvSIMDFPAccessTrap  ec = 0x07;
        when Exception_FPIDTrap             ec = 0x08;
        when Exception_PACTrap              ec = 0x09;
        when Exception_CP14RRTTrap          ec = 0x0C; assert from_32;
        when Exception_BranchTarget         ec = 0x0D;
        when Exception_IllegalState         ec = 0x0E; il = '1';
        when Exception_SupervisorCall       ec = 0x11;
        when Exception_HypervisorCall       ec = 0x12;
        when Exception_MonitorCall          ec = 0x13;
        when Exception_SystemRegisterTrap   ec = 0x18; assert !from_32;
        when Exception_SVEAccessTrap        ec = 0x19; assert !from_32;
        when Exception_ERetTrap             ec = 0x1A;
        when Exception_PACFail              ec = 0x1C;
        when Exception_InstructionAbort     ec = 0x20; il = '1';
        when Exception_PCAlignment          ec = 0x22; il = '1';
        when Exception_DataAbort            ec = 0x24;
        when Exception_NV2DataAbort         ec = 0x25;
        when Exception_SPAlignment          ec = 0x26; il = '1'; assert !from_32;
        when Exception_FPTrappedException   ec = 0x28;
        when Exception_SError               ec = 0x2F; il = '1';
        when Exception_Breakpoint           ec = 0x30; il = '1';
        when Exception_SoftwareStep         ec = 0x32; il = '1';
        when Exception_Watchpoint           ec = 0x34; il = '1';
        when Exception_NV2Watchpoint        ec = 0x35; il = '1';
        when Exception_SoftwareBreakpoint   ec = 0x38;
        when Exception_VectorCatch          ec = 0x3A; il = '1'; assert from_32;
        otherwise                           Unreachable();

    if ec IN {0x20,0x24,0x30,0x32,0x34} && target_el == PSTATE.EL then
        ec = ec + 1;

    if ec IN {0x11,0x12,0x13,0x28,0x38} && !from_32 then
        ec = ec + 4;

    return (ec,il);
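
Note (editorial, not Arm pseudocode): the two adjustments at the end of the function can be mirrored in C as a cross-check. The first turns, for example, a Data Abort taken without a change in Exception level from EC 0x24 into 0x25; the second turns an AArch32 SVC encoding (0x11) into its AArch64 form (0x15). The helper below is a sketch assuming the same inputs as the pseudocode:

    #include <stdbool.h>
    #include <stdint.h>

    /* Mirrors the final EC adjustments of AArch64.ExceptionClass():
     * +1 for abort/debug exceptions taken at the same Exception level,
     * +4 for call/FP/software-breakpoint exceptions taken from AArch64. */
    static uint32_t adjust_ec(uint32_t ec, bool same_el, bool from_aarch32)
    {
        switch (ec) {
        case 0x20: case 0x24: case 0x30: case 0x32: case 0x34:
            if (same_el) ec += 1;
            break;
        case 0x11: case 0x12: case 0x13: case 0x28: case 0x38:
            if (!from_aarch32) ec += 4;
            break;
        }
        return ec;
    }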

Library pseudocode for aarch64/exceptions/exceptions/AArch64.ReportException

// AArch64.ReportException()
// =========================
// Report syndrome information for exception taken to AArch64 state.

AArch64.ReportException(ExceptionRecord exception, bits(2) target_el)

    Exception exceptype = exception.exceptype;

    (ec,il) = AArch64.ExceptionClass(exceptype, target_el);
    iss = exception.syndrome;

    // IL is not valid for Data Abort exceptions without valid instruction syndrome information
    if ec IN {0x24,0x25} && iss<24> == '0' then il = '1';

    ESR[target_el] = ec<5:0>:il:iss;

    if exceptype IN {Exception_InstructionAbort, Exception_PCAlignment, Exception_DataAbort,
                     Exception_NV2DataAbort, Exception_NV2Watchpoint, Exception_Watchpoint} then
        FAR[target_el] = exception.vaddress;
    else
        FAR[target_el] = bits(64) UNKNOWN;

    if target_el == EL2 then
        if exception.ipavalid then
            HPFAR_EL2<43:4> = exception.ipaddress<51:12>;
            if IsSecureEL2Enabled() && IsSecure() then
                HPFAR_EL2.NS = exception.NS;
            else
                HPFAR_EL2.NS = '0';
        else
            HPFAR_EL2<43:4> = bits(40) UNKNOWN;

    return;
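
Note (editorial, not Arm pseudocode): the ESR write above concatenates a 6-bit EC, the IL bit and a 25-bit ISS. A C sketch of assembling that value, with field positions taken directly from the concatenation ec<5:0>:il:iss (upper ESR bits left zero here):

    #include <stdint.h>

    /* Assemble an ESR_ELx value from Exception Class (6 bits),
     * Instruction Length bit and 25-bit Instruction Specific Syndrome,
     * matching ESR[target_el] = ec<5:0> : il : iss above. */
    static uint64_t esr_value(uint32_t ec, uint32_t il, uint32_t iss)
    {
        return ((uint64_t)(ec & 0x3F) << 26) |
               ((uint64_t)(il & 0x1)  << 25) |
               (uint64_t)(iss & 0x1FFFFFF);
    }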

Library pseudocode for aarch64/exceptions/exceptions/AArch64.ResetControlRegisters

// Resets System registers and memory-mapped control registers that have architecturally-defined
// reset values to those values.
AArch64.ResetControlRegisters(boolean cold_reset);

Library pseudocode for aarch64/exceptions/exceptions/AArch64.TakeReset

// AArch64.TakeReset()
// ===================
// Reset into AArch64 state

AArch64.TakeReset(boolean cold_reset)
    assert !HighestELUsingAArch32();

    // Enter the highest implemented Exception level in AArch64 state
    PSTATE.nRW = '0';
    if HaveEL(EL3) then
        PSTATE.EL = EL3;
    elsif HaveEL(EL2) then
        PSTATE.EL = EL2;
    else
        PSTATE.EL = EL1;

    // Reset the system registers and other system components
    AArch64.ResetControlRegisters(cold_reset);

    // Reset all other PSTATE fields
    PSTATE.SP = '1';            // Select stack pointer
    PSTATE.<D,A,I,F> = '1111';  // All asynchronous exceptions masked
    PSTATE.SS = '0';            // Clear software step bit
    PSTATE.DIT = '0';           // PSTATE.DIT is reset to 0 when resetting into AArch64
    PSTATE.IL = '0';            // Clear Illegal Execution state bit

    // All registers, bits and fields not reset by the above pseudocode or by the BranchTo() call
    // below are UNKNOWN bitstrings after reset. In particular, the return information registers
    // ELR_ELx and SPSR_ELx have UNKNOWN values, so that it
    // is impossible to return from a reset in an architecturally defined way.
    AArch64.ResetGeneralRegisters();
    AArch64.ResetSIMDFPRegisters();
    AArch64.ResetSpecialRegisters();
    ResetExternalDebugRegisters(cold_reset);

    bits(64) rv;                // IMPLEMENTATION DEFINED reset vector

    if HaveEL(EL3) then
        rv = RVBAR_EL3;
    elsif HaveEL(EL2) then
        rv = RVBAR_EL2;
    else
        rv = RVBAR_EL1;

    // The reset vector must be correctly aligned
    assert IsZero(rv<63:PAMax()>) && IsZero(rv<1:0>);

    BranchTo(rv, BranchType_RESET);

Library pseudocode for aarch64/exceptions/ieeefp/AArch64.FPTrappedException

// AArch64.FPTrappedException()
// ============================

AArch64.FPTrappedException(boolean is_ase, integer element, bits(8) accumulated_exceptions)
    exception = ExceptionSyndrome(Exception_FPTrappedException);
    if is_ase then
        if boolean IMPLEMENTATION_DEFINED "vector instructions set TFV to 1" then
            exception.syndrome<23> = '1';                          // TFV
        else
            exception.syndrome<23> = '0';                          // TFV
    else
        exception.syndrome<23> = '1';                              // TFV
    exception.syndrome<10:8> = bits(3) UNKNOWN;                    // VECITR
    if exception.syndrome<23> == '1' then
        exception.syndrome<7,4:0> = accumulated_exceptions<7,4:0>; // IDF,IXF,UFF,OFF,DZF,IOF
    else
        exception.syndrome<7,4:0> = bits(6) UNKNOWN;

    route_to_el2 = EL2Enabled() && HCR_EL2.TGE == '1';

    bits(64) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x0;

    if UInt(PSTATE.EL) > UInt(EL1) then
        AArch64.TakeException(PSTATE.EL, exception, preferred_exception_return, vect_offset);
    elsif route_to_el2 then
        AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
    else
        AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);

Library pseudocode for aarch64/exceptions/syscalls/AArch64.CallHypervisor

// AArch64.CallHypervisor()
// ========================
// Performs a HVC call

AArch64.CallHypervisor(bits(16) immediate)
    assert HaveEL(EL2);

    if UsingAArch32() then AArch32.ITAdvance();
    SSAdvance();
    bits(64) preferred_exception_return = NextInstrAddr();
    vect_offset = 0x0;

    exception = ExceptionSyndrome(Exception_HypervisorCall);
    exception.syndrome<15:0> = immediate;

    if PSTATE.EL == EL3 then
        AArch64.TakeException(EL3, exception, preferred_exception_return, vect_offset);
    else
        AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);

Library pseudocode for aarch64/exceptions/syscalls/AArch64.CallSecureMonitor

// AArch64.CallSecureMonitor()
// ===========================

AArch64.CallSecureMonitor(bits(16) immediate)
    assert HaveEL(EL3) && !ELUsingAArch32(EL3);

    if UsingAArch32() then AArch32.ITAdvance();
    SSAdvance();
    bits(64) preferred_exception_return = NextInstrAddr();
    vect_offset = 0x0;

    exception = ExceptionSyndrome(Exception_MonitorCall);
    exception.syndrome<15:0> = immediate;

    AArch64.TakeException(EL3, exception, preferred_exception_return, vect_offset);

Library pseudocode for aarch64/exceptions/syscalls/AArch64.CallSupervisor

// AArch64.CallSupervisor()
// ========================
// Calls the Supervisor

AArch64.CallSupervisor(bits(16) immediate)

    if UsingAArch32() then AArch32.ITAdvance();
    SSAdvance();
    route_to_el2 = PSTATE.EL == EL0 && EL2Enabled() && HCR_EL2.TGE == '1';

    bits(64) preferred_exception_return = NextInstrAddr();
    vect_offset = 0x0;

    exception = ExceptionSyndrome(Exception_SupervisorCall);
    exception.syndrome<15:0> = immediate;

    if UInt(PSTATE.EL) > UInt(EL1) then
        AArch64.TakeException(PSTATE.EL, exception, preferred_exception_return, vect_offset);
    elsif route_to_el2 then
        AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
    else
        AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);

Library pseudocode for aarch64/exceptions/takeexception/AArch64.TakeException

// AArch64.TakeException()
// =======================
// Take an exception to an Exception Level using AArch64.

AArch64.TakeException(bits(2) target_el, ExceptionRecord exception,
                      bits(64) preferred_exception_return, integer vect_offset)
    assert HaveEL(target_el) && !ELUsingAArch32(target_el) && UInt(target_el) >= UInt(PSTATE.EL);

    sync_errors = HaveIESB() && SCTLR[target_el].IESB == '1';
    if HaveDoubleFaultExt() then
        sync_errors = sync_errors || (SCR_EL3.EA == '1' && SCR_EL3.NMEA == '1' && target_el == EL3);
    if sync_errors && InsertIESBBeforeException(target_el) then
        SynchronizeErrors();
        iesb_req = FALSE;
        sync_errors = FALSE;
        TakeUnmaskedPhysicalSErrorInterrupts(iesb_req);

    SynchronizeContext();

    // If coming from AArch32 state, the top parts of the X[] registers might be set to zero
    from_32 = UsingAArch32();
    if from_32 then AArch64.MaybeZeroRegisterUppers();
    MaybeZeroSVEUppers(target_el);

    if UInt(target_el) > UInt(PSTATE.EL) then
        boolean lower_32;
        if target_el == EL3 then
            if EL2Enabled() then
                lower_32 = ELUsingAArch32(EL2);
            else
                lower_32 = ELUsingAArch32(EL1);
        elsif IsInHost() && PSTATE.EL == EL0 && target_el == EL2 then
            lower_32 = ELUsingAArch32(EL0);
        else
            lower_32 = ELUsingAArch32(target_el - 1);
        vect_offset = vect_offset + (if lower_32 then 0x600 else 0x400);
    elsif PSTATE.SP == '1' then
        vect_offset = vect_offset + 0x200;

    spsr = GetPSRFromPSTATE();
    if PSTATE.EL == EL1 && target_el == EL1 && EL2Enabled() then
        if HaveNV2Ext() && (HCR_EL2.<NV,NV1,NV2> == '100' || HCR_EL2.<NV,NV1,NV2> == '111') then
            spsr<3:2> = '10';
        else
            if HaveNVExt() && HCR_EL2.<NV,NV1> == '10' then
                spsr<3:2> = '10';
    if HaveBTIExt() && !UsingAArch32() then
        // SPSR[].BTYPE is only guaranteed valid for these exception types
        if exception.exceptype IN {Exception_SError, Exception_IRQ, Exception_FIQ,
                                   Exception_SoftwareStep, Exception_PCAlignment,
                                   Exception_InstructionAbort, Exception_Breakpoint,
                                   Exception_VectorCatch, Exception_SoftwareBreakpoint,
                                   Exception_IllegalState, Exception_BranchTarget} then
            zero_btype = FALSE;
        else
            zero_btype = ConstrainUnpredictableBool(Unpredictable_ZEROBTYPE);
        if zero_btype then spsr<11:10> = '00';

    if HaveNV2Ext() && exception.exceptype == Exception_NV2DataAbort && target_el == EL3 then
        // external aborts are configured to be taken to EL3
        exception.exceptype = Exception_DataAbort;

    if !(exception.exceptype IN {Exception_IRQ, Exception_FIQ}) then
        AArch64.ReportException(exception, target_el);

    PSTATE.EL = target_el;
    PSTATE.nRW = '0';
    PSTATE.SP = '1';

    SPSR[] = spsr;
    ELR[] = preferred_exception_return;

    PSTATE.SS = '0';
    PSTATE.<D,A,I,F> = '1111';
    PSTATE.IL = '0';
    if from_32 then                             // Coming from AArch32
        PSTATE.IT = '00000000';
        PSTATE.T = '0';                         // PSTATE.J is RES0
    if (HavePANExt() && (PSTATE.EL == EL1 || (PSTATE.EL == EL2 && ELIsInHost(EL0))) &&
        SCTLR[].SPAN == '0') then
        PSTATE.PAN = '1';
    if HaveUAOExt() then PSTATE.UAO = '0';
    if HaveBTIExt() then PSTATE.BTYPE = '00';
    if HaveSSBSExt() then PSTATE.SSBS = SCTLR[].DSSBS;
    if HaveMTEExt() then PSTATE.TCO = '1';

    BranchTo(VBAR[]<63:11>:vect_offset<10:0>, BranchType_EXCEPTION);

    if sync_errors then
        SynchronizeErrors();
        iesb_req = TRUE;
        TakeUnmaskedPhysicalSErrorInterrupts(iesb_req);

    EndOfInstruction();
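
Note (editorial, not Arm pseudocode): the vect_offset adjustments above implement the standard AArch64 vector table layout. Starting from the per-type base offset, 0x200 is added when the exception is taken at the current level while SP_ELx is selected, 0x400 when it comes from a lower level running AArch64, and 0x600 when the lower level runs AArch32. A C sketch with hypothetical parameter names:

    #include <stdint.h>

    /* Offset into the AArch64 vector table, mirroring the vect_offset
     * arithmetic in AArch64.TakeException(). 'base' is the per-type
     * offset (0x0 synchronous, 0x80 IRQ, 0x100 FIQ, 0x180 SError). */
    static uint32_t vector_offset(uint32_t base, int from_lower_el,
                                  int lower_is_aarch32, int using_spx)
    {
        if (from_lower_el)
            return base + (lower_is_aarch32 ? 0x600 : 0x400);
        if (using_spx)
            return base + 0x200;
        return base;            /* current EL with SP_EL0 */
    }

For example, an IRQ (base 0x80) taken from a lower Exception level running AArch64 vectors at VBAR_ELx + 0x480.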

Library pseudocode for aarch64/exceptions/traps/AArch64.AArch32SystemAccessTrap

// AArch64.AArch32SystemAccessTrap()
// =================================
// Trapped AARCH32 system register access.

AArch64.AArch32SystemAccessTrap(bits(2) target_el, integer ec)
    assert HaveEL(target_el) && target_el != EL0 && UInt(target_el) >= UInt(PSTATE.EL);

    bits(64) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x0;

    exception = AArch64.AArch32SystemAccessTrapSyndrome(ThisInstr(), ec);
    AArch64.TakeException(target_el, exception, preferred_exception_return, vect_offset);

Library pseudocode for aarch64/exceptions/traps/AArch64.AArch32SystemAccessTrapSyndrome

// AArch64.AArch32SystemAccessTrapSyndrome()
// =========================================
// Returns the syndrome information for traps on AArch32 MCR, MCRR, MRC, MRRC, and VMRS, VMSR instructions,
// other than traps that are due to HCPTR or CPACR.

ExceptionRecord AArch64.AArch32SystemAccessTrapSyndrome(bits(32) instr, integer ec)
    ExceptionRecord exception;

    case ec of
        when 0x0  exception = ExceptionSyndrome(Exception_Uncategorized);
        when 0x3  exception = ExceptionSyndrome(Exception_CP15RTTrap);
        when 0x4  exception = ExceptionSyndrome(Exception_CP15RRTTrap);
        when 0x5  exception = ExceptionSyndrome(Exception_CP14RTTrap);
        when 0x6  exception = ExceptionSyndrome(Exception_CP14DTTrap);
        when 0x7  exception = ExceptionSyndrome(Exception_AdvSIMDFPAccessTrap);
        when 0x8  exception = ExceptionSyndrome(Exception_FPIDTrap);
        when 0xC  exception = ExceptionSyndrome(Exception_CP14RRTTrap);
        otherwise Unreachable();

    bits(20) iss = Zeros();

    if exception.exceptype IN {Exception_FPIDTrap, Exception_CP14RTTrap, Exception_CP15RTTrap} then
        // Trapped MRC/MCR, VMRS on FPSID
        if exception.exceptype != Exception_FPIDTrap then   // When trap is not for VMRS
            iss<19:17> = instr<7:5>;           // opc2
            iss<16:14> = instr<23:21>;         // opc1
            iss<13:10> = instr<19:16>;         // CRn
            iss<4:1>   = instr<3:0>;           // CRm
        else
            iss<19:17> = '000';
            iss<16:14> = '111';
            iss<13:10> = instr<19:16>;         // reg
            iss<4:1>   = '0000';
        if instr<20> == '1' && instr<15:12> == '1111' then      // MRC, Rt==15
            iss<9:5> = '11111';
        elsif instr<20> == '0' && instr<15:12> == '1111' then   // MCR, Rt==15
            iss<9:5> = bits(5) UNKNOWN;
        else
            iss<9:5> = LookUpRIndex(UInt(instr<15:12>), PSTATE.M)<4:0>;
    elsif exception.exceptype IN {Exception_CP14RRTTrap, Exception_AdvSIMDFPAccessTrap,
                                  Exception_CP15RRTTrap} then
        // Trapped MRRC/MCRR, VMRS/VMSR
        iss<19:16> = instr<7:4>;               // opc1
        if instr<19:16> == '1111' then         // Rt2==15
            iss<14:10> = bits(5) UNKNOWN;
        else
            iss<14:10> = LookUpRIndex(UInt(instr<19:16>), PSTATE.M)<4:0>;
        if instr<15:12> == '1111' then         // Rt==15
            iss<9:5> = bits(5) UNKNOWN;
        else
            iss<9:5> = LookUpRIndex(UInt(instr<15:12>), PSTATE.M)<4:0>;
        iss<4:1> = instr<3:0>;                 // CRm
    elsif exception.exceptype == Exception_CP14DTTrap then
        // Trapped LDC/STC
        iss<19:12> = instr<7:0>;               // imm8
        iss<4>     = instr<23>;                // U
        iss<2:1>   = instr<24,21>;             // P,W
        if instr<19:16> == '1111' then         // Rn==15, LDC(Literal addressing)/STC
            iss<9:5> = bits(5) UNKNOWN;
            iss<3>   = '1';
    elsif exception.exceptype == Exception_Uncategorized then
        // Trapped for unknown reason
        iss<9:5> = LookUpRIndex(UInt(instr<19:16>), PSTATE.M)<4:0>;   // Rn
        iss<3>   = '0';
        iss<0>   = instr<20>;                  // Direction

    exception.syndrome<24:20> = ConditionSyndrome();
    exception.syndrome<19:0>  = iss;

    return exception;
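
Note (editorial, not Arm pseudocode): for a trapped MCR/MRC (the first arm of the if-chain), the ISS fields are lifted straight from the instruction encoding. A C sketch of that extraction, where rt_mapped is a hypothetical parameter standing in for the LookUpRIndex() register mapping shown above:

    #include <stdint.h>

    /* Extract the ISS fields reported for a trapped MCR/MRC, using the
     * same instruction bit positions as the pseudocode above. */
    static uint32_t mcr_mrc_iss(uint32_t instr, uint32_t rt_mapped)
    {
        uint32_t iss = 0;
        iss |= ((instr >> 5)  & 0x7u) << 17;   /* opc2 -> ISS<19:17> */
        iss |= ((instr >> 21) & 0x7u) << 14;   /* opc1 -> ISS<16:14> */
        iss |= ((instr >> 16) & 0xFu) << 10;   /* CRn  -> ISS<13:10> */
        iss |= (rt_mapped & 0x1Fu) << 5;       /* Rt   -> ISS<9:5>   */
        iss |= (instr & 0xFu) << 1;            /* CRm  -> ISS<4:1>   */
        return iss;
    }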

Library pseudocode for aarch64/exceptions/traps/AArch64.AdvSIMDFPAccessTrap

// AArch64.AdvSIMDFPAccessTrap()
// =============================
// Trapped access to Advanced SIMD or FP registers due to CPACR[].

AArch64.AdvSIMDFPAccessTrap(bits(2) target_el)
    bits(64) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x0;

    route_to_el2 = (target_el == EL1 && EL2Enabled() && HCR_EL2.TGE == '1');

    if route_to_el2 then
        exception = ExceptionSyndrome(Exception_Uncategorized);
        AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
    else
        exception = ExceptionSyndrome(Exception_AdvSIMDFPAccessTrap);
        exception.syndrome<24:20> = ConditionSyndrome();
        AArch64.TakeException(target_el, exception, preferred_exception_return, vect_offset);

    return;

Library pseudocode for aarch64/exceptions/traps/AArch64.CheckCP15InstrCoarseTraps

// AArch64.CheckCP15InstrCoarseTraps()
// ===================================
// Check for coarse-grained AArch32 CP15 traps in HSTR_EL2 and HCR_EL2.

boolean AArch64.CheckCP15InstrCoarseTraps(integer CRn, integer nreg, integer CRm)

    // Check for coarse-grained Hyp traps
    if PSTATE.EL IN {EL0, EL1} && EL2Enabled() then
        // Check for MCR, MRC, MCRR and MRRC disabled by HSTR_EL2<CRn/CRm>
        major = if nreg == 1 then CRn else CRm;
        if !IsInHost() && !(major IN {4,14}) && HSTR_EL2<major> == '1' then
            return TRUE;

        // Check for MRC and MCR disabled by HCR_EL2.TIDCP
        if (HCR_EL2.TIDCP == '1' && nreg == 1 &&
            ((CRn == 9  && CRm IN {0,1,2, 5,6,7,8 }) ||
             (CRn == 10 && CRm IN {0,1, 4, 8 }) ||
             (CRn == 11 && CRm IN {0,1,2,3,4,5,6,7,8,15}))) then
            return TRUE;

    return FALSE;

Library pseudocode for aarch64/exceptions/traps/AArch64.CheckFPAdvSIMDEnabled

// AArch64.CheckFPAdvSIMDEnabled()
// ===============================
// Check against CPACR[]

AArch64.CheckFPAdvSIMDEnabled()

    if PSTATE.EL IN {EL0, EL1} && !IsInHost() then
        // Check if access disabled in CPACR_EL1
        case CPACR_EL1.FPEN of
            when 'x0' disabled = TRUE;
            when '01' disabled = PSTATE.EL == EL0;
            when '11' disabled = FALSE;
        if disabled then AArch64.AdvSIMDFPAccessTrap(EL1);

    AArch64.CheckFPAdvSIMDTrap();               // Also check against CPTR_EL2 and CPTR_EL3

Library pseudocode for aarch64/exceptions/traps/AArch64.CheckFPAdvSIMDTrap

// AArch64.CheckFPAdvSIMDTrap()
// ============================
// Check against CPTR_EL2 and CPTR_EL3.

AArch64.CheckFPAdvSIMDTrap()

    if PSTATE.EL IN {EL0, EL1, EL2} && EL2Enabled() then
        // Check if access disabled in CPTR_EL2
        if HaveVirtHostExt() && HCR_EL2.E2H == '1' then
            case CPTR_EL2.FPEN of
                when 'x0' disabled = TRUE;
                when '01' disabled = PSTATE.EL == EL0 && HCR_EL2.TGE == '1';
                when '11' disabled = FALSE;
            if disabled then AArch64.AdvSIMDFPAccessTrap(EL2);
        else
            if CPTR_EL2.TFP == '1' then AArch64.AdvSIMDFPAccessTrap(EL2);

    if HaveEL(EL3) then
        // Check if access disabled in CPTR_EL3
        if CPTR_EL3.TFP == '1' then AArch64.AdvSIMDFPAccessTrap(EL3);

    return;

Library pseudocode for aarch64/exceptions/traps/AArch64.CheckForERetTrap

// AArch64.CheckForERetTrap()
// ==========================
// Check for trap on ERET, ERETAA, ERETAB instruction

AArch64.CheckForERetTrap(boolean eret_with_pac, boolean pac_uses_key_a)

    route_to_el2 = FALSE;
    // Non-secure EL1 execution of ERET, ERETAA, ERETAB when either HCR_EL2.NV or HFGITR_EL2.ERET is set,
    // is trapped to EL2
    route_to_el2 = PSTATE.EL == EL1 && EL2Enabled() &&
                   ((HaveNVExt() && HCR_EL2.NV == '1') ||
                    (HaveFGTExt() && HCR_EL2.<E2H, TGE> != '11' &&
                     (!HaveEL(EL3) || SCR_EL3.FGTEn == '1') && HFGITR_EL2.ERET == '1'));
    if route_to_el2 then
        ExceptionRecord exception;
        bits(64) preferred_exception_return = ThisInstrAddr();
        vect_offset = 0x0;
        exception = ExceptionSyndrome(Exception_ERetTrap);
        if !eret_with_pac then                  // ERET
            exception.syndrome<1> = '0';
            exception.syndrome<0> = '0';        // RES0
        else
            exception.syndrome<1> = '1';
            if pac_uses_key_a then              // ERETAA
                exception.syndrome<0> = '0';
            else                                // ERETAB
                exception.syndrome<0> = '1';
        AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);

Library pseudocode for aarch64/exceptions/traps/AArch64.CheckForSMCUndefOrTrap

// AArch64.CheckForSMCUndefOrTrap()
// ================================
// Check for UNDEFINED or trap on SMC instruction

AArch64.CheckForSMCUndefOrTrap(bits(16) imm)

    if PSTATE.EL == EL0 then UNDEFINED;

    if (!(PSTATE.EL == EL1 && EL2Enabled() && HCR_EL2.TSC == '1') &&
        HaveEL(EL3) && SCR_EL3.SMD == '1') then
        UNDEFINED;

    route_to_el2 = FALSE;
    if !HaveEL(EL3) then
        if PSTATE.EL == EL1 && EL2Enabled() then
            if HaveNVExt() && HCR_EL2.NV == '1' && HCR_EL2.TSC == '1' then
                route_to_el2 = TRUE;
            else
                UNDEFINED;
        else
            UNDEFINED;
    else
        route_to_el2 = PSTATE.EL == EL1 && EL2Enabled() && HCR_EL2.TSC == '1';

    if route_to_el2 then
        bits(64) preferred_exception_return = ThisInstrAddr();
        vect_offset = 0x0;
        exception = ExceptionSyndrome(Exception_MonitorCall);
        exception.syndrome<15:0> = imm;
        AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);

Library pseudocode for aarch64/exceptions/traps/AArch64.CheckForSVCTrap

// AArch64.CheckForSVCTrap()
// =========================
// Check for trap on SVC instruction

AArch64.CheckForSVCTrap(bits(16) immediate)

    if HaveFGTExt() then
        route_to_el2 = FALSE;
        if PSTATE.EL == EL0 then
            route_to_el2 = (!ELUsingAArch32(EL0) && !ELUsingAArch32(EL1) && EL2Enabled() &&
                            HFGITR_EL2.SVC_EL0 == '1' &&
                            (HCR_EL2.<E2H, TGE> != '11' && (!HaveEL(EL3) || SCR_EL3.FGTEn == '1')));
        elsif PSTATE.EL == EL1 then
            route_to_el2 = (!ELUsingAArch32(EL1) && EL2Enabled() && HFGITR_EL2.SVC_EL1 == '1' &&
                            (HCR_EL2.<E2H, TGE> != '11' && (!HaveEL(EL3) || SCR_EL3.FGTEn == '1')));

        if route_to_el2 then
            exception = ExceptionSyndrome(Exception_SupervisorCall);
            exception.syndrome<15:0> = immediate;
            bits(64) preferred_exception_return = ThisInstrAddr();
            vect_offset = 0x0;
            AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);

Library pseudocode for aarch64/exceptions/traps/AArch64.CheckForWFxTrap

// AArch64.CheckForWFxTrap()
// =========================
// Check for trap on WFE or WFI instruction

AArch64.CheckForWFxTrap(bits(2) target_el, boolean is_wfe)
    assert HaveEL(target_el);

    case target_el of
        when EL1 trap = (if is_wfe then SCTLR[].nTWE else SCTLR[].nTWI) == '0';
        when EL2 trap = (if is_wfe then HCR_EL2.TWE else HCR_EL2.TWI) == '1';
        when EL3 trap = (if is_wfe then SCR_EL3.TWE else SCR_EL3.TWI) == '1';
    if trap then
        AArch64.WFxTrap(target_el, is_wfe);

Library pseudocode for aarch64/exceptions/traps/AArch64.CheckIllegalState

// AArch64.CheckIllegalState()
// ===========================
// Check PSTATE.IL bit and generate Illegal Execution state exception if set.

AArch64.CheckIllegalState()
    if PSTATE.IL == '1' then
        route_to_el2 = PSTATE.EL == EL0 && EL2Enabled() && HCR_EL2.TGE == '1';

        bits(64) preferred_exception_return = ThisInstrAddr();
        vect_offset = 0x0;

        exception = ExceptionSyndrome(Exception_IllegalState);

        if UInt(PSTATE.EL) > UInt(EL1) then
            AArch64.TakeException(PSTATE.EL, exception, preferred_exception_return, vect_offset);
        elsif route_to_el2 then
            AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
        else
            AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);

Library pseudocode for aarch64/exceptions/traps/AArch64.MonitorModeTrap

// AArch64.MonitorModeTrap()
// =========================
// Trapped use of Monitor mode features in a Secure EL1 AArch32 mode

AArch64.MonitorModeTrap()
    bits(64) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x0;

    exception = ExceptionSyndrome(Exception_Uncategorized);

    if IsSecureEL2Enabled() then
        AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);

    AArch64.TakeException(EL3, exception, preferred_exception_return, vect_offset);

Library pseudocode for aarch64/exceptions/traps/AArch64.SystemAccessTrap

// AArch64.SystemAccessTrap()
// ==========================
// Trapped access to AArch64 system register or system instruction.

AArch64.SystemAccessTrap(bits(2) target_el, integer ec)
    assert HaveEL(target_el) && target_el != EL0 && UInt(target_el) >= UInt(PSTATE.EL);

    bits(64) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x0;

    exception = AArch64.SystemAccessTrapSyndrome(ThisInstr(), ec);
    AArch64.TakeException(target_el, exception, preferred_exception_return, vect_offset);

Library pseudocode for aarch64/exceptions/traps/AArch64.SystemAccessTrapSyndrome

// AArch64.SystemAccessTrapSyndrome()
// ==================================
// Returns the syndrome information for traps on AArch64 MSR/MRS instructions.

ExceptionRecord AArch64.SystemAccessTrapSyndrome(bits(32) instr, integer ec)
    ExceptionRecord exception;
    case ec of
        when 0x0    // Trapped access due to unknown reason.
            exception = ExceptionSyndrome(Exception_Uncategorized);
        when 0x7    // Trapped access to SVE, Advance SIMD&FP system register.
            exception = ExceptionSyndrome(Exception_AdvSIMDFPAccessTrap);
            exception.syndrome<24:20> = ConditionSyndrome();
        when 0x18   // Trapped access to system register or system instruction.
            exception = ExceptionSyndrome(Exception_SystemRegisterTrap);
            instr = ThisInstr();
            exception.syndrome<21:20> = instr<20:19>; // Op0
            exception.syndrome<19:17> = instr<7:5>;   // Op2
            exception.syndrome<16:14> = instr<18:16>; // Op1
            exception.syndrome<13:10> = instr<15:12>; // CRn
            exception.syndrome<9:5>   = instr<4:0>;   // Rt
            exception.syndrome<4:1>   = instr<11:8>;  // CRm
            exception.syndrome<0>     = instr<21>;    // Direction
        when 0x19   // Trapped access to SVE System register
            exception = ExceptionSyndrome(Exception_SVEAccessTrap);
        otherwise
            Unreachable();

    return exception;
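
Note (editorial, not Arm pseudocode): for the EC 0x18 case, every ISS field comes straight from the 32-bit MSR/MRS/SYS encoding. A C sketch of that extraction, using the same bit positions as the 0x18 arm above:

    #include <stdint.h>

    /* Extract the EC 0x18 ISS fields from an AArch64 MSR/MRS/SYS
     * encoding, mirroring the syndrome assignments above. */
    static uint32_t msr_mrs_iss(uint32_t instr)
    {
        uint32_t iss = 0;
        iss |= ((instr >> 19) & 0x3u) << 20;   /* Op0       -> ISS<21:20> */
        iss |= ((instr >> 5)  & 0x7u) << 17;   /* Op2       -> ISS<19:17> */
        iss |= ((instr >> 16) & 0x7u) << 14;   /* Op1       -> ISS<16:14> */
        iss |= ((instr >> 12) & 0xFu) << 10;   /* CRn       -> ISS<13:10> */
        iss |= (instr & 0x1Fu) << 5;           /* Rt        -> ISS<9:5>   */
        iss |= ((instr >> 8)  & 0xFu) << 1;    /* CRm       -> ISS<4:1>   */
        iss |= (instr >> 21) & 0x1u;           /* Direction -> ISS<0>     */
        return iss;
    }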

Library pseudocode for aarch64/exceptions/traps/AArch64.UndefinedFault

// AArch64.UndefinedFault()
// ========================

AArch64.UndefinedFault()
    route_to_el2 = PSTATE.EL == EL0 && EL2Enabled() && HCR_EL2.TGE == '1';
    bits(64) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x0;
    exception = ExceptionSyndrome(Exception_Uncategorized);

    if UInt(PSTATE.EL) > UInt(EL1) then
        AArch64.TakeException(PSTATE.EL, exception, preferred_exception_return, vect_offset);
    elsif route_to_el2 then
        AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
    else
        AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);

Library pseudocode for aarch64/exceptions/traps/AArch64.WFxTrap

// AArch64.WFxTrap()
// =================

AArch64.WFxTrap(bits(2) target_el, boolean is_wfe)
    assert UInt(target_el) > UInt(PSTATE.EL);

    bits(64) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x0;

    exception = ExceptionSyndrome(Exception_WFxTrap);
    exception.syndrome<24:20> = ConditionSyndrome();
    exception.syndrome<0> = if is_wfe then '1' else '0';

    if target_el == EL1 && EL2Enabled() && HCR_EL2.TGE == '1' then
        AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
    else
        AArch64.TakeException(target_el, exception, preferred_exception_return, vect_offset);

Library pseudocode for aarch64/exceptions/traps/CheckFPAdvSIMDEnabled64

// CheckFPAdvSIMDEnabled64()
// =========================
// AArch64 instruction wrapper

CheckFPAdvSIMDEnabled64()
    AArch64.CheckFPAdvSIMDEnabled();

Library pseudocode for aarch64/exceptions/traps/WFETrapDelay

// WFETrapDelay()
// ==============
// Returns TRUE, together with the amount of delay, when a delay is enabled for
// taking a trap on WFE; returns FALSE otherwise.

(boolean, integer) WFETrapDelay(bits(2) target_el)
    case target_el of
        when EL1
            if !IsInHost() then
                delay_enabled = SCTLR_EL1.TWEDEn == '1';
                delay = 1 << (UInt(SCTLR_EL1.TWEDEL) + 8);
            else
                delay_enabled = SCTLR_EL2.TWEDEn == '1';
                delay = 1 << (UInt(SCTLR_EL2.TWEDEL) + 8);
        when EL2
            delay_enabled = HCR_EL2.TWEDEn == '1';
            delay = 1 << (UInt(HCR_EL2.TWEDEL) + 8);
        when EL3
            delay_enabled = SCR_EL3.TWEDEn == '1';
            delay = 1 << (UInt(SCR_EL3.TWEDEL) + 8);

    return (delay_enabled, delay);
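For illustration only (not part of the library pseudocode): a minimal C model of the delay computation above. The twedel parameter stands for the 4-bit TWEDEL field read from SCTLR_EL1, SCTLR_EL2, HCR_EL2 or SCR_EL3, so the self-synchronizing delay is 2^(TWEDEL + 8) cycles.

    #include <stdint.h>

    /* Model of delay = 1 << (UInt(TWEDEL) + 8); TWEDEL = 0 gives the
     * architectural minimum of 256 cycles. */
    static uint64_t wfe_trap_delay_cycles(unsigned twedel /* 4-bit field */)
    {
        return (uint64_t)1 << ((twedel & 0xf) + 8);
    }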

Library pseudocode for aarch64/exceptions/traps/WaitForEventUntilDelay

// WaitForEventUntilDelay()
// ========================
// Returns TRUE if WaitForEvent() returns before WFE trap delay expires,
// FALSE otherwise.

boolean WaitForEventUntilDelay(boolean delay_enabled, integer delay)
    boolean eventarrived = FALSE;
    // set eventarrived to TRUE if WaitForEvent() returns before
    // 'delay' expires when delay_enabled is TRUE.
    return eventarrived;

Library pseudocode for aarch64/functions/aborts/AArch64.CreateFaultRecord

// AArch64.CreateFaultRecord()
// ===========================

FaultRecord AArch64.CreateFaultRecord(Fault statuscode, bits(52) ipaddress, boolean NS, integer level,
                                      AccType acctype, boolean write, bit extflag,
                                      bits(2) errortype, boolean secondstage, boolean s2fs1walk)

    FaultRecord fault;
    fault.statuscode = statuscode;
    fault.domain = bits(4) UNKNOWN;     // Not used from AArch64
    fault.debugmoe = bits(4) UNKNOWN;   // Not used from AArch64
    fault.errortype = errortype;
    fault.ipaddress.NS = if NS then '1' else '0';
    fault.ipaddress.address = ipaddress;
    fault.level = level;
    fault.acctype = acctype;
    fault.write = write;
    fault.extflag = extflag;
    fault.secondstage = secondstage;
    fault.s2fs1walk = s2fs1walk;

    return fault;

Library pseudocode for aarch64/functions/aborts/AArch64.FaultSyndrome

// AArch64.FaultSyndrome()
// =======================
// Creates an exception syndrome value for Abort and Watchpoint exceptions taken to
// an Exception Level using AArch64.

bits(25) AArch64.FaultSyndrome(boolean d_side, FaultRecord fault)
    assert fault.statuscode != Fault_None;

    bits(25) iss = Zeros();
    if HaveRASExt() && IsExternalSyncAbort(fault) then iss<12:11> = fault.errortype; // SET
    if d_side then
        if IsSecondStage(fault) && !fault.s2fs1walk then
            iss<24:14> = LSInstructionSyndrome();
        if HaveNV2Ext() && fault.acctype == AccType_NV2REGISTER then
            iss<13> = '1';  // Value of '1' indicates fault is generated by use of VNCR_EL2
        if fault.acctype IN {AccType_DC, AccType_DC_UNPRIV, AccType_IC, AccType_AT} then
            iss<8> = '1';  iss<6> = '1';
        else
            iss<6> = if fault.write then '1' else '0';
    if IsExternalAbort(fault) then iss<9> = fault.extflag;
    iss<7> = if fault.s2fs1walk then '1' else '0';
    iss<5:0> = EncodeLDFSC(fault.statuscode, fault.level);

    return iss;

Library pseudocode for aarch64/functions/exclusive/AArch64.ExclusiveMonitorsPass

// AArch64.ExclusiveMonitorsPass()
// ===============================
// Return TRUE if the Exclusives monitors for the current PE include all of the addresses
// associated with the virtual address region of size bytes starting at address.
// The immediately following memory write must be to the same addresses.

boolean AArch64.ExclusiveMonitorsPass(bits(64) address, integer size)
    // It is IMPLEMENTATION DEFINED whether the detection of memory aborts happens
    // before or after the check on the local Exclusives monitor. As a result a failure
    // of the local monitor can occur on some implementations even if the memory
    // access would give a memory abort.

    acctype = AccType_ATOMIC;
    iswrite = TRUE;
    aligned = AArch64.CheckAlignment(address, size, acctype, iswrite);

    passed = AArch64.IsExclusiveVA(address, ProcessorID(), size);
    if !passed then
        return FALSE;

    memaddrdesc = AArch64.TranslateAddress(address, acctype, iswrite, aligned, size);

    // Check for aborts or debug exceptions
    if IsFault(memaddrdesc) then
        AArch64.Abort(address, memaddrdesc.fault);

    passed = IsExclusiveLocal(memaddrdesc.paddress, ProcessorID(), size);
    ClearExclusiveLocal(ProcessorID());

    if passed then
        if memaddrdesc.memattrs.shareable then
            passed = IsExclusiveGlobal(memaddrdesc.paddress, ProcessorID(), size);

    return passed;

Library pseudocode for aarch64/functions/exclusive/AArch64.IsExclusiveVA

// An optional IMPLEMENTATION DEFINED test for an exclusive access to a virtual
// address region of size bytes starting at address.
//
// It is permitted (but not required) for this function to return FALSE and
// cause a store exclusive to fail if the virtual address region is not
// totally included within the region recorded by MarkExclusiveVA().
//
// It is always safe to return TRUE which will check the physical address only.

boolean AArch64.IsExclusiveVA(bits(64) address, integer processorid, integer size);

Library pseudocode for aarch64/functions/exclusive/AArch64.MarkExclusiveVA

// Optionally record an exclusive access to the virtual address region of size bytes
// starting at address for processorid.

AArch64.MarkExclusiveVA(bits(64) address, integer processorid, integer size);

Library pseudocode for aarch64/functions/exclusive/AArch64.SetExclusiveMonitors

// AArch64.SetExclusiveMonitors()
// ==============================
// Sets the Exclusives monitors for the current PE to record the addresses associated
// with the virtual address region of size bytes starting at address.

AArch64.SetExclusiveMonitors(bits(64) address, integer size)
    acctype = AccType_ATOMIC;
    iswrite = FALSE;
    aligned = AArch64.CheckAlignment(address, size, acctype, iswrite);

    memaddrdesc = AArch64.TranslateAddress(address, acctype, iswrite, aligned, size);

    // Check for aborts or debug exceptions
    if IsFault(memaddrdesc) then
        return;

    if memaddrdesc.memattrs.shareable then
        MarkExclusiveGlobal(memaddrdesc.paddress, ProcessorID(), size);

    MarkExclusiveLocal(memaddrdesc.paddress, ProcessorID(), size);

    AArch64.MarkExclusiveVA(address, ProcessorID(), size);

Library pseudocode for aarch64/functions/fusedrstep/FPRSqrtStepFused

// FPRSqrtStepFused()
// ==================

bits(N) FPRSqrtStepFused(bits(N) op1, bits(N) op2)
    assert N IN {16, 32, 64};
    bits(N) result;
    op1 = FPNeg(op1);
    FPRounding rounding = FPRoundingMode(FPCR);
    (type1,sign1,value1) = FPUnpack(op1, FPCR);
    (type2,sign2,value2) = FPUnpack(op2, FPCR);
    (done,result) = FPProcessNaNs(type1, type2, op1, op2, FPCR);
    if !done then
        inf1 = (type1 == FPType_Infinity);  inf2 = (type2 == FPType_Infinity);
        zero1 = (type1 == FPType_Zero);     zero2 = (type2 == FPType_Zero);
        if (inf1 && zero2) || (zero1 && inf2) then
            result = FPOnePointFive('0');
        elsif inf1 || inf2 then
            result = FPInfinity(sign1 EOR sign2);
        else
            // Fully fused multiply-add and halve
            result_value = (3.0 + (value1 * value2)) / 2.0;
            if result_value == 0.0 then
                // Sign of exact zero result depends on rounding mode
                sign = if FPRoundingMode(FPCR) == FPRounding_NEGINF then '1' else '0';
                result = FPZero(sign);
            else
                result = FPRound(result_value, FPCR, rounding);
    return result;

Library pseudocode for aarch64/functions/fusedrstep/FPRecipStepFused

// FPRecipStepFused()
// ==================

bits(N) FPRecipStepFused(bits(N) op1, bits(N) op2)
    assert N IN {16, 32, 64};
    bits(N) result;
    op1 = FPNeg(op1);
    FPRounding rounding = FPRoundingMode(FPCR);
    (type1,sign1,value1) = FPUnpack(op1, FPCR);
    (type2,sign2,value2) = FPUnpack(op2, FPCR);
    (done,result) = FPProcessNaNs(type1, type2, op1, op2, FPCR);
    if !done then
        inf1 = (type1 == FPType_Infinity);  inf2 = (type2 == FPType_Infinity);
        zero1 = (type1 == FPType_Zero);     zero2 = (type2 == FPType_Zero);
        if (inf1 && zero2) || (zero1 && inf2) then
            result = FPTwo('0');
        elsif inf1 || inf2 then
            result = FPInfinity(sign1 EOR sign2);
        else
            // Fully fused multiply-add
            result_value = 2.0 + (value1 * value2);
            if result_value == 0.0 then
                // Sign of exact zero result depends on rounding mode
                sign = if FPRoundingMode(FPCR) == FPRounding_NEGINF then '1' else '0';
                result = FPZero(sign);
            else
                result = FPRound(result_value, FPCR, rounding);
    return result;
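For illustration only: the two fused-step functions above back the FRSQRTS and FRECPS instructions, which provide the Newton-Raphson correction factors for reciprocal-square-root and reciprocal estimates. Because op1 is negated on entry, the computed values are (3 - a*b)/2 and 2 - a*b respectively. The C sketch below shows the intended iteration on plain doubles; the architected NaN, infinity, zero and rounding handling is omitted.

    #include <stdio.h>

    /* Reference-only models: FPRecipStepFused computes 2 - a*b and
     * FPRSqrtStepFused computes (3 - a*b)/2 (op1 is negated on entry). */
    static double recip_step(double a, double b) { return 2.0 - a * b; }
    static double rsqrt_step(double a, double b) { return (3.0 - a * b) / 2.0; }

    int main(void)
    {
        double d = 5.0;
        double x = 0.18;               /* rough seed for 1/d, e.g. from FRECPE */
        for (int i = 0; i < 4; i++)
            x *= recip_step(d, x);     /* Newton-Raphson: x' = x*(2 - d*x) */
        printf("1/%g ~= %.17g\n", d, x);

        double y = 0.4;                /* rough seed for 1/sqrt(d), e.g. from FRSQRTE */
        for (int i = 0; i < 4; i++)
            y *= rsqrt_step(d, y * y); /* x' = x*(3 - d*x^2)/2 */
        printf("1/sqrt(%g) ~= %.17g\n", d, y);
        return 0;
    }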

Library pseudocode for aarch64/functions/memory/AArch64.AccessIsTagChecked

// AArch64.AccessIsTagChecked()
// ============================
// TRUE if a given access is tag-checked, FALSE otherwise.

boolean AArch64.AccessIsTagChecked(bits(64) vaddr, AccType acctype)
    if PSTATE.M<4> == '1' then return FALSE;

    if EffectiveTBI(vaddr, FALSE, PSTATE.EL) == '0' then return FALSE;

    if EffectiveTCMA(vaddr, PSTATE.EL) == '1' && (vaddr<59:55> == '00000' || vaddr<59:55> == '11111') then
        return FALSE;

    if !AArch64.AllocationTagAccessIsEnabled(acctype) then return FALSE;

    if acctype IN {AccType_IFETCH, AccType_PTW} then return FALSE;

    if acctype == AccType_NV2REGISTER then return FALSE;

    if PSTATE.TCO == '1' then return FALSE;

    if !IsTagCheckedInstruction() then return FALSE;

    return TRUE;
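For illustration only: the TCMA exemption tested above can be written as a C predicate. An access is exempt from tag checking when vaddr<59:55> is all-zeros or all-ones, that is, when the logical tag is 0b0000 in the lower address range or 0b1111 in the upper range.

    #include <stdbool.h>
    #include <stdint.h>

    /* vaddr<59:55> == '00000' || vaddr<59:55> == '11111' */
    static bool tcma_exempt(uint64_t vaddr)
    {
        unsigned bits59_55 = (unsigned)((vaddr >> 55) & 0x1f);
        return bits59_55 == 0x00 || bits59_55 == 0x1f;
    }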

Library pseudocode for aarch64/functions/memory/AArch64.AddressWithAllocationTag

// AArch64.AddressWithAllocationTag()
// ==================================
// Generate a 64-bit value containing a Logical Address Tag from a 64-bit
// virtual address and an Allocation Tag.
// If the extension is disabled, treats the Allocation Tag as '0000'.

bits(64) AArch64.AddressWithAllocationTag(bits(64) address, AccType acctype, bits(4) allocation_tag)
    bits(64) result = address;
    bits(4) tag;
    if AArch64.AllocationTagAccessIsEnabled(acctype) then
        tag = allocation_tag;
    else
        tag = '0000';
    result<59:56> = tag;
    return result;

Library pseudocode for aarch64/functions/memory/AArch64.AllocationTagFromAddress

// AArch64.AllocationTagFromAddress()
// ==================================
// Generate an Allocation Tag from a 64-bit value containing a Logical Address Tag.

bits(4) AArch64.AllocationTagFromAddress(bits(64) tagged_address)
    return tagged_address<59:56>;
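For illustration only: the two tag helpers above (AArch64.AddressWithAllocationTag and AArch64.AllocationTagFromAddress) reduce to simple bit-field operations on bits 59:56 of a 64-bit pointer, as this C sketch shows.

    #include <stdint.h>

    /* AArch64.AddressWithAllocationTag: place a 4-bit tag in address<59:56>. */
    static uint64_t address_with_allocation_tag(uint64_t address, unsigned tag)
    {
        return (address & ~(0xfull << 56)) | ((uint64_t)(tag & 0xf) << 56);
    }

    /* AArch64.AllocationTagFromAddress: extract tagged_address<59:56>. */
    static unsigned allocation_tag_from_address(uint64_t tagged_address)
    {
        return (unsigned)((tagged_address >> 56) & 0xf);
    }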

Library pseudocode for aarch64/functions/memory/AArch64.CheckAlignment

// AArch64.CheckAlignment()
// ========================

boolean AArch64.CheckAlignment(bits(64) address, integer alignment, AccType acctype,
                               boolean iswrite)
    aligned = (address == Align(address, alignment));
    atomic  = acctype IN { AccType_ATOMIC, AccType_ATOMICRW,
                           AccType_ORDEREDATOMIC, AccType_ORDEREDATOMICRW };
    ordered = acctype IN { AccType_ORDERED, AccType_ORDEREDRW, AccType_LIMITEDORDERED,
                           AccType_ORDEREDATOMIC, AccType_ORDEREDATOMICRW };
    vector  = acctype == AccType_VEC;

    if SCTLR[].A == '1' then check = TRUE;
    elsif HaveLSE2Ext() then
        check = (UInt(address<0+:4>) + alignment > 16) && ((ordered && SCTLR[].nAA == '0') || atomic);
    else check = atomic || ordered;

    if check && !aligned then
        secondstage = FALSE;
        AArch64.Abort(address, AArch64.AlignmentFault(acctype, iswrite, secondstage));

    return aligned;
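For illustration only: under FEAT_LSE2 the check above faults an atomic or ordered access (with SCTLR_ELx.A clear) only when it crosses a 16-byte boundary. A C form of that predicate, where alignment is the access size in bytes:

    #include <stdbool.h>
    #include <stdint.h>

    /* UInt(address<3:0>) + alignment > 16 */
    static bool crosses_16_byte_boundary(uint64_t address, unsigned size_bytes)
    {
        return (unsigned)(address & 0xf) + size_bytes > 16;
    }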

Library pseudocode for aarch64/functions/memory/AArch64.CheckTag

// AArch64.CheckTag()
// ==================
// Performs a Tag Check operation for a memory access and returns
// whether the check passed

boolean AArch64.CheckTag(AddressDescriptor memaddrdesc, bits(4) ptag, boolean write)
    if memaddrdesc.memattrs.tagged then
        return ptag == _MemTag[memaddrdesc];
    else
        return TRUE;

Library pseudocode for aarch64/functions/memory/AArch64.MemSingle

// AArch64.MemSingle[] - non-assignment (read) form
// ================================================
// Perform an atomic, little-endian read of 'size' bytes.

bits(size*8) AArch64.MemSingle[bits(64) address, integer size, AccType acctype, boolean wasaligned]
    assert size IN {1, 2, 4, 8, 16};
    assert address == Align(address, size);

    AddressDescriptor memaddrdesc;
    bits(size*8) value;
    iswrite = FALSE;

    memaddrdesc = AArch64.TranslateAddress(address, acctype, iswrite, wasaligned, size);
    // Check for aborts or debug exceptions
    if IsFault(memaddrdesc) then
        AArch64.Abort(address, memaddrdesc.fault);

    // Memory array access
    accdesc = CreateAccessDescriptor(acctype);
    if HaveMTEExt() then
        if AArch64.AccessIsTagChecked(ZeroExtend(address, 64), acctype) then
            bits(4) ptag = AArch64.PhysicalTag(ZeroExtend(address, 64));
            if !AArch64.CheckTag(memaddrdesc, ptag, iswrite) then
                AArch64.TagCheckFault(ZeroExtend(address, 64), acctype, iswrite);
    value = _Mem[memaddrdesc, size, accdesc];

    return value;

// AArch64.MemSingle[] - assignment (write) form
// =============================================
// Perform an atomic, little-endian write of 'size' bytes.

AArch64.MemSingle[bits(64) address, integer size, AccType acctype, boolean wasaligned] = bits(size*8) value
    assert size IN {1, 2, 4, 8, 16};
    assert address == Align(address, size);

    AddressDescriptor memaddrdesc;
    iswrite = TRUE;

    memaddrdesc = AArch64.TranslateAddress(address, acctype, iswrite, wasaligned, size);

    // Check for aborts or debug exceptions
    if IsFault(memaddrdesc) then
        AArch64.Abort(address, memaddrdesc.fault);

    // Effect on exclusives
    if memaddrdesc.memattrs.shareable then
        ClearExclusiveByAddress(memaddrdesc.paddress, ProcessorID(), size);

    // Memory array access
    accdesc = CreateAccessDescriptor(acctype);
    if HaveMTEExt() then
        if AArch64.AccessIsTagChecked(ZeroExtend(address, 64), acctype) then
            bits(4) ptag = AArch64.PhysicalTag(ZeroExtend(address, 64));
            if !AArch64.CheckTag(memaddrdesc, ptag, iswrite) then
                AArch64.TagCheckFault(ZeroExtend(address, 64), acctype, iswrite);
    _Mem[memaddrdesc, size, accdesc] = value;
    return;

Library pseudocode for aarch64/functions/memory/AArch64.MemTag

// AArch64.MemTag[] - non-assignment (read) form
// =============================================
// Load an Allocation Tag from memory.

bits(4) AArch64.MemTag[bits(64) address, AccType acctype]
    assert acctype == AccType_NORMAL;

    AddressDescriptor memaddrdesc;
    bits(4) value;
    iswrite = FALSE;

    memaddrdesc = AArch64.TranslateAddress(address, acctype, iswrite, TRUE, TAG_GRANULE);

    // Check for aborts or debug exceptions
    if IsFault(memaddrdesc) then
        AArch64.Abort(address, memaddrdesc.fault);

    // Return the granule tag if tagging is enabled...
    if AArch64.AllocationTagAccessIsEnabled(acctype) && memaddrdesc.memattrs.tagged then
        return _MemTag[memaddrdesc];
    else
        // ...otherwise read tag as zero.
        return '0000';

// AArch64.MemTag[] - assignment (write) form
// ==========================================
// Store an Allocation Tag to memory.

AArch64.MemTag[bits(64) address, AccType acctype] = bits(4) value
    assert acctype == AccType_NORMAL;

    AddressDescriptor memaddrdesc;
    iswrite = TRUE;

    // Stores of allocation tags must be aligned
    if address != Align(address, TAG_GRANULE) then
        boolean secondstage = FALSE;
        AArch64.Abort(address, AArch64.AlignmentFault(acctype, iswrite, secondstage));

    wasaligned = TRUE;
    memaddrdesc = AArch64.TranslateAddress(address, acctype, iswrite, wasaligned, TAG_GRANULE);

    // Check for aborts or debug exceptions
    if IsFault(memaddrdesc) then
        AArch64.Abort(address, memaddrdesc.fault);

    // Memory array access
    if AArch64.AllocationTagAccessIsEnabled(acctype) && memaddrdesc.memattrs.tagged then
        _MemTag[memaddrdesc] = value;

Library pseudocode for aarch64/functions/memory/AArch64.PhysicalTag

// AArch64.PhysicalTag()
// =====================
// Generate a Physical Tag from a Logical Tag in an address

bits(4) AArch64.PhysicalTag(bits(64) vaddr)
    return vaddr<59:56>;

Library pseudocode for aarch64/functions/memory/AArch64.TranslateAddressForAtomicAccess

// AArch64.TranslateAddressForAtomicAccess()
// =========================================
// Performs an alignment check for atomic memory operations.
// Also translates 64-bit Virtual Address into Physical Address.

AddressDescriptor AArch64.TranslateAddressForAtomicAccess(bits(64) address, integer sizeinbits)
    boolean iswrite = FALSE;
    size = sizeinbits DIV 8;

    assert size IN {1, 2, 4, 8, 16};

    aligned = AArch64.CheckAlignment(address, size, AccType_ATOMICRW, iswrite);

    // MMU or MPU lookup
    memaddrdesc = AArch64.TranslateAddress(address, AccType_ATOMICRW, iswrite, aligned, size);

    // Check for aborts or debug exceptions
    if IsFault(memaddrdesc) then
        AArch64.Abort(address, memaddrdesc.fault);

    // Effect on exclusives
    if memaddrdesc.memattrs.shareable then
        ClearExclusiveByAddress(memaddrdesc.paddress, ProcessorID(), size);

    if HaveMTEExt() && AArch64.AccessIsTagChecked(address, AccType_ATOMICRW) then
        bits(4) ptag = AArch64.PhysicalTag(address);
        if !AArch64.CheckTag(memaddrdesc, ptag, iswrite) then
            AArch64.TagCheckFault(address, AccType_ATOMICRW, iswrite);

    return memaddrdesc;

Library pseudocode for aarch64/functions/memory/CheckSPAlignment

// CheckSPAlignment()
// ==================
// Check correct stack pointer alignment for AArch64 state.

CheckSPAlignment()
    bits(64) sp = SP[];
    if PSTATE.EL == EL0 then
        stack_align_check = (SCTLR[].SA0 != '0');
    else
        stack_align_check = (SCTLR[].SA != '0');

    if stack_align_check && sp != Align(sp, 16) then
        AArch64.SPAlignmentFault();

    return;

Library pseudocode for aarch64/functions/memory/IsBlockDescriptorNTBitValid

// If the implementation supports changing the block size without a break-before-make
// approach, then for implementations that have level 1 or 2 support, the nT bit in
// the block descriptor is valid.

boolean IsBlockDescriptorNTBitValid();

Library pseudocode for aarch64/functions/memory/IsTagCheckedInstruction

// Returns TRUE if the current instruction uses tag-checked memory access,
// FALSE otherwise.

boolean IsTagCheckedInstruction();

Library pseudocode for aarch64/functions/memory/Mem

// Mem[] - non-assignment (read) form
// ==================================
// Perform a read of 'size' bytes. The access byte order is reversed for a big-endian access.
// Instruction fetches would call AArch64.MemSingle directly.

bits(size*8) Mem[bits(64) address, integer size, AccType acctype]
    assert size IN {1, 2, 4, 8, 16};
    bits(size*8) value;
    boolean iswrite = FALSE;

    aligned = AArch64.CheckAlignment(address, size, acctype, iswrite);
    if size != 16 || !(acctype IN {AccType_VEC, AccType_VECSTREAM}) then
        atomic = aligned;
    else
        // 128-bit SIMD&FP loads are treated as a pair of 64-bit single-copy atomic accesses
        // 64-bit aligned.
        atomic = address == Align(address, 8);

    if !atomic then
        assert size > 1;
        value<7:0> = AArch64.MemSingle[address, 1, acctype, aligned];

        // For subsequent bytes it is CONSTRAINED UNPREDICTABLE whether an unaligned Device memory
        // access will generate an Alignment Fault, as to get this far means the first byte did
        // not, so we must be changing to a new translation page.
        if !aligned then
            c = ConstrainUnpredictable(Unpredictable_DEVPAGE2);
            assert c IN {Constraint_FAULT, Constraint_NONE};
            if c == Constraint_NONE then aligned = TRUE;

        for i = 1 to size-1
            value<8*i+7:8*i> = AArch64.MemSingle[address+i, 1, acctype, aligned];
    elsif size == 16 && acctype IN {AccType_VEC, AccType_VECSTREAM} then
        value<63:0> = AArch64.MemSingle[address, 8, acctype, aligned];
        value<127:64> = AArch64.MemSingle[address+8, 8, acctype, aligned];
    else
        value = AArch64.MemSingle[address, size, acctype, aligned];

    if (HaveNV2Ext() && acctype == AccType_NV2REGISTER && SCTLR_EL2.EE == '1') || BigEndian() then
        value = BigEndianReverse(value);
    return value;

// Mem[] - assignment (write) form
// ===============================
// Perform a write of 'size' bytes. The byte order is reversed for a big-endian access.

Mem[bits(64) address, integer size, AccType acctype] = bits(size*8) value
    boolean iswrite = TRUE;

    if (HaveNV2Ext() && acctype == AccType_NV2REGISTER && SCTLR_EL2.EE == '1') || BigEndian() then
        value = BigEndianReverse(value);

    aligned = AArch64.CheckAlignment(address, size, acctype, iswrite);
    if size != 16 || !(acctype IN {AccType_VEC, AccType_VECSTREAM}) then
        atomic = aligned;
    else
        // 128-bit SIMD&FP stores are treated as a pair of 64-bit single-copy atomic accesses
        // 64-bit aligned.
        atomic = address == Align(address, 8);

    if !atomic then
        assert size > 1;
        AArch64.MemSingle[address, 1, acctype, aligned] = value<7:0>;

        // For subsequent bytes it is CONSTRAINED UNPREDICTABLE whether an unaligned Device memory
        // access will generate an Alignment Fault, as to get this far means the first byte did
        // not, so we must be changing to a new translation page.
        if !aligned then
            c = ConstrainUnpredictable(Unpredictable_DEVPAGE2);
            assert c IN {Constraint_FAULT, Constraint_NONE};
            if c == Constraint_NONE then aligned = TRUE;

        for i = 1 to size-1
            AArch64.MemSingle[address+i, 1, acctype, aligned] = value<8*i+7:8*i>;
    elsif size == 16 && acctype IN {AccType_VEC, AccType_VECSTREAM} then
        AArch64.MemSingle[address, 8, acctype, aligned] = value<63:0>;
        AArch64.MemSingle[address+8, 8, acctype, aligned] = value<127:64>;
    else
        AArch64.MemSingle[address, size, acctype, aligned] = value;
    return;
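For illustration only: the BigEndianReverse() step used by Mem[] is a plain byte reversal over the access width, sketched here in C for a 64-bit value.

    #include <stdint.h>

    /* Reverse the byte order of a 64-bit value (model of BigEndianReverse). */
    static uint64_t big_endian_reverse64(uint64_t value)
    {
        uint64_t result = 0;
        for (int i = 0; i < 8; i++) {
            result = (result << 8) | ((value >> (8 * i)) & 0xff);
        }
        return result;
    }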

Library pseudocode for aarch64/functions/memory/MemAtomic

// MemAtomic()
// ===========
// Performs load and store memory operations for a given virtual address.

bits(size) MemAtomic(bits(64) address, MemAtomicOp op, bits(size) value,
                     AccType ldacctype, AccType stacctype)
    bits(size) newvalue;
    memaddrdesc = AArch64.TranslateAddressForAtomicAccess(address, size);
    ldaccdesc = CreateAccessDescriptor(ldacctype);
    staccdesc = CreateAccessDescriptor(stacctype);

    // All observers in the shareability domain observe the
    // following load and store atomically.
    oldvalue = _Mem[memaddrdesc, size DIV 8, ldaccdesc];
    if BigEndian() then
        oldvalue = BigEndianReverse(oldvalue);

    case op of
        when MemAtomicOp_ADD  newvalue = oldvalue + value;
        when MemAtomicOp_BIC  newvalue = oldvalue AND NOT(value);
        when MemAtomicOp_EOR  newvalue = oldvalue EOR value;
        when MemAtomicOp_ORR  newvalue = oldvalue OR value;
        when MemAtomicOp_SMAX newvalue = if SInt(oldvalue) > SInt(value) then oldvalue else value;
        when MemAtomicOp_SMIN newvalue = if SInt(oldvalue) > SInt(value) then value else oldvalue;
        when MemAtomicOp_UMAX newvalue = if UInt(oldvalue) > UInt(value) then oldvalue else value;
        when MemAtomicOp_UMIN newvalue = if UInt(oldvalue) > UInt(value) then value else oldvalue;
        when MemAtomicOp_SWP  newvalue = value;

    if BigEndian() then
        newvalue = BigEndianReverse(newvalue);
    _Mem[memaddrdesc, size DIV 8, staccdesc] = newvalue;

    // Load operations return the old (pre-operation) value
    return oldvalue;
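For illustration only: the MemAtomicOp cases reduce to the new-value computation below. Note that SMAX/SMIN compare as signed and UMAX/UMIN as unsigned, and that the destination register always receives the old (pre-operation) memory value.

    #include <stdint.h>

    typedef enum { OP_ADD, OP_BIC, OP_EOR, OP_ORR, OP_SMAX,
                   OP_SMIN, OP_UMAX, OP_UMIN, OP_SWP } mem_atomic_op;

    /* New memory value for a 64-bit atomic; 'old' is what the load observed. */
    static uint64_t mem_atomic_new_value(mem_atomic_op op, uint64_t old, uint64_t v)
    {
        switch (op) {
        case OP_ADD:  return old + v;
        case OP_BIC:  return old & ~v;
        case OP_EOR:  return old ^ v;
        case OP_ORR:  return old | v;
        case OP_SMAX: return ((int64_t)old > (int64_t)v) ? old : v;
        case OP_SMIN: return ((int64_t)old > (int64_t)v) ? v : old;
        case OP_UMAX: return (old > v) ? old : v;
        case OP_UMIN: return (old > v) ? v : old;
        case OP_SWP:  return v;
        }
        return old; /* unreachable */
    }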

Library pseudocode for aarch64/functions/memory/MemAtomicCompareAndSwap

// MemAtomicCompareAndSwap()
// =========================
// Compares the value stored at the passed-in memory address against the passed-in expected
// value. If the comparison is successful, the value at the passed-in memory address is swapped
// with the passed-in new_value.

bits(size) MemAtomicCompareAndSwap(bits(64) address, bits(size) expectedvalue, bits(size) newvalue,
                                   AccType ldacctype, AccType stacctype)
    memaddrdesc = AArch64.TranslateAddressForAtomicAccess(address, size);
    ldaccdesc = CreateAccessDescriptor(ldacctype);
    staccdesc = CreateAccessDescriptor(stacctype);

    // All observers in the shareability domain observe the
    // following load and store atomically.
    oldvalue = _Mem[memaddrdesc, size DIV 8, ldaccdesc];
    if BigEndian() then
        oldvalue = BigEndianReverse(oldvalue);

    if oldvalue == expectedvalue then
        if BigEndian() then
            newvalue = BigEndianReverse(newvalue);
        _Mem[memaddrdesc, size DIV 8, staccdesc] = newvalue;
    return oldvalue;
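For illustration only: the compare-and-swap semantics above, minus the translation, endianness and atomicity plumbing. The old value is returned whether or not the store happens, so callers (for example CAS/CASP) detect failure by comparing it with the expected value.

    #include <stdint.h>

    /* Sketch of the CAS semantics; the architecture performs the load and any
     * store as one single-copy atomic operation, which plain C does not. */
    static uint64_t mem_atomic_cas(uint64_t *mem, uint64_t expected, uint64_t desired)
    {
        uint64_t old = *mem;
        if (old == expected)
            *mem = desired;
        return old;
    }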

Library pseudocode for aarch64/functions/memory/NVMem

// NVMem[] - non-assignment form
// =============================
// This function is the load memory access for the transformed System register read access
// when Enhanced Nested Virtualisation is enabled with HCR_EL2.NV2 = 1.
// The address for the load memory access is calculated using
// the formula SignExtend(VNCR_EL2.BADDR : Offset<11:0>, 64) where,
// * VNCR_EL2.BADDR holds the base address of the memory location, and
// * Offset is the unique offset value defined architecturally for each System register that
//   supports transformation of register access to memory access.

bits(64) NVMem[integer offset]
    assert offset > 0;
    bits(64) address = SignExtend(VNCR_EL2.BADDR:offset<11:0>, 64);
    return Mem[address, 8, AccType_NV2REGISTER];

// NVMem[] - assignment form
// =========================
// This function is the store memory access for the transformed System register write access
// when Enhanced Nested Virtualisation is enabled with HCR_EL2.NV2 = 1.
// The address for the store memory access is calculated using
// the formula SignExtend(VNCR_EL2.BADDR : Offset<11:0>, 64) where,
// * VNCR_EL2.BADDR holds the base address of the memory location, and
// * Offset is the unique offset value defined architecturally for each System register that
//   supports transformation of register access to memory access.

NVMem[integer offset] = bits(64) value
    assert offset > 0;
    bits(64) address = SignExtend(VNCR_EL2.BADDR:offset<11:0>, 64);
    Mem[address, 8, AccType_NV2REGISTER] = value;
    return;
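For illustration only: the NVMem[] address formula in C. The BADDR field width assumed here (bits 56:12 of VNCR_EL2, giving a 57-bit concatenation before sign extension) is an assumption of this sketch; consult the VNCR_EL2 register description.

    #include <stdint.h>

    static uint64_t sign_extend(uint64_t value, unsigned width)
    {
        unsigned shift = 64 - width;                 /* assumes 0 < width <= 64 */
        return (uint64_t)((int64_t)(value << shift) >> shift);
    }

    /* SignExtend(VNCR_EL2.BADDR : offset<11:0>, 64) */
    static uint64_t nvmem_address(uint64_t baddr, unsigned offset)
    {
        return sign_extend((baddr << 12) | (offset & 0xfff), 57);
    }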

Library pseudocode for aarch64/functions/memory/SetTagCheckedInstruction

// Flag the current instruction as using/not using memory tag checking.

SetTagCheckedInstruction(boolean checked);

Library pseudocode for aarch64/functions/memory/_MemTag

// This _MemTag[] accessor is the hardware operation which performs a single-copy atomic,
// Allocation Tag granule aligned, memory access from the tag in PA space.
//
// The function addresses the array using desc.paddress which supplies:
// * A 52-bit physical address
// * A single NS bit to select between Secure and Non-secure parts of the array.
//
// The accdesc descriptor describes the access type: normal, exclusive, ordered, streaming,
// etc and other parameters required to access the physical memory or for setting syndrome
// register in the event of an external abort.

bits(4) _MemTag[AddressDescriptor desc, AccessDescriptor accdesc];

// This _MemTag[] accessor is the hardware operation which performs a single-copy atomic,
// Allocation Tag granule aligned, memory access to the tag in PA space.
//
// The function addresses the array using desc.paddress which supplies:
// * A 52-bit physical address
// * A single NS bit to select between Secure and Non-secure parts of the array.
//
// The accdesc descriptor describes the access type: normal, exclusive, ordered, streaming,
// etc and other parameters required to access the physical memory or for setting syndrome
// register in the event of an external abort.

_MemTag[AddressDescriptor desc, AccessDescriptor accdesc] = bits(4) value;

Library pseudocode for aarch64/functions/pac/addpac/AddPAC

// AddPAC()
// ========
// Calculates the pointer authentication code for a 64-bit quantity and then
// inserts that into pointer authentication code field of that 64-bit quantity.

bits(64) AddPAC(bits(64) ptr, bits(64) modifier, bits(128) K, boolean data)
    bits(64) PAC;
    bits(64) result;
    bits(64) ext_ptr;
    bits(64) extfield;
    bit selbit;
    boolean tbi = EffectiveTBI(ptr, !data, PSTATE.EL) == '1';
    integer top_bit = if tbi then 55 else 63;

    // If tagged pointers are in use for a regime with two TTBRs, use bit<55> of
    // the pointer to select between upper and lower ranges, and preserve this.
    // This handles the awkward case where there is apparently no correct choice between
    // the upper and lower address range - ie an addr of 1xxxxxxx0... with TBI0=0 and TBI1=1
    // and 0xxxxxxx1 with TBI1=0 and TBI0=1:
    if PtrHasUpperAndLowerAddRanges() then
        assert S1TranslationRegime() IN {EL1, EL2};
        if S1TranslationRegime() == EL1 then
            // EL1 translation regime registers
            if data then
                if TCR_EL1.TBI1 == '1' || TCR_EL1.TBI0 == '1' then
                    selbit = ptr<55>;
                else
                    selbit = ptr<63>;
            else
                if ((TCR_EL1.TBI1 == '1' && TCR_EL1.TBID1 == '0') ||
                    (TCR_EL1.TBI0 == '1' && TCR_EL1.TBID0 == '0')) then
                    selbit = ptr<55>;
                else
                    selbit = ptr<63>;
        else
            // EL2 translation regime registers
            if data then
                if TCR_EL2.TBI1 == '1' || TCR_EL2.TBI0 == '1' then
                    selbit = ptr<55>;
                else
                    selbit = ptr<63>;
            else
                if ((TCR_EL2.TBI1 == '1' && TCR_EL2.TBID1 == '0') ||
                    (TCR_EL2.TBI0 == '1' && TCR_EL2.TBID0 == '0')) then
                    selbit = ptr<55>;
                else
                    selbit = ptr<63>;
    else selbit = if tbi then ptr<55> else ptr<63>;

    integer bottom_PAC_bit = CalculateBottomPACBit(selbit);

    // The pointer authentication code field takes all the available bits in between
    extfield = Replicate(selbit, 64);

    // Compute the pointer authentication code for a ptr with good extension bits
    if tbi then
        ext_ptr = ptr<63:56>:extfield<(56-bottom_PAC_bit)-1:0>:ptr<bottom_PAC_bit-1:0>;
    else
        ext_ptr = extfield<(64-bottom_PAC_bit)-1:0>:ptr<bottom_PAC_bit-1:0>;

    PAC = ComputePAC(ext_ptr, modifier, K<127:64>, K<63:0>);

    // Check if the ptr has good extension bits and corrupt the pointer authentication code if not
    if !IsZero(ptr<top_bit:bottom_PAC_bit>) && !IsOnes(ptr<top_bit:bottom_PAC_bit>) then
        if HaveEnhancedPAC() then
            PAC = 0x0000000000000000<63:0>;
        elsif !HaveEnhancedPAC2() then
            PAC<top_bit-1> = NOT(PAC<top_bit-1>);

    // preserve the determination between upper and lower address at bit<55> and insert PAC
    if !HaveEnhancedPAC2() then
        if tbi then
            result = ptr<63:56>:selbit:PAC<54:bottom_PAC_bit>:ptr<bottom_PAC_bit-1:0>;
        else
            result = PAC<63:56>:selbit:PAC<54:bottom_PAC_bit>:ptr<bottom_PAC_bit-1:0>;
    else
        if tbi then
            result = ptr<63:56>:selbit:(ptr<54:bottom_PAC_bit> EOR PAC<54:bottom_PAC_bit>):ptr<bottom_PAC_bit-1:0>;
        else
            result = (ptr<63:56> EOR PAC<63:56>):selbit:(ptr<54:bottom_PAC_bit> EOR PAC<54:bottom_PAC_bit>):ptr<bottom_PAC_bit-1:0>;
    return result;
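For illustration only: where AddPAC() places the code in the common case of a canonically extended pointer, TBI disabled, and HaveEnhancedPAC2 (FEAT_PAuth2). The PAC is then EORed into ptr<63:56> and ptr<54:bottom_PAC_bit>; bit 55 carries the upper/lower-range select bit, which for a canonical pointer equals the existing ptr<55>. The bottom_pac_bit value depends on the configured virtual address size (a 48-bit VA gives 48) and is an assumption of this sketch.

    #include <stdint.h>

    /* EOR the PAC into bits 63:56 and 54:bottom_pac_bit, leaving bit 55 and
     * the low address bits unchanged (canonical pointer assumed). */
    static uint64_t insert_pac_pauth2_no_tbi(uint64_t ptr, uint64_t pac,
                                             unsigned bottom_pac_bit /* e.g. 48 */)
    {
        uint64_t field = (~0ull << bottom_pac_bit) & ~(1ull << 55);
        return ptr ^ (pac & field);
    }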

Library pseudocode for aarch64/functions/pac/addpacda/AddPACDA

// AddPACDA()
// ==========
// Returns a 64-bit value containing X, but replacing the pointer authentication code
// field bits with a pointer authentication code, where the pointer authentication
// code is derived using a cryptographic algorithm as a combination of X, Y and the
// APDAKey_EL1.

bits(64) AddPACDA(bits(64) X, bits(64) Y)
    boolean TrapEL2;
    boolean TrapEL3;
    bits(1) Enable;
    bits(128) APDAKey_EL1;

    APDAKey_EL1 = APDAKeyHi_EL1<63:0> : APDAKeyLo_EL1<63:0>;
    case PSTATE.EL of
        when EL0
            boolean IsEL1Regime = S1TranslationRegime() == EL1;
            Enable = if IsEL1Regime then SCTLR_EL1.EnDA else SCTLR_EL2.EnDA;
            TrapEL2 = (EL2Enabled() && HCR_EL2.API == '0' &&
                       (HCR_EL2.TGE == '0' || HCR_EL2.E2H == '0'));
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL1
            Enable = SCTLR_EL1.EnDA;
            TrapEL2 = EL2Enabled() && HCR_EL2.API == '0';
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL2
            Enable = SCTLR_EL2.EnDA;
            TrapEL2 = FALSE;
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL3
            Enable = SCTLR_EL3.EnDA;
            TrapEL2 = FALSE;
            TrapEL3 = FALSE;

    if Enable == '0' then
        return X;
    elsif TrapEL2 then
        TrapPACUse(EL2);
    elsif TrapEL3 then
        TrapPACUse(EL3);
    else
        return AddPAC(X, Y, APDAKey_EL1, TRUE);

Library pseudocode for aarch64/functions/pac/addpacdb/AddPACDB

// AddPACDB()
// ==========
// Returns a 64-bit value containing X, but replacing the pointer authentication code
// field bits with a pointer authentication code, where the pointer authentication
// code is derived using a cryptographic algorithm as a combination of X, Y and the
// APDBKey_EL1.

bits(64) AddPACDB(bits(64) X, bits(64) Y)
    boolean TrapEL2;
    boolean TrapEL3;
    bits(1) Enable;
    bits(128) APDBKey_EL1;

    APDBKey_EL1 = APDBKeyHi_EL1<63:0> : APDBKeyLo_EL1<63:0>;
    case PSTATE.EL of
        when EL0
            boolean IsEL1Regime = S1TranslationRegime() == EL1;
            Enable = if IsEL1Regime then SCTLR_EL1.EnDB else SCTLR_EL2.EnDB;
            TrapEL2 = (EL2Enabled() && HCR_EL2.API == '0' &&
                       (HCR_EL2.TGE == '0' || HCR_EL2.E2H == '0'));
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL1
            Enable = SCTLR_EL1.EnDB;
            TrapEL2 = EL2Enabled() && HCR_EL2.API == '0';
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL2
            Enable = SCTLR_EL2.EnDB;
            TrapEL2 = FALSE;
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL3
            Enable = SCTLR_EL3.EnDB;
            TrapEL2 = FALSE;
            TrapEL3 = FALSE;

    if Enable == '0' then
        return X;
    elsif TrapEL2 then
        TrapPACUse(EL2);
    elsif TrapEL3 then
        TrapPACUse(EL3);
    else
        return AddPAC(X, Y, APDBKey_EL1, TRUE);

Library pseudocode for aarch64/functions/pac/addpacga/AddPACGA

// AddPACGA()
// ==========
// Returns a 64-bit value where the lower 32 bits are 0, and the upper 32 bits contain
// a 32-bit pointer authentication code which is derived using a cryptographic
// algorithm as a combination of X, Y and the APGAKey_EL1.

bits(64) AddPACGA(bits(64) X, bits(64) Y)
    boolean TrapEL2;
    boolean TrapEL3;
    bits(128) APGAKey_EL1;

    APGAKey_EL1 = APGAKeyHi_EL1<63:0> : APGAKeyLo_EL1<63:0>;
    case PSTATE.EL of
        when EL0
            TrapEL2 = (EL2Enabled() && HCR_EL2.API == '0' &&
                       (HCR_EL2.TGE == '0' || HCR_EL2.E2H == '0'));
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL1
            TrapEL2 = EL2Enabled() && HCR_EL2.API == '0';
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL2
            TrapEL2 = FALSE;
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL3
            TrapEL2 = FALSE;
            TrapEL3 = FALSE;

    if TrapEL2 then
        TrapPACUse(EL2);
    elsif TrapEL3 then
        TrapPACUse(EL3);
    else
        return ComputePAC(X, Y, APGAKey_EL1<127:64>, APGAKey_EL1<63:0>)<63:32>:Zeros(32);
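Unlike the pointer-signing flavours, the generic flavour keeps only the top half of the cipher output. A one-line C restatement of the final concatenation, for illustration:

#include <assert.h>
#include <stdint.h>

/* result = PAC<63:32> : Zeros(32): the low 32 bits are forced to zero. */
static uint64_t pacga_result(uint64_t pac) {
    return pac & 0xFFFFFFFF00000000ULL;
}

int main(void) {
    assert(pacga_result(0x0123456789ABCDEFULL) == 0x0123456700000000ULL);
    return 0;
}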

Library pseudocode for aarch64/functions/pac/addpacia/AddPACIA

// AddPACIA()
// ==========
// Returns a 64-bit value containing X, but replacing the pointer authentication code
// field bits with a pointer authentication code, where the pointer authentication
// code is derived using a cryptographic algorithm as a combination of X, Y, and the
// APIAKey_EL1.

bits(64) AddPACIA(bits(64) X, bits(64) Y)
    boolean TrapEL2;
    boolean TrapEL3;
    bits(1) Enable;
    bits(128) APIAKey_EL1;

    APIAKey_EL1 = APIAKeyHi_EL1<63:0>:APIAKeyLo_EL1<63:0>;
    case PSTATE.EL of
        when EL0
            boolean IsEL1Regime = S1TranslationRegime() == EL1;
            Enable = if IsEL1Regime then SCTLR_EL1.EnIA else SCTLR_EL2.EnIA;
            TrapEL2 = (EL2Enabled() && HCR_EL2.API == '0' &&
                       (HCR_EL2.TGE == '0' || HCR_EL2.E2H == '0'));
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL1
            Enable = SCTLR_EL1.EnIA;
            TrapEL2 = EL2Enabled() && HCR_EL2.API == '0';
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL2
            Enable = SCTLR_EL2.EnIA;
            TrapEL2 = FALSE;
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL3
            Enable = SCTLR_EL3.EnIA;
            TrapEL2 = FALSE;
            TrapEL3 = FALSE;

    if Enable == '0' then
        return X;
    elsif TrapEL2 then
        TrapPACUse(EL2);
    elsif TrapEL3 then
        TrapPACUse(EL3);
    else
        return AddPAC(X, Y, APIAKey_EL1, FALSE);

Library pseudocode for aarch64/functions/pac/addpacib/AddPACIB

// AddPACIB()
// ==========
// Returns a 64-bit value containing X, but replacing the pointer authentication code
// field bits with a pointer authentication code, where the pointer authentication
// code is derived using a cryptographic algorithm as a combination of X, Y and the
// APIBKey_EL1.

bits(64) AddPACIB(bits(64) X, bits(64) Y)
    boolean TrapEL2;
    boolean TrapEL3;
    bits(1) Enable;
    bits(128) APIBKey_EL1;

    APIBKey_EL1 = APIBKeyHi_EL1<63:0> : APIBKeyLo_EL1<63:0>;
    case PSTATE.EL of
        when EL0
            boolean IsEL1Regime = S1TranslationRegime() == EL1;
            Enable = if IsEL1Regime then SCTLR_EL1.EnIB else SCTLR_EL2.EnIB;
            TrapEL2 = (EL2Enabled() && HCR_EL2.API == '0' &&
                       (HCR_EL2.TGE == '0' || HCR_EL2.E2H == '0'));
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL1
            Enable = SCTLR_EL1.EnIB;
            TrapEL2 = EL2Enabled() && HCR_EL2.API == '0';
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL2
            Enable = SCTLR_EL2.EnIB;
            TrapEL2 = FALSE;
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL3
            Enable = SCTLR_EL3.EnIB;
            TrapEL2 = FALSE;
            TrapEL3 = FALSE;

    if Enable == '0' then
        return X;
    elsif TrapEL2 then
        TrapPACUse(EL2);
    elsif TrapEL3 then
        TrapPACUse(EL3);
    else
        return AddPAC(X, Y, APIBKey_EL1, FALSE);

Library pseudocode for aarch64/functions/pac/auth/AArch64.PACFailException

// AArch64.PACFailException()
// ==========================
// Generates a PAC Fail Exception

AArch64.PACFailException(bits(2) syndrome)
    route_to_el2 = PSTATE.EL == EL0 && EL2Enabled() && HCR_EL2.TGE == '1';
    bits(64) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x0;

    exception = ExceptionSyndrome(Exception_PACFail);
    exception.syndrome<1:0> = syndrome;
    exception.syndrome<24:2> = Zeros();    // RES0

    if UInt(PSTATE.EL) > UInt(EL0) then
        AArch64.TakeException(PSTATE.EL, exception, preferred_exception_return, vect_offset);
    elsif route_to_el2 then
        AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
    else
        AArch64.TakeException(EL1, exception, preferred_exception_return, vect_offset);

Library pseudocode for aarch64/functions/pac/auth/Auth

// Auth()
// ======
// Restores the upper bits of the address to be all zeros or all ones (based on the
// value of bit[55]) and computes and checks the pointer authentication code. If the
// check passes, then the restored address is returned. If the check fails, the
// second-top and third-top bits of the extension bits in the pointer authentication code
// field are corrupted to ensure that accessing the address will give a translation fault.

bits(64) Auth(bits(64) ptr, bits(64) modifier, bits(128) K, boolean data, bit key_number,
              boolean is_combined)
    bits(64) PAC;
    bits(64) result;
    bits(64) original_ptr;
    bits(2) error_code;
    bits(64) extfield;

    // Reconstruct the extension field used in adding the PAC to the pointer
    boolean tbi = EffectiveTBI(ptr, !data, PSTATE.EL) == '1';
    integer bottom_PAC_bit = CalculateBottomPACBit(ptr<55>);
    extfield = Replicate(ptr<55>, 64);

    if tbi then
        original_ptr = ptr<63:56>:extfield<56-bottom_PAC_bit-1:0>:ptr<bottom_PAC_bit-1:0>;
    else
        original_ptr = extfield<64-bottom_PAC_bit-1:0>:ptr<bottom_PAC_bit-1:0>;

    PAC = ComputePAC(original_ptr, modifier, K<127:64>, K<63:0>);
    // Check pointer authentication code
    if tbi then
        if !HaveEnhancedPAC2() then
            if PAC<54:bottom_PAC_bit> == ptr<54:bottom_PAC_bit> then
                result = original_ptr;
            else
                error_code = key_number:NOT(key_number);
                result = original_ptr<63:55>:error_code:original_ptr<52:0>;
        else
            result = ptr;
            result<54:bottom_PAC_bit> = result<54:bottom_PAC_bit> EOR PAC<54:bottom_PAC_bit>;
            if HaveFPACCombined() || (HaveFPAC() && !is_combined) then
                if result<54:bottom_PAC_bit> != Replicate(result<55>, (55-bottom_PAC_bit)) then
                    error_code = (if data then '1' else '0'):key_number;
                    AArch64.PACFailException(error_code);
    else
        if !HaveEnhancedPAC2() then
            if PAC<54:bottom_PAC_bit> == ptr<54:bottom_PAC_bit> && PAC<63:56> == ptr<63:56> then
                result = original_ptr;
            else
                error_code = key_number:NOT(key_number);
                result = original_ptr<63>:error_code:original_ptr<60:0>;
        else
            result = ptr;
            result<54:bottom_PAC_bit> = result<54:bottom_PAC_bit> EOR PAC<54:bottom_PAC_bit>;
            result<63:56> = result<63:56> EOR PAC<63:56>;
            if HaveFPACCombined() || (HaveFPAC() && !is_combined) then
                if result<63:bottom_PAC_bit> != Replicate(result<55>, (64-bottom_PAC_bit)) then
                    error_code = (if data then '1' else '0'):key_number;
                    AArch64.PACFailException(error_code);
    return result;
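Under the EnhancedPAC2 behaviour the PAC is both inserted and removed with EOR, so a correct authentication restores exactly the canonical pointer, while any corruption leaves non-canonical upper bits that will fault on translation (or, with FPAC, raise the PAC Fail exception above). The following self-contained C sketch demonstrates that round trip; toy_pac() is only a stand-in for ComputePAC, and all names here are illustrative.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Bits 63:bottom except the bit<55> range selector (non-TBI layout). */
static uint64_t pac_field(int bottom) { return (~0ULL << bottom) & ~(1ULL << 55); }

/* Stand-in keyed mix for ComputePAC: any fixed keyed permutation
 * demonstrates the splice/unsplice mechanics. */
static uint64_t toy_pac(uint64_t p, uint64_t mod) {
    uint64_t z = p ^ (mod * 0x9E3779B97F4A7C15ULL);
    z ^= z >> 30; z *= 0xBF58476D1CE4E5B9ULL;
    z ^= z >> 27; z *= 0x94D049BB133111EBULL;
    return z ^ (z >> 31);
}

/* Replicate bit<55> over the PAC field, as the original_ptr step does. */
static uint64_t canonical(uint64_t p, int bottom) {
    uint64_t ext = ((p >> 55) & 1) ? ~0ULL : 0;
    return (p & ~pac_field(bottom)) | (ext & pac_field(bottom));
}

/* With EOR insertion, signing and authenticating are the same operation. */
static uint64_t pac_xor(uint64_t p, uint64_t mod, int bottom) {
    return p ^ (toy_pac(canonical(p, bottom), mod) & pac_field(bottom));
}

int main(void) {
    int bottom = 48;                       /* 48-bit VA, T0SZ = 16 */
    uint64_t mod = 0x1234;
    uint64_t ptr = canonical(0x0000123456789ABCULL, bottom);
    uint64_t signed_ptr = pac_xor(ptr, mod, bottom);
    assert(pac_xor(signed_ptr, mod, bottom) == ptr);    /* auth restores ptr */

    uint64_t forged = pac_xor(signed_ptr, mod ^ 1, bottom);  /* wrong modifier */
    printf("forged pointer canonical? %s\n",
           forged == canonical(forged, bottom) ? "yes" : "no");  /* almost surely no */
    return 0;
}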

Library pseudocode for aarch64/functions/pac/authda/AuthDA

// AuthDA()
// ========
// Returns a 64-bit value containing X, but replacing the pointer authentication code
// field bits with the extension of the address bits. The instruction checks a pointer
// authentication code in the pointer authentication code field bits of X, using the same
// algorithm and key as AddPACDA().

bits(64) AuthDA(bits(64) X, bits(64) Y, boolean is_combined)
    boolean TrapEL2;
    boolean TrapEL3;
    bits(1) Enable;
    bits(128) APDAKey_EL1;

    APDAKey_EL1 = APDAKeyHi_EL1<63:0> : APDAKeyLo_EL1<63:0>;
    case PSTATE.EL of
        when EL0
            boolean IsEL1Regime = S1TranslationRegime() == EL1;
            Enable = if IsEL1Regime then SCTLR_EL1.EnDA else SCTLR_EL2.EnDA;
            TrapEL2 = (EL2Enabled() && HCR_EL2.API == '0' &&
                       (HCR_EL2.TGE == '0' || HCR_EL2.E2H == '0'));
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL1
            Enable = SCTLR_EL1.EnDA;
            TrapEL2 = EL2Enabled() && HCR_EL2.API == '0';
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL2
            Enable = SCTLR_EL2.EnDA;
            TrapEL2 = FALSE;
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL3
            Enable = SCTLR_EL3.EnDA;
            TrapEL2 = FALSE;
            TrapEL3 = FALSE;

    if Enable == '0' then
        return X;
    elsif TrapEL2 then
        TrapPACUse(EL2);
    elsif TrapEL3 then
        TrapPACUse(EL3);
    else
        return Auth(X, Y, APDAKey_EL1, TRUE, '0', is_combined);

Library pseudocode for aarch64/functions/pac/authdb/AuthDB

// AuthDB()
// ========
// Returns a 64-bit value containing X, but replacing the pointer authentication code
// field bits with the extension of the address bits. The instruction checks a
// pointer authentication code in the pointer authentication code field bits of X, using
// the same algorithm and key as AddPACDB().

bits(64) AuthDB(bits(64) X, bits(64) Y, boolean is_combined)
    boolean TrapEL2;
    boolean TrapEL3;
    bits(1) Enable;
    bits(128) APDBKey_EL1;

    APDBKey_EL1 = APDBKeyHi_EL1<63:0> : APDBKeyLo_EL1<63:0>;
    case PSTATE.EL of
        when EL0
            boolean IsEL1Regime = S1TranslationRegime() == EL1;
            Enable = if IsEL1Regime then SCTLR_EL1.EnDB else SCTLR_EL2.EnDB;
            TrapEL2 = (EL2Enabled() && HCR_EL2.API == '0' &&
                       (HCR_EL2.TGE == '0' || HCR_EL2.E2H == '0'));
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL1
            Enable = SCTLR_EL1.EnDB;
            TrapEL2 = EL2Enabled() && HCR_EL2.API == '0';
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL2
            Enable = SCTLR_EL2.EnDB;
            TrapEL2 = FALSE;
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL3
            Enable = SCTLR_EL3.EnDB;
            TrapEL2 = FALSE;
            TrapEL3 = FALSE;

    if Enable == '0' then
        return X;
    elsif TrapEL2 then
        TrapPACUse(EL2);
    elsif TrapEL3 then
        TrapPACUse(EL3);
    else
        return Auth(X, Y, APDBKey_EL1, TRUE, '1', is_combined);

Library pseudocode for aarch64/functions/pac/authia/AuthIA

// AuthIA()
// ========
// Returns a 64-bit value containing X, but replacing the pointer authentication code
// field bits with the extension of the address bits. The instruction checks a pointer
// authentication code in the pointer authentication code field bits of X, using the same
// algorithm and key as AddPACIA().

bits(64) AuthIA(bits(64) X, bits(64) Y, boolean is_combined)
    boolean TrapEL2;
    boolean TrapEL3;
    bits(1) Enable;
    bits(128) APIAKey_EL1;

    APIAKey_EL1 = APIAKeyHi_EL1<63:0> : APIAKeyLo_EL1<63:0>;
    case PSTATE.EL of
        when EL0
            boolean IsEL1Regime = S1TranslationRegime() == EL1;
            Enable = if IsEL1Regime then SCTLR_EL1.EnIA else SCTLR_EL2.EnIA;
            TrapEL2 = (EL2Enabled() && HCR_EL2.API == '0' &&
                       (HCR_EL2.TGE == '0' || HCR_EL2.E2H == '0'));
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL1
            Enable = SCTLR_EL1.EnIA;
            TrapEL2 = EL2Enabled() && HCR_EL2.API == '0';
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL2
            Enable = SCTLR_EL2.EnIA;
            TrapEL2 = FALSE;
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL3
            Enable = SCTLR_EL3.EnIA;
            TrapEL2 = FALSE;
            TrapEL3 = FALSE;

    if Enable == '0' then
        return X;
    elsif TrapEL2 then
        TrapPACUse(EL2);
    elsif TrapEL3 then
        TrapPACUse(EL3);
    else
        return Auth(X, Y, APIAKey_EL1, FALSE, '0', is_combined);

Library pseudocode for aarch64/functions/pac/authib/AuthIB

// AuthIB()
// ========
// Returns a 64-bit value containing X, but replacing the pointer authentication code
// field bits with the extension of the address bits. The instruction checks a pointer
// authentication code in the pointer authentication code field bits of X, using the same
// algorithm and key as AddPACIB().

bits(64) AuthIB(bits(64) X, bits(64) Y, boolean is_combined)
    boolean TrapEL2;
    boolean TrapEL3;
    bits(1) Enable;
    bits(128) APIBKey_EL1;

    APIBKey_EL1 = APIBKeyHi_EL1<63:0> : APIBKeyLo_EL1<63:0>;
    case PSTATE.EL of
        when EL0
            boolean IsEL1Regime = S1TranslationRegime() == EL1;
            Enable = if IsEL1Regime then SCTLR_EL1.EnIB else SCTLR_EL2.EnIB;
            TrapEL2 = (EL2Enabled() && HCR_EL2.API == '0' &&
                       (HCR_EL2.TGE == '0' || HCR_EL2.E2H == '0'));
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL1
            Enable = SCTLR_EL1.EnIB;
            TrapEL2 = EL2Enabled() && HCR_EL2.API == '0';
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL2
            Enable = SCTLR_EL2.EnIB;
            TrapEL2 = FALSE;
            TrapEL3 = HaveEL(EL3) && SCR_EL3.API == '0';
        when EL3
            Enable = SCTLR_EL3.EnIB;
            TrapEL2 = FALSE;
            TrapEL3 = FALSE;

    if Enable == '0' then
        return X;
    elsif TrapEL2 then
        TrapPACUse(EL2);
    elsif TrapEL3 then
        TrapPACUse(EL3);
    else
        return Auth(X, Y, APIBKey_EL1, FALSE, '1', is_combined);

Library pseudocode for aarch64/functions/pac/calcbottompacbit/CalculateBottomPACBit

// CalculateBottomPACBit()
// =======================

integer CalculateBottomPACBit(bit top_bit)
    integer tsz_field;

    if PtrHasUpperAndLowerAddRanges() then
        assert S1TranslationRegime() IN {EL1, EL2};
        if S1TranslationRegime() == EL1 then
            // EL1 translation regime registers
            tsz_field = if top_bit == '1' then UInt(TCR_EL1.T1SZ) else UInt(TCR_EL1.T0SZ);
            using64k = if top_bit == '1' then TCR_EL1.TG1 == '11' else TCR_EL1.TG0 == '01';
        else
            // EL2 translation regime registers
            assert HaveEL(EL2);
            tsz_field = if top_bit == '1' then UInt(TCR_EL2.T1SZ) else UInt(TCR_EL2.T0SZ);
            using64k = if top_bit == '1' then TCR_EL2.TG1 == '11' else TCR_EL2.TG0 == '01';
    else
        tsz_field = if PSTATE.EL == EL2 then UInt(TCR_EL2.T0SZ) else UInt(TCR_EL3.T0SZ);
        using64k = if PSTATE.EL == EL2 then TCR_EL2.TG0 == '01' else TCR_EL3.TG0 == '01';

    max_limit_tsz_field = (if !HaveSmallPageTblExt() then 39 else if using64k then 47 else 48);
    if tsz_field > max_limit_tsz_field then
        // TCR_ELx.TySZ is out of range
        c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
        assert c IN {Constraint_FORCE, Constraint_NONE};
        if c == Constraint_FORCE then tsz_field = max_limit_tsz_field;
    tszmin = if using64k && VAMax() == 52 then 12 else 16;
    if tsz_field < tszmin then
        c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
        assert c IN {Constraint_FORCE, Constraint_NONE};
        if c == Constraint_FORCE then tsz_field = tszmin;
    return (64-tsz_field);
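Once the clamping above has been applied, the result reduces to 64 - TnSZ: the PAC field starts immediately above the configured virtual address range. A small illustrative C check of the arithmetic:

#include <stdio.h>

/* bottom_PAC_bit = 64 - TnSZ (after the TnSZ range clamping shown above). */
static int bottom_pac_bit(int tnsz) { return 64 - tnsz; }

int main(void) {
    /* 48-bit VA: T0SZ = 16 -> PAC field starts at bit 48 (bits 54:48 with TBI). */
    printf("T0SZ=16 -> bottom bit %d\n", bottom_pac_bit(16));
    /* 39-bit VA: T0SZ = 25 -> PAC field starts at bit 39 (bits 54:39 with TBI). */
    printf("T0SZ=25 -> bottom bit %d\n", bottom_pac_bit(25));
    return 0;
}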

Library pseudocode for aarch64/functions/pac/computepac/ComputePAC

array bits(64) RC[0..4];

bits(64) ComputePAC(bits(64) data, bits(64) modifier, bits(64) key0, bits(64) key1)
    bits(64) workingval;
    bits(64) runningmod;
    bits(64) roundkey;
    bits(64) modk0;
    constant bits(64) Alpha = 0xC0AC29B7C97C50DD<63:0>;

    RC[0] = 0x0000000000000000<63:0>;
    RC[1] = 0x13198A2E03707344<63:0>;
    RC[2] = 0xA4093822299F31D0<63:0>;
    RC[3] = 0x082EFA98EC4E6C89<63:0>;
    RC[4] = 0x452821E638D01377<63:0>;

    modk0 = key0<0>:key0<63:2>:(key0<63> EOR key0<1>);
    runningmod = modifier;
    workingval = data EOR key0;

    for i = 0 to 4
        roundkey = key1 EOR runningmod;
        workingval = workingval EOR roundkey;
        workingval = workingval EOR RC[i];
        if i > 0 then
            workingval = PACCellShuffle(workingval);
            workingval = PACMult(workingval);
        workingval = PACSub(workingval);
        runningmod = TweakShuffle(runningmod<63:0>);

    roundkey = modk0 EOR runningmod;
    workingval = workingval EOR roundkey;
    workingval = PACCellShuffle(workingval);
    workingval = PACMult(workingval);
    workingval = PACSub(workingval);
    workingval = PACCellShuffle(workingval);
    workingval = PACMult(workingval);
    workingval = key1 EOR workingval;
    workingval = PACCellInvShuffle(workingval);
    workingval = PACInvSub(workingval);
    workingval = PACMult(workingval);
    workingval = PACCellInvShuffle(workingval);
    workingval = workingval EOR key0;
    workingval = workingval EOR runningmod;

    for i = 0 to 4
        workingval = PACInvSub(workingval);
        if i < 4 then
            workingval = PACMult(workingval);
            workingval = PACCellInvShuffle(workingval);
        runningmod = TweakInvShuffle(runningmod<63:0>);
        roundkey = key1 EOR runningmod;
        workingval = workingval EOR RC[4-i];
        workingval = workingval EOR roundkey;

    workingval = workingval EOR Alpha;
    workingval = workingval EOR modk0;

    return workingval;
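Structurally this is a PRINCE/QARMA-family reflection cipher: five forward rounds keyed by key1 and the shuffled modifier (the tweak), a central reflector keyed by the derived key modk0, then five inverse rounds, with Alpha as the reflection constant and RC[0..4] as round constants. The derived key is key0 rotated right by one bit with a feedback tap; a C transcription of just that step (the helper name is ours, not the spec's):

#include <assert.h>
#include <stdint.h>

/* modk0 = key0<0> : key0<63:2> : (key0<63> EOR key0<1>) */
static uint64_t derive_modk0(uint64_t key0) {
    return ((key0 & 1) << 63)                   /* bit 63    <- key0<0>            */
         | ((key0 >> 2) << 1)                   /* bits 62:1 <- key0<63:2>         */
         | ((key0 >> 63) ^ ((key0 >> 1) & 1));  /* bit 0     <- key0<63> ^ key0<1> */
}

int main(void) {
    assert(derive_modk0(1) == 0x8000000000000000ULL);  /* key0<0> lands in bit 63    */
    assert(derive_modk0(2) == 1);                      /* key0<1> feeds back to bit 0 */
    return 0;
}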

Library pseudocode for aarch64/functions/pac/computepac/PACCellInvShuffle

// PACCellInvShuffle()
// ===================

bits(64) PACCellInvShuffle(bits(64) indata)
    bits(64) outdata;
    outdata<3:0> = indata<15:12>;
    outdata<7:4> = indata<27:24>;
    outdata<11:8> = indata<51:48>;
    outdata<15:12> = indata<39:36>;
    outdata<19:16> = indata<59:56>;
    outdata<23:20> = indata<47:44>;
    outdata<27:24> = indata<7:4>;
    outdata<31:28> = indata<19:16>;
    outdata<35:32> = indata<35:32>;
    outdata<39:36> = indata<55:52>;
    outdata<43:40> = indata<31:28>;
    outdata<47:44> = indata<11:8>;
    outdata<51:48> = indata<23:20>;
    outdata<55:52> = indata<3:0>;
    outdata<59:56> = indata<43:40>;
    outdata<63:60> = indata<63:60>;
    return outdata;

Library pseudocode for aarch64/functions/pac/computepac/PACCellShuffle

// PACCellShuffle()
// ================

bits(64) PACCellShuffle(bits(64) indata)
    bits(64) outdata;
    outdata<3:0> = indata<55:52>;
    outdata<7:4> = indata<27:24>;
    outdata<11:8> = indata<47:44>;
    outdata<15:12> = indata<3:0>;
    outdata<19:16> = indata<31:28>;
    outdata<23:20> = indata<51:48>;
    outdata<27:24> = indata<7:4>;
    outdata<31:28> = indata<43:40>;
    outdata<35:32> = indata<35:32>;
    outdata<39:36> = indata<15:12>;
    outdata<43:40> = indata<59:56>;
    outdata<47:44> = indata<23:20>;
    outdata<51:48> = indata<11:8>;
    outdata<55:52> = indata<39:36>;
    outdata<59:56> = indata<19:16>;
    outdata<63:60> = indata<63:60>;
    return outdata;

Library pseudocode for aarch64/functions/pac/computepac/PACInvSub

// PACInvSub()
// ===========

bits(64) PACInvSub(bits(64) Tinput)
    // This is a 4-bit substitution from the PRINCE-family cipher
    bits(64) Toutput;
    for i = 0 to 15
        case Tinput<4*i+3:4*i> of
            when '0000' Toutput<4*i+3:4*i> = '0101';
            when '0001' Toutput<4*i+3:4*i> = '1110';
            when '0010' Toutput<4*i+3:4*i> = '1101';
            when '0011' Toutput<4*i+3:4*i> = '1000';
            when '0100' Toutput<4*i+3:4*i> = '1010';
            when '0101' Toutput<4*i+3:4*i> = '1011';
            when '0110' Toutput<4*i+3:4*i> = '0001';
            when '0111' Toutput<4*i+3:4*i> = '1001';
            when '1000' Toutput<4*i+3:4*i> = '0010';
            when '1001' Toutput<4*i+3:4*i> = '0110';
            when '1010' Toutput<4*i+3:4*i> = '1111';
            when '1011' Toutput<4*i+3:4*i> = '0000';
            when '1100' Toutput<4*i+3:4*i> = '0100';
            when '1101' Toutput<4*i+3:4*i> = '1100';
            when '1110' Toutput<4*i+3:4*i> = '0111';
            when '1111' Toutput<4*i+3:4*i> = '0011';
    return Toutput;

Library pseudocode for aarch64/functions/pac/computepac/PACMult

// PACMult()
// =========

bits(64) PACMult(bits(64) Sinput)
    bits(4) t0;
    bits(4) t1;
    bits(4) t2;
    bits(4) t3;
    bits(64) Soutput;

    for i = 0 to 3
        t0<3:0> = RotCell(Sinput<4*(i+8)+3:4*(i+8)>, 1) EOR RotCell(Sinput<4*(i+4)+3:4*(i+4)>, 2);
        t0<3:0> = t0<3:0> EOR RotCell(Sinput<4*(i)+3:4*(i)>, 1);
        t1<3:0> = RotCell(Sinput<4*(i+12)+3:4*(i+12)>, 1) EOR RotCell(Sinput<4*(i+4)+3:4*(i+4)>, 1);
        t1<3:0> = t1<3:0> EOR RotCell(Sinput<4*(i)+3:4*(i)>, 2);
        t2<3:0> = RotCell(Sinput<4*(i+12)+3:4*(i+12)>, 2) EOR RotCell(Sinput<4*(i+8)+3:4*(i+8)>, 1);
        t2<3:0> = t2<3:0> EOR RotCell(Sinput<4*(i)+3:4*(i)>, 1);
        t3<3:0> = RotCell(Sinput<4*(i+12)+3:4*(i+12)>, 1) EOR RotCell(Sinput<4*(i+8)+3:4*(i+8)>, 2);
        t3<3:0> = t3<3:0> EOR RotCell(Sinput<4*(i+4)+3:4*(i+4)>, 1);
        Soutput<4*i+3:4*i> = t3<3:0>;
        Soutput<4*(i+4)+3:4*(i+4)> = t2<3:0>;
        Soutput<4*(i+8)+3:4*(i+8)> = t1<3:0>;
        Soutput<4*(i+12)+3:4*(i+12)> = t0<3:0>;
    return Soutput;

Library pseudocode for aarch64/functions/pac/computepac/PACSub

// PACSub()
// ========

bits(64) PACSub(bits(64) Tinput)
    // This is a 4-bit substitution from the PRINCE-family cipher
    bits(64) Toutput;
    for i = 0 to 15
        case Tinput<4*i+3:4*i> of
            when '0000' Toutput<4*i+3:4*i> = '1011';
            when '0001' Toutput<4*i+3:4*i> = '0110';
            when '0010' Toutput<4*i+3:4*i> = '1000';
            when '0011' Toutput<4*i+3:4*i> = '1111';
            when '0100' Toutput<4*i+3:4*i> = '1100';
            when '0101' Toutput<4*i+3:4*i> = '0000';
            when '0110' Toutput<4*i+3:4*i> = '1001';
            when '0111' Toutput<4*i+3:4*i> = '1110';
            when '1000' Toutput<4*i+3:4*i> = '0011';
            when '1001' Toutput<4*i+3:4*i> = '0111';
            when '1010' Toutput<4*i+3:4*i> = '0100';
            when '1011' Toutput<4*i+3:4*i> = '0101';
            when '1100' Toutput<4*i+3:4*i> = '1101';
            when '1101' Toutput<4*i+3:4*i> = '0010';
            when '1110' Toutput<4*i+3:4*i> = '0001';
            when '1111' Toutput<4*i+3:4*i> = '1010';
    return Toutput;

Library pseudocode for aarch64/functions/pac/computepac/RotCell

// RotCell()
// =========

bits(4) RotCell(bits(4) incell, integer amount)
    bits(8) tmp;
    bits(4) outcell;

    // assert amount>3 || amount<1;
    tmp<7:0> = incell<3:0>:incell<3:0>;
    outcell = tmp<7-amount:4-amount>;
    return outcell;

Library pseudocode for aarch64/functions/pac/computepac/TweakCellInvRot

// TweakCellInvRot()
// =================

bits(4) TweakCellInvRot(bits(4) incell)
    bits(4) outcell;
    outcell<3> = incell<2>;
    outcell<2> = incell<1>;
    outcell<1> = incell<0>;
    outcell<0> = incell<0> EOR incell<3>;
    return outcell;

Library pseudocode for aarch64/functions/pac/computepac/TweakCellRot

// TweakCellRot()
// ==============

bits(4) TweakCellRot(bits(4) incell)
    bits(4) outcell;
    outcell<3> = incell<0> EOR incell<1>;
    outcell<2> = incell<3>;
    outcell<1> = incell<2>;
    outcell<0> = incell<1>;
    return outcell;

Library pseudocode for aarch64/functions/pac/computepac/TweakInvShuffle

// TweakInvShuffle()
// =================

bits(64) TweakInvShuffle(bits(64) indata)
    bits(64) outdata;
    outdata<3:0> = TweakCellInvRot(indata<51:48>);
    outdata<7:4> = indata<55:52>;
    outdata<11:8> = indata<23:20>;
    outdata<15:12> = indata<27:24>;
    outdata<19:16> = indata<3:0>;
    outdata<23:20> = indata<7:4>;
    outdata<27:24> = TweakCellInvRot(indata<11:8>);
    outdata<31:28> = indata<15:12>;
    outdata<35:32> = TweakCellInvRot(indata<31:28>);
    outdata<39:36> = TweakCellInvRot(indata<63:60>);
    outdata<43:40> = TweakCellInvRot(indata<59:56>);
    outdata<47:44> = TweakCellInvRot(indata<19:16>);
    outdata<51:48> = indata<35:32>;
    outdata<55:52> = indata<39:36>;
    outdata<59:56> = indata<43:40>;
    outdata<63:60> = TweakCellInvRot(indata<47:44>);
    return outdata;

Library pseudocode for aarch64/functions/pac/computepac/TweakShuffle

// TweakShuffle()
// ==============

bits(64) TweakShuffle(bits(64) indata)
    bits(64) outdata;
    outdata<3:0> = indata<19:16>;
    outdata<7:4> = indata<23:20>;
    outdata<11:8> = TweakCellRot(indata<27:24>);
    outdata<15:12> = indata<31:28>;
    outdata<19:16> = TweakCellRot(indata<47:44>);
    outdata<23:20> = indata<11:8>;
    outdata<27:24> = indata<15:12>;
    outdata<31:28> = TweakCellRot(indata<35:32>);
    outdata<35:32> = indata<51:48>;
    outdata<39:36> = indata<55:52>;
    outdata<43:40> = indata<59:56>;
    outdata<47:44> = TweakCellRot(indata<63:60>);
    outdata<51:48> = TweakCellRot(indata<3:0>);
    outdata<55:52> = indata<7:4>;
    outdata<59:56> = TweakCellRot(indata<43:40>);
    outdata<63:60> = TweakCellRot(indata<39:36>);
    return outdata;
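The cell-level primitives above are built so the inverse rounds can mirror the forward rounds: PACInvSub inverts PACSub, PACCellInvShuffle inverts PACCellShuffle, RotCell is a rotate-left within a nibble, and the PACMult matrix is an involution (applying it twice is the identity). The self-contained C check below transcribes the tables and cell indices from the pseudocode above and verifies those identities; it is an illustrative test, not part of the architecture.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* 4-bit rotate-left, the C equivalent of RotCell(incell, amount). */
static uint8_t rotcell(uint8_t x, int amount) {
    return (uint8_t)(((x << amount) | (x >> (4 - amount))) & 0xF);
}

static uint8_t get_cell(uint64_t v, int i) { return (v >> (4 * i)) & 0xF; }
static uint64_t set_cell(uint64_t v, int i, uint8_t c) {
    return (v & ~(0xFULL << (4 * i))) | ((uint64_t)(c & 0xF) << (4 * i));
}

/* out cell j = in cell P[j], transcribed from PACCellShuffle / PACCellInvShuffle. */
static const int SHUF[16]    = {13, 6, 11, 0, 7, 12, 1, 10, 8, 3, 14, 5, 2, 9, 4, 15};
static const int INVSHUF[16] = {3, 6, 12, 9, 14, 11, 1, 4, 8, 13, 7, 2, 5, 0, 10, 15};

static uint64_t shuffle(uint64_t v, const int *p) {
    uint64_t out = 0;
    for (int j = 0; j < 16; j++) out = set_cell(out, j, get_cell(v, p[j]));
    return out;
}

/* PACSub / PACInvSub lookup tables transcribed from the case statements. */
static const uint8_t SUB[16]    = {0xB,0x6,0x8,0xF,0xC,0x0,0x9,0xE,0x3,0x7,0x4,0x5,0xD,0x2,0x1,0xA};
static const uint8_t INVSUB[16] = {0x5,0xE,0xD,0x8,0xA,0xB,0x1,0x9,0x2,0x6,0xF,0x0,0x4,0xC,0x7,0x3};

static uint64_t sub(uint64_t v, const uint8_t *t) {
    uint64_t out = 0;
    for (int j = 0; j < 16; j++) out = set_cell(out, j, t[get_cell(v, j)]);
    return out;
}

/* PACMult: the circulant MixColumns-style layer over columns (i, i+4, i+8, i+12). */
static uint64_t pacmult(uint64_t s) {
    uint64_t out = 0;
    for (int i = 0; i < 4; i++) {
        uint8_t s0 = get_cell(s, i),     s4  = get_cell(s, i + 4);
        uint8_t s8 = get_cell(s, i + 8), s12 = get_cell(s, i + 12);
        uint8_t t0 = rotcell(s8, 1)  ^ rotcell(s4, 2) ^ rotcell(s0, 1);
        uint8_t t1 = rotcell(s12, 1) ^ rotcell(s4, 1) ^ rotcell(s0, 2);
        uint8_t t2 = rotcell(s12, 2) ^ rotcell(s8, 1) ^ rotcell(s0, 1);
        uint8_t t3 = rotcell(s12, 1) ^ rotcell(s8, 2) ^ rotcell(s4, 1);
        out = set_cell(out, i, t3);
        out = set_cell(out, i + 4, t2);
        out = set_cell(out, i + 8, t1);
        out = set_cell(out, i + 12, t0);
    }
    return out;
}

int main(void) {
    for (int k = 0; k < 1000; k++) {
        uint64_t x = ((uint64_t)rand() << 32) ^ (uint64_t)rand();
        assert(shuffle(shuffle(x, SHUF), INVSHUF) == x);  /* shuffles are inverses    */
        assert(sub(sub(x, SUB), INVSUB) == x);            /* s-boxes are inverses     */
        assert(pacmult(pacmult(x)) == x);                 /* PACMult is an involution */
    }
    puts("all PAC primitive identities hold");
    return 0;
}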

Library pseudocode for aarch64/functions/pac/pac/HaveEnhancedPAC

// HaveEnhancedPAC()
// =================
// Returns TRUE if support for EnhancedPAC is implemented, FALSE otherwise.

boolean HaveEnhancedPAC()
    return (HavePACExt() && boolean IMPLEMENTATION_DEFINED "Has enhanced PAC functionality");

Library pseudocode for aarch64/functions/pac/pac/HaveEnhancedPAC2

// HaveEnhancedPAC2()
// ==================
// Returns TRUE if support for EnhancedPAC2 is implemented, FALSE otherwise.

boolean HaveEnhancedPAC2()
    return (HasArchVersion(ARMv8p6) ||
            (HasArchVersion(ARMv8p3) && boolean IMPLEMENTATION_DEFINED "Has enhanced PAC 2 functionality"));

Library pseudocode for aarch64/functions/pac/pac/HaveFPAC

// HaveFPAC()
// ==========
// Returns TRUE if support for FPAC is implemented, FALSE otherwise.

boolean HaveFPAC()
    return HaveEnhancedPAC2() && boolean IMPLEMENTATION_DEFINED "Has FPAC functionality";

Library pseudocode for aarch64/functions/pac/pac/HaveFPACCombined

// HaveFPACCombined()
// ==================
// Returns TRUE if support for FPACCombined is implemented, FALSE otherwise.

boolean HaveFPACCombined()
    return HaveFPAC() && boolean IMPLEMENTATION_DEFINED "Has FPAC Combined functionality";

Library pseudocode for aarch64/functions/pac/pac/HavePACExt

// HavePACExt()
// ============
// Returns TRUE if support for the PAC extension is implemented, FALSE otherwise.

boolean HavePACExt()
    return HasArchVersion(ARMv8p3);

Library pseudocode for aarch64/functions/pac/pac/PtrHasUpperAndLowerAddRanges

// PtrHasUpperAndLowerAddRanges()
// ==============================
// Returns TRUE if the pointer has upper and lower address ranges, FALSE otherwise.

boolean PtrHasUpperAndLowerAddRanges()
    return PSTATE.EL == EL1 || PSTATE.EL == EL0 || (PSTATE.EL == EL2 && HCR_EL2.E2H == '1');

Library pseudocode for aarch64/functions/pac/strip/Strip

// Strip()
// =======
// Strip() returns a 64-bit value containing A, but replacing the pointer authentication
// code field bits with the extension of the address bits. This can apply to either
// instructions or data, where, as the use of tagged pointers is distinct, it might be
// handled differently.

bits(64) Strip(bits(64) A, boolean data)
    bits(64) original_ptr;
    bits(64) extfield;

    boolean tbi = EffectiveTBI(A, !data, PSTATE.EL) == '1';
    integer bottom_PAC_bit = CalculateBottomPACBit(A<55>);
    extfield = Replicate(A<55>, 64);

    if tbi then
        original_ptr = A<63:56>:extfield<56-bottom_PAC_bit-1:0>:A<bottom_PAC_bit-1:0>;
    else
        original_ptr = extfield<64-bottom_PAC_bit-1:0>:A<bottom_PAC_bit-1:0>;

    return original_ptr;
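In effect Strip() overwrites the PAC field with copies of bit<55>, keeping the tag byte when TBI is enabled. An illustrative C transcription, with bottom standing in for CalculateBottomPACBit() and tbi for EffectiveTBI():

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static uint64_t strip(uint64_t a, int bottom, bool tbi) {
    uint64_t ext  = ((a >> 55) & 1) ? ~0ULL : 0;  /* replicate bit<55> */
    uint64_t mask = ~0ULL << bottom;              /* bits 63:bottom    */
    if (tbi) mask &= ~0ULL >> 8;                  /* TBI: keep tag bits 63:56 */
    return (a & ~mask) | (ext & mask);
}

int main(void) {
    /* Low-range pointer (bit<55> = 0), 48-bit VA, TBI on: PAC bits cleared. */
    assert(strip(0x001F00ABCD123456ULL, 48, true) == 0x000000ABCD123456ULL);
    /* An already-canonical high-range pointer is returned unchanged. */
    assert(strip(0xFFFF8000DEADBEEFULL, 48, false) == 0xFFFF8000DEADBEEFULL);
    return 0;
}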

Library pseudocode for aarch64/functions/pac/trappacuse/TrapPACUse

// TrapPACUse()
// ============
// Used for the trapping of the pointer authentication functions by higher exception
// levels.

TrapPACUse(bits(2) target_el)
    assert HaveEL(target_el) && target_el != EL0 && UInt(target_el) >= UInt(PSTATE.EL);

    bits(64) preferred_exception_return = ThisInstrAddr();
    ExceptionRecord exception;
    vect_offset = 0;
    exception = ExceptionSyndrome(Exception_PACTrap);
    AArch64.TakeException(target_el, exception, preferred_exception_return, vect_offset);

Library pseudocode for aarch64/functions/ras/AArch64.ESBOperation

// AArch64.ESBOperation()
// ======================
// Perform the AArch64 ESB operation, either for ESB executed in AArch64 state, or for
// ESB in AArch32 state when SError interrupts are routed to an Exception level using
// AArch64

AArch64.ESBOperation()
    route_to_el3 = HaveEL(EL3) && SCR_EL3.EA == '1';
    route_to_el2 = (EL2Enabled() && (HCR_EL2.TGE == '1' || HCR_EL2.AMO == '1'));

    target = if route_to_el3 then EL3 elsif route_to_el2 then EL2 else EL1;

    if target == EL1 then
        mask_active = PSTATE.EL IN {EL0, EL1};
    elsif HaveVirtHostExt() && target == EL2 && HCR_EL2.<E2H,TGE> == '11' then
        mask_active = PSTATE.EL IN {EL0, EL2};
    else
        mask_active = PSTATE.EL == target;

    mask_set = (PSTATE.A == '1' && (!HaveDoubleFaultExt() || SCR_EL3.EA == '0' ||
                PSTATE.EL != EL3 || SCR_EL3.NMEA == '0'));
    intdis = Halted() || ExternalDebugInterruptsDisabled(target);
    masked = (UInt(target) < UInt(PSTATE.EL)) || intdis || (mask_active && mask_set);

    // Check for a masked Physical SError pending that can be synchronized
    // by an Error synchronization event.
    if masked && IsSynchronizablePhysicalSErrorPending() then
        // This function might be called for an interworking case, and INTdis is masking
        // the SError interrupt.
        if ELUsingAArch32(S1TranslationRegime()) then
            syndrome32 = AArch32.PhysicalSErrorSyndrome();
            DISR = AArch32.ReportDeferredSError(syndrome32.AET, syndrome32.ExT);
        else
            implicit_esb = FALSE;
            syndrome64 = AArch64.PhysicalSErrorSyndrome(implicit_esb);
            DISR_EL1 = AArch64.ReportDeferredSError(syndrome64)<31:0>;
        ClearPendingPhysicalSError();    // Set ISR_EL1.A to 0

    return;

Library pseudocode for aarch64/functions/ras/AArch64.PhysicalSErrorSyndrome

// Return the SError syndrome
bits(25) AArch64.PhysicalSErrorSyndrome(boolean implicit_esb);

Library pseudocode for aarch64/functions/ras/AArch64.ReportDeferredSError

// AArch64.ReportDeferredSError()
// ==============================
// Generate deferred SError syndrome

bits(64) AArch64.ReportDeferredSError(bits(25) syndrome)
    bits(64) target;
    target<31>   = '1';              // A
    target<24>   = syndrome<24>;     // IDS
    target<23:0> = syndrome<23:0>;   // ISS
    return target;

Library pseudocode for aarch64/functions/ras/AArch64.vESBOperation

// AArch64.vESBOperation()
// =======================
// Perform the AArch64 ESB operation for virtual SError interrupts, either for ESB
// executed in AArch64 state, or for ESB in AArch32 state with EL2 using AArch64 state

AArch64.vESBOperation()
    assert PSTATE.EL IN {EL0, EL1} && EL2Enabled();

    // If physical SError interrupts are routed to EL2, and TGE is not set, then a virtual
    // SError interrupt might be pending
    vSEI_enabled = HCR_EL2.TGE == '0' && HCR_EL2.AMO == '1';
    vSEI_pending = vSEI_enabled && HCR_EL2.VSE == '1';
    vintdis      = Halted() || ExternalDebugInterruptsDisabled(EL1);
    vmasked      = vintdis || PSTATE.A == '1';

    // Check for a masked virtual SError pending
    if vSEI_pending && vmasked then
        // This function might be called for the interworking case, and INTdis is masking
        // the virtual SError interrupt.
        if ELUsingAArch32(EL1) then
            VDISR = AArch32.ReportDeferredSError(VDFSR<15:14>, VDFSR<12>);
        else
            VDISR_EL2 = AArch64.ReportDeferredSError(VSESR_EL2<24:0>)<31:0>;
        HCR_EL2.VSE = '0';    // Clear pending virtual SError

    return;

Library pseudocode for aarch64/functions/registers/AArch64.MaybeZeroRegisterUppers

// AArch64.MaybeZeroRegisterUppers()
// =================================
// On taking an exception to AArch64 from AArch32, it is CONSTRAINED UNPREDICTABLE whether the top
// 32 bits of registers visible at any lower Exception level using AArch32 are set to zero.

AArch64.MaybeZeroRegisterUppers()
    assert UsingAArch32();    // Always called from AArch32 state before entering AArch64 state

    if PSTATE.EL == EL0 && !ELUsingAArch32(EL1) then
        first = 0;  last = 14;  include_R15 = FALSE;
    elsif PSTATE.EL IN {EL0, EL1} && EL2Enabled() && !ELUsingAArch32(EL2) then
        first = 0;  last = 30;  include_R15 = FALSE;
    else
        first = 0;  last = 30;  include_R15 = TRUE;

    for n = first to last
        if (n != 15 || include_R15) && ConstrainUnpredictableBool(Unpredictable_ZEROUPPER) then
            _R[n]<63:32> = Zeros();

    return;

Library pseudocode for aarch64/functions/registers/AArch64.ResetGeneralRegisters

// AArch64.ResetGeneralRegisters()
// ===============================

AArch64.ResetGeneralRegisters()

    for i = 0 to 30
        X[i] = bits(64) UNKNOWN;

    return;

Library pseudocode for aarch64/functions/registers/AArch64.ResetSIMDFPRegisters

// AArch64.ResetSIMDFPRegisters()
// ==============================

AArch64.ResetSIMDFPRegisters()

    for i = 0 to 31
        V[i] = bits(128) UNKNOWN;

    return;

Library pseudocode for aarch64/functions/registers/AArch64.ResetSpecialRegisters

// AArch64.ResetSpecialRegisters()
// ===============================

AArch64.ResetSpecialRegisters()

    // AArch64 special registers
    SP_EL0 = bits(64) UNKNOWN;
    SP_EL1 = bits(64) UNKNOWN;
    SPSR_EL1 = bits(32) UNKNOWN;
    ELR_EL1 = bits(64) UNKNOWN;
    if HaveEL(EL2) then
        SP_EL2 = bits(64) UNKNOWN;
        SPSR_EL2 = bits(32) UNKNOWN;
        ELR_EL2 = bits(64) UNKNOWN;
    if HaveEL(EL3) then
        SP_EL3 = bits(64) UNKNOWN;
        SPSR_EL3 = bits(32) UNKNOWN;
        ELR_EL3 = bits(64) UNKNOWN;

    // AArch32 special registers that are not architecturally mapped to AArch64 registers
    if HaveAArch32EL(EL1) then
        SPSR_fiq = bits(32) UNKNOWN;
        SPSR_irq = bits(32) UNKNOWN;
        SPSR_abt = bits(32) UNKNOWN;
        SPSR_und = bits(32) UNKNOWN;

    // External debug special registers
    DLR_EL0 = bits(64) UNKNOWN;
    DSPSR_EL0 = bits(32) UNKNOWN;

    return;

Library pseudocode for aarch64/functions/registers/AArch64.ResetSystemRegisters

AArch64.ResetSystemRegisters(boolean cold_reset);

Library pseudocode for aarch64/functions/registers/PC

// PC - non-assignment form
// ========================
// Read program counter.

bits(64) PC[]
    return _PC;

Library pseudocode for aarch64/functions/registers/SP

// SP[] - assignment form
// ======================
// Write to stack pointer from either a 32-bit or a 64-bit value.

SP[] = bits(width) value
    assert width IN {32,64};
    if PSTATE.SP == '0' then
        SP_EL0 = ZeroExtend(value);
    else
        case PSTATE.EL of
            when EL0  SP_EL0 = ZeroExtend(value);
            when EL1  SP_EL1 = ZeroExtend(value);
            when EL2  SP_EL2 = ZeroExtend(value);
            when EL3  SP_EL3 = ZeroExtend(value);
    return;

// SP[] - non-assignment form
// ==========================
// Read stack pointer with implicit slice of 8, 16, 32 or 64 bits.

bits(width) SP[]
    assert width IN {8,16,32,64};
    if PSTATE.SP == '0' then
        return SP_EL0<width-1:0>;
    else
        case PSTATE.EL of
            when EL0  return SP_EL0<width-1:0>;
            when EL1  return SP_EL1<width-1:0>;
            when EL2  return SP_EL2<width-1:0>;
            when EL3  return SP_EL3<width-1:0>;

Library pseudocode for aarch64/functions/registers/V

// V[] - assignment form
// =====================
// Write to SIMD&FP register with implicit extension from
// 8, 16, 32, 64 or 128 bits.

V[integer n] = bits(width) value
    assert n >= 0 && n <= 31;
    assert width IN {8,16,32,64,128};
    integer vlen = if IsSVEEnabled(PSTATE.EL) then VL else 128;
    if ConstrainUnpredictableBool(Unpredictable_SVEZEROUPPER) then
        _Z[n] = ZeroExtend(value);
    else
        _Z[n]<vlen-1:0> = ZeroExtend(value);

// V[] - non-assignment form
// =========================
// Read from SIMD&FP register with implicit slice of 8, 16
// 32, 64 or 128 bits.

bits(width) V[integer n]
    assert n >= 0 && n <= 31;
    assert width IN {8,16,32,64,128};
    return _Z[n]<width-1:0>;

Library pseudocode for aarch64/functions/registers/Vpart

// Vpart[] - non-assignment form
// =============================
// Reads a 128-bit SIMD&FP register in up to two parts:
// part 0 returns the bottom 8, 16, 32 or 64 bits of a value held in the register;
// part 1 returns the top half of the bottom 64 bits or the top half of the 128-bit
// value held in the register.

bits(width) Vpart[integer n, integer part]
    assert n >= 0 && n <= 31;
    assert part IN {0, 1};
    if part == 0 then
        assert width < 128;
        return V[n];
    else
        assert width IN {32,64};
        bits(128) vreg = V[n];
        return vreg<(width * 2)-1:width>;

// Vpart[] - assignment form
// =========================
// Writes a 128-bit SIMD&FP register in up to two parts:
// part 0 zero extends a 8, 16, 32, or 64-bit value to fill the whole register;
// part 1 inserts a 64-bit value into the top half of the register.

Vpart[integer n, integer part] = bits(width) value
    assert n >= 0 && n <= 31;
    assert part IN {0, 1};
    if part == 0 then
        assert width < 128;
        V[n] = value;
    else
        assert width == 64;
        bits(64) vreg = V[n];
        V[n] = value<63:0> : vreg;

Library pseudocode for aarch64/functions/registers/X

// X[] - assignment form
// =====================
// Write to general-purpose register from either a 32-bit or a 64-bit value.

X[integer n] = bits(width) value
    assert n >= 0 && n <= 31;
    assert width IN {32,64};
    if n != 31 then
        _R[n] = ZeroExtend(value);
    return;

// X[] - non-assignment form
// =========================
// Read from general-purpose register with implicit slice of 8, 16, 32 or 64 bits.

bits(width) X[integer n]
    assert n >= 0 && n <= 31;
    assert width IN {8,16,32,64};
    if n != 31 then
        return _R[n]<width-1:0>;
    else
        return Zeros(width);
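
In these accessor forms register 31 is the zero register: writes are dropped and reads return zero, and a 32-bit write zero-extends into the full 64-bit register. A small C model of that behaviour, assuming a plain 31-entry array for _R (names invented for illustration):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t gpr[31];                                    /* _R[0..30] */

static void x_write(int n, uint64_t value, int width)
{
    assert(n >= 0 && n <= 31 && (width == 32 || width == 64));
    if (n != 31)                                            /* XZR writes vanish */
        gpr[n] = (width == 32) ? (uint32_t)value : value;   /* ZeroExtend */
}

static uint64_t x_read(int n, int width)
{
    assert(n >= 0 && n <= 31);
    uint64_t v = (n != 31) ? gpr[n] : 0;                    /* Zeros(width) for XZR */
    return (width == 64) ? v : (v & ((1ULL << width) - 1)); /* implicit slice */
}

int main(void)
{
    x_write(0, 0xFFFFFFFFFFFFFFFFULL, 32);  /* a W0 write clears the upper half */
    printf("%016llx %d\n", (unsigned long long)x_read(0, 64), (int)x_read(31, 8));
    return 0;
}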

Library pseudocode for aarch64/functions/sve/AArch32.IsFPEnabled

// AArch32.IsFPEnabled()
// =====================
// Returns TRUE if access to the SIMD&FP instructions or System registers are
// enabled at the target exception level in AArch32 state and FALSE otherwise.

boolean AArch32.IsFPEnabled(bits(2) el)
    if el == EL0 && !ELUsingAArch32(EL1) then
        return AArch64.IsFPEnabled(el);

    if HaveEL(EL3) && ELUsingAArch32(EL3) && !IsSecure() then
        // Check if access disabled in NSACR
        if NSACR.cp10 == '0' then return FALSE;

    if el IN {EL0, EL1} then
        // Check if access disabled in CPACR
        case CPACR.cp10 of
            when '00' disabled = TRUE;
            when '01' disabled = el == EL0;
            when '10' disabled = ConstrainUnpredictableBool(Unpredictable_RESCPACR);
            when '11' disabled = FALSE;
        if disabled then return FALSE;

    if el IN {EL0, EL1, EL2} && EL2Enabled() then
        if !ELUsingAArch32(EL2) then
            return AArch64.IsFPEnabled(EL2);
        if HCPTR.TCP10 == '1' then return FALSE;

    if HaveEL(EL3) && !ELUsingAArch32(EL3) then
        // Check if access disabled in CPTR_EL3
        if CPTR_EL3.TFP == '1' then return FALSE;

    return TRUE;

Library pseudocode for aarch64/functions/sve/AArch64.IsFPEnabled

// AArch64.IsFPEnabled()
// =====================
// Returns TRUE if access to the SIMD&FP instructions or System registers are
// enabled at the target exception level in AArch64 state and FALSE otherwise.

boolean AArch64.IsFPEnabled(bits(2) el)
    // Check if access disabled in CPACR_EL1
    if el IN {EL0, EL1} && !IsInHost() then
        // Check FP&SIMD at EL0/EL1
        case CPACR_EL1.FPEN of
            when 'x0' disabled = TRUE;
            when '01' disabled = el == EL0;
            when '11' disabled = FALSE;
        if disabled then return FALSE;

    // Check if access disabled in CPTR_EL2
    if el IN {EL0, EL1, EL2} && EL2Enabled() then
        if HaveVirtHostExt() && HCR_EL2.E2H == '1' then
            case CPTR_EL2.FPEN of
                when 'x0' disabled = TRUE;
                when '01' disabled = el == EL0 && HCR_EL2.TGE == '1';
                when '11' disabled = FALSE;
            if disabled then return FALSE;
        else
            if CPTR_EL2.TFP == '1' then return FALSE;

    // Check if access disabled in CPTR_EL3
    if HaveEL(EL3) then
        if CPTR_EL3.TFP == '1' then return FALSE;

    return TRUE;

Library pseudocode for aarch64/functions/sve/CeilPow2

// CeilPow2()
// ==========
// For a positive integer X, return the smallest power of 2 >= X

integer CeilPow2(integer x)
    if x == 0 then return 0;
    if x == 1 then return 2;
    return FloorPow2(x - 1) * 2;
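
CeilPow2() leans on FloorPow2(), which is defined later in this section. A direct C transcription of the pair for non-negative inputs, keeping the pseudocode's convention that CeilPow2(1) is 2:

#include <stdio.h>

static unsigned floor_pow2(unsigned x)
{
    unsigned p = 1;
    if (x == 0) return 0;
    while (x >= 2u * p) p *= 2;   /* largest power of two <= x */
    return p;
}

static unsigned ceil_pow2(unsigned x)
{
    if (x == 0) return 0;
    if (x == 1) return 2;         /* note: the pseudocode defines CeilPow2(1) = 2 */
    return floor_pow2(x - 1) * 2;
}

int main(void)
{
    printf("%u %u %u\n", floor_pow2(12), ceil_pow2(12), ceil_pow2(16));  /* 8 16 16 */
    return 0;
}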

Library pseudocode for aarch64/functions/sve/CheckSVEEnabled

// CheckSVEEnabled()
// =================
// Checks for traps on SVE instructions and instructions that
// access SVE System registers.

CheckSVEEnabled()
    // Check if access disabled in CPACR_EL1
    if PSTATE.EL IN {EL0, EL1} && !IsInHost() then
        // Check SVE at EL0/EL1
        case CPACR_EL1.ZEN of
            when 'x0' disabled = TRUE;
            when '01' disabled = PSTATE.EL == EL0;
            when '11' disabled = FALSE;
        if disabled then SVEAccessTrap(EL1);

        // Check SIMD&FP at EL0/EL1
        case CPACR_EL1.FPEN of
            when 'x0' disabled = TRUE;
            when '01' disabled = PSTATE.EL == EL0;
            when '11' disabled = FALSE;
        if disabled then AArch64.AdvSIMDFPAccessTrap(EL1);

    // Check if access disabled in CPTR_EL2
    if PSTATE.EL IN {EL0, EL1, EL2} && EL2Enabled() then
        if HaveVirtHostExt() && HCR_EL2.E2H == '1' then
            // Check SVE at EL2
            case CPTR_EL2.ZEN of
                when 'x0' disabled = TRUE;
                when '01' disabled = PSTATE.EL == EL0 && HCR_EL2.TGE == '1';
                when '11' disabled = FALSE;
            if disabled then SVEAccessTrap(EL2);

            // Check SIMD&FP at EL2
            case CPTR_EL2.FPEN of
                when 'x0' disabled = TRUE;
                when '01' disabled = PSTATE.EL == EL0 && HCR_EL2.TGE == '1';
                when '11' disabled = FALSE;
            if disabled then AArch64.AdvSIMDFPAccessTrap(EL2);
        else
            if CPTR_EL2.TZ == '1' then SVEAccessTrap(EL2);
            if CPTR_EL2.TFP == '1' then AArch64.AdvSIMDFPAccessTrap(EL2);

    // Check if access disabled in CPTR_EL3
    if HaveEL(EL3) then
        if CPTR_EL3.EZ == '0' then SVEAccessTrap(EL3);
        if CPTR_EL3.TFP == '1' then AArch64.AdvSIMDFPAccessTrap(EL3);

Library pseudocode for aarch64/functions/sve/DecodePredCount

// DecodePredCount()
// =================

integer DecodePredCount(bits(5) pattern, integer esize)
    integer elements = VL DIV esize;
    integer numElem;
    case pattern of
        when '00000' numElem = FloorPow2(elements);
        when '00001' numElem = if elements >= 1 then 1 else 0;
        when '00010' numElem = if elements >= 2 then 2 else 0;
        when '00011' numElem = if elements >= 3 then 3 else 0;
        when '00100' numElem = if elements >= 4 then 4 else 0;
        when '00101' numElem = if elements >= 5 then 5 else 0;
        when '00110' numElem = if elements >= 6 then 6 else 0;
        when '00111' numElem = if elements >= 7 then 7 else 0;
        when '01000' numElem = if elements >= 8 then 8 else 0;
        when '01001' numElem = if elements >= 16 then 16 else 0;
        when '01010' numElem = if elements >= 32 then 32 else 0;
        when '01011' numElem = if elements >= 64 then 64 else 0;
        when '01100' numElem = if elements >= 128 then 128 else 0;
        when '01101' numElem = if elements >= 256 then 256 else 0;
        when '11101' numElem = elements - (elements MOD 4);
        when '11110' numElem = elements - (elements MOD 3);
        when '11111' numElem = elements;
        otherwise    numElem = 0;
    return numElem;
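
A compact C model of the decode, assuming the caller has already computed elements = VL DIV esize; the names in the comments (POW2, VL1..VL256, MUL4, MUL3, ALL) are the assembler names of these pattern encodings:

#include <stdio.h>

static int decode_pred_count(unsigned pattern, int elements)
{
    static const int vl_tab[] = {0 /* POW2, handled below */,
                                 1, 2, 3, 4, 5, 6, 7, 8, 16, 32, 64, 128, 256};
    if (pattern == 0x00) {                     /* POW2 */
        int p = 1;
        while (elements >= 2 * p) p *= 2;
        return elements ? p : 0;
    }
    if (pattern >= 0x01 && pattern <= 0x0D)    /* VL1..VL256 */
        return elements >= vl_tab[pattern] ? vl_tab[pattern] : 0;
    if (pattern == 0x1D) return elements - (elements % 4);  /* MUL4 */
    if (pattern == 0x1E) return elements - (elements % 3);  /* MUL3 */
    if (pattern == 0x1F) return elements;                   /* ALL  */
    return 0;                                  /* all other encodings */
}

int main(void)
{
    /* a 256-bit vector of 32-bit elements has 8 elements */
    printf("%d %d %d\n", decode_pred_count(0x1E, 8),   /* MUL3 -> 6 */
                         decode_pred_count(0x09, 8),   /* VL16 -> 0 */
                         decode_pred_count(0x00, 8));  /* POW2 -> 8 */
    return 0;
}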

Library pseudocode for aarch64/functions/sve/ElemFFR

// ElemFFR[] - non-assignment form
// ===============================

bit ElemFFR[integer e, integer esize]
    return ElemP[_FFR, e, esize];

// ElemFFR[] - assignment form
// ===========================

ElemFFR[integer e, integer esize] = bit value
    integer psize = esize DIV 8;
    integer n = e * psize;
    assert n >= 0 && (n + psize) <= PL;
    _FFR<n+psize-1:n> = ZeroExtend(value, psize);
    return;

Library pseudocode for aarch64/functions/sve/ElemP

// ElemP[] - non-assignment form
// =============================

bit ElemP[bits(N) pred, integer e, integer esize]
    integer n = e * (esize DIV 8);
    assert n >= 0 && n < N;
    return pred<n>;

// ElemP[] - assignment form
// =========================

ElemP[bits(N) &pred, integer e, integer esize] = bit value
    integer psize = esize DIV 8;
    integer n = e * psize;
    assert n >= 0 && (n + psize) <= N;
    pred<n+psize-1:n> = ZeroExtend(value, psize);
    return;
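
In other words, element e of a predicate owns esize/8 predicate bits starting at bit e*(esize/8), and only the lowest of those bits carries the element's value. The sketch below models this in C with one byte per predicate bit, an assumed representation chosen for clarity:

#include <stdint.h>
#include <stdio.h>

static int elem_p_read(const uint8_t *pred, int e, int esize)
{
    return pred[e * (esize / 8)] & 1;          /* pred<n>, n = e * psize */
}

static void elem_p_write(uint8_t *pred, int e, int esize, int value)
{
    int psize = esize / 8, n = e * psize;
    pred[n] = value & 1;                       /* ZeroExtend(value, psize) */
    for (int i = 1; i < psize; i++) pred[n + i] = 0;
}

int main(void)
{
    uint8_t pred[32] = {0};                    /* PL = 32 predicate bits (VL = 256) */
    elem_p_write(pred, 3, 64, 1);              /* set element 3 of a 64-bit-element vector */
    printf("%d %d\n", elem_p_read(pred, 3, 64), elem_p_read(pred, 2, 64));  /* 1 0 */
    return 0;
}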

Library pseudocode for aarch64/functions/sve/FFR

// FFR[] - non-assignment form
// ===========================

bits(width) FFR[]
    assert width == PL;
    return _FFR<width-1:0>;

// FFR[] - assignment form
// =======================

FFR[] = bits(width) value
    assert width == PL;
    if ConstrainUnpredictableBool(Unpredictable_SVEZEROUPPER) then
        _FFR = ZeroExtend(value);
    else
        _FFR<width-1:0> = value;

Library pseudocode for aarch64/functions/sve/FPCompareNE

// FPCompareNE()
// =============

boolean FPCompareNE(bits(N) op1, bits(N) op2, FPCRType fpcr)
    assert N IN {16,32,64};
    (type1,sign1,value1) = FPUnpack(op1, fpcr);
    (type2,sign2,value2) = FPUnpack(op2, fpcr);
    op1_nan = type1 IN {FPType_SNaN, FPType_QNaN};
    op2_nan = type2 IN {FPType_SNaN, FPType_QNaN};
    if op1_nan || op2_nan then
        result = TRUE;
        if type1 == FPType_SNaN || type2 == FPType_SNaN then
            FPProcessException(FPExc_InvalidOp, fpcr);
    else
        // All non-NaN cases can be evaluated on the values produced by FPUnpack()
        result = (value1 != value2);
    return result;

Library pseudocode for aarch64/functions/sve/FPCompareUN

// FPCompareUN()
// =============

boolean FPCompareUN(bits(N) op1, bits(N) op2, FPCRType fpcr)
    assert N IN {16,32,64};
    (type1,sign1,value1) = FPUnpack(op1, fpcr);
    (type2,sign2,value2) = FPUnpack(op2, fpcr);
    if type1 == FPType_SNaN || type2 == FPType_SNaN then
        FPProcessException(FPExc_InvalidOp, fpcr);
    return type1 IN {FPType_SNaN, FPType_QNaN} || type2 IN {FPType_SNaN, FPType_QNaN};

Library pseudocode for aarch64/functions/sve/FPConvertSVE

// FPConvertSVE()
// ==============

bits(M) FPConvertSVE(bits(N) op, FPCRType fpcr, FPRounding rounding)
    fpcr.AHP = '0';
    return FPConvert(op, fpcr, rounding);

// FPConvertSVE()
// ==============

bits(M) FPConvertSVE(bits(N) op, FPCRType fpcr)
    fpcr.AHP = '0';
    return FPConvert(op, fpcr, FPRoundingMode(fpcr));

Library pseudocode for aarch64/functions/sve/FPExpA

// FPExpA()
// ========

bits(N) FPExpA(bits(N) op)
    assert N IN {16,32,64};
    bits(N) result;
    bits(N) coeff;
    integer idx = if N == 16 then UInt(op<4:0>) else UInt(op<5:0>);

    coeff = FPExpCoefficient[idx];
    if N == 16 then
        result<15:0> = '0':op<9:5>:coeff<9:0>;
    elsif N == 32 then
        result<31:0> = '0':op<13:6>:coeff<22:0>;
    else // N == 64
        result<63:0> = '0':op<16:6>:coeff<51:0>;

    return result;
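
For N == 32 the function splices an exponent field taken from op<13:6> together with a table entry holding the fraction of 2^(idx/64), so the assembled value approximates 2^((exponent - bias) + idx/64). The C sketch below recomputes the table entry for idx == 32 instead of copying it from FPExpCoefficient below, and checks the assembled single-precision value:

#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    int idx = 32, e = 130;                         /* fraction index and exponent field */
    uint32_t frac = (uint32_t)((pow(2.0, idx / 64.0) - 1.0) * (1 << 23));  /* 0x3504f3 */
    uint32_t bits = (((uint32_t)e & 0xFFu) << 23) | frac;
    float r;
    memcpy(&r, &bits, sizeof r);
    printf("%g vs %g\n", r, pow(2.0, (e - 127) + idx / 64.0));  /* ~11.3137 twice */
    return 0;
}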

Library pseudocode for aarch64/functions/sve/FPExpCoefficient

// FPExpCoefficient()
// ==================

bits(N) FPExpCoefficient[integer index]
    assert N IN {16,32,64};
    integer result;

    if N == 16 then
        case index of
            when  0 result = 0x0000;
            when  1 result = 0x0016;
            when  2 result = 0x002d;
            when  3 result = 0x0045;
            when  4 result = 0x005d;
            when  5 result = 0x0075;
            when  6 result = 0x008e;
            when  7 result = 0x00a8;
            when  8 result = 0x00c2;
            when  9 result = 0x00dc;
            when 10 result = 0x00f8;
            when 11 result = 0x0114;
            when 12 result = 0x0130;
            when 13 result = 0x014d;
            when 14 result = 0x016b;
            when 15 result = 0x0189;
            when 16 result = 0x01a8;
            when 17 result = 0x01c8;
            when 18 result = 0x01e8;
            when 19 result = 0x0209;
            when 20 result = 0x022b;
            when 21 result = 0x024e;
            when 22 result = 0x0271;
            when 23 result = 0x0295;
            when 24 result = 0x02ba;
            when 25 result = 0x02e0;
            when 26 result = 0x0306;
            when 27 result = 0x032e;
            when 28 result = 0x0356;
            when 29 result = 0x037f;
            when 30 result = 0x03a9;
            when 31 result = 0x03d4;
    elsif N == 32 then
        case index of
            when  0 result = 0x000000;
            when  1 result = 0x0164d2;
            when  2 result = 0x02cd87;
            when  3 result = 0x043a29;
            when  4 result = 0x05aac3;
            when  5 result = 0x071f62;
            when  6 result = 0x08980f;
            when  7 result = 0x0a14d5;
            when  8 result = 0x0b95c2;
            when  9 result = 0x0d1adf;
            when 10 result = 0x0ea43a;
            when 11 result = 0x1031dc;
            when 12 result = 0x11c3d3;
            when 13 result = 0x135a2b;
            when 14 result = 0x14f4f0;
            when 15 result = 0x16942d;
            when 16 result = 0x1837f0;
            when 17 result = 0x19e046;
            when 18 result = 0x1b8d3a;
            when 19 result = 0x1d3eda;
            when 20 result = 0x1ef532;
            when 21 result = 0x20b051;
            when 22 result = 0x227043;
            when 23 result = 0x243516;
            when 24 result = 0x25fed7;
            when 25 result = 0x27cd94;
            when 26 result = 0x29a15b;
            when 27 result = 0x2b7a3a;
            when 28 result = 0x2d583f;
            when 29 result = 0x2f3b79;
            when 30 result = 0x3123f6;
            when 31 result = 0x3311c4;
            when 32 result = 0x3504f3;
            when 33 result = 0x36fd92;
            when 34 result = 0x38fbaf;
            when 35 result = 0x3aff5b;
            when 36 result = 0x3d08a4;
            when 37 result = 0x3f179a;
            when 38 result = 0x412c4d;
            when 39 result = 0x4346cd;
            when 40 result = 0x45672a;
            when 41 result = 0x478d75;
            when 42 result = 0x49b9be;
            when 43 result = 0x4bec15;
            when 44 result = 0x4e248c;
            when 45 result = 0x506334;
            when 46 result = 0x52a81e;
            when 47 result = 0x54f35b;
            when 48 result = 0x5744fd;
            when 49 result = 0x599d16;
            when 50 result = 0x5bfbb8;
            when 51 result = 0x5e60f5;
            when 52 result = 0x60ccdf;
            when 53 result = 0x633f89;
            when 54 result = 0x65b907;
            when 55 result = 0x68396a;
            when 56 result = 0x6ac0c7;
            when 57 result = 0x6d4f30;
            when 58 result = 0x6fe4ba;
            when 59 result = 0x728177;
            when 60 result = 0x75257d;
            when 61 result = 0x77d0df;
            when 62 result = 0x7a83b3;
            when 63 result = 0x7d3e0c;
    else // N == 64
        case index of
            when  0 result = 0x0000000000000;
            when  1 result = 0x02C9A3E778061;
            when  2 result = 0x059B0D3158574;
            when  3 result = 0x0874518759BC8;
            when  4 result = 0x0B5586CF9890F;
            when  5 result = 0x0E3EC32D3D1A2;
            when  6 result = 0x11301D0125B51;
            when  7 result = 0x1429AAEA92DE0;
            when  8 result = 0x172B83C7D517B;
            when  9 result = 0x1A35BEB6FCB75;
            when 10 result = 0x1D4873168B9AA;
            when 11 result = 0x2063B88628CD6;
            when 12 result = 0x2387A6E756238;
            when 13 result = 0x26B4565E27CDD;
            when 14 result = 0x29E9DF51FDEE1;
            when 15 result = 0x2D285A6E4030B;
            when 16 result = 0x306FE0A31B715;
            when 17 result = 0x33C08B26416FF;
            when 18 result = 0x371A7373AA9CB;
            when 19 result = 0x3A7DB34E59FF7;
            when 20 result = 0x3DEA64C123422;
            when 21 result = 0x4160A21F72E2A;
            when 22 result = 0x44E086061892D;
            when 23 result = 0x486A2B5C13CD0;
            when 24 result = 0x4BFDAD5362A27;
            when 25 result = 0x4F9B2769D2CA7;
            when 26 result = 0x5342B569D4F82;
            when 27 result = 0x56F4736B527DA;
            when 28 result = 0x5AB07DD485429;
            when 29 result = 0x5E76F15AD2148;
            when 30 result = 0x6247EB03A5585;
            when 31 result = 0x6623882552225;
            when 32 result = 0x6A09E667F3BCD;
            when 33 result = 0x6DFB23C651A2F;
            when 34 result = 0x71F75E8EC5F74;
            when 35 result = 0x75FEB564267C9;
            when 36 result = 0x7A11473EB0187;
            when 37 result = 0x7E2F336CF4E62;
            when 38 result = 0x82589994CCE13;
            when 39 result = 0x868D99B4492ED;
            when 40 result = 0x8ACE5422AA0DB;
            when 41 result = 0x8F1AE99157736;
            when 42 result = 0x93737B0CDC5E5;
            when 43 result = 0x97D829FDE4E50;
            when 44 result = 0x9C49182A3F090;
            when 45 result = 0xA0C667B5DE565;
            when 46 result = 0xA5503B23E255D;
            when 47 result = 0xA9E6B5579FDBF;
            when 48 result = 0xAE89F995AD3AD;
            when 49 result = 0xB33A2B84F15FB;
            when 50 result = 0xB7F76F2FB5E47;
            when 51 result = 0xBCC1E904BC1D2;
            when 52 result = 0xC199BDD85529C;
            when 53 result = 0xC67F12E57D14B;
            when 54 result = 0xCB720DCEF9069;
            when 55 result = 0xD072D4A07897C;
            when 56 result = 0xD5818DCFBA487;
            when 57 result = 0xDA9E603DB3285;
            when 58 result = 0xDFC97337B9B5F;
            when 59 result = 0xE502EE78B3FF6;
            when 60 result = 0xEA4AFA2A490DA;
            when 61 result = 0xEFA1BEE615A27;
            when 62 result = 0xF50765B6E4540;
            when 63 result = 0xFA7C1819E90D8;

    return result<N-1:0>;

Library pseudocode for aarch64/functions/sve/FPMinNormal

// FPMinNormal()
// =============

bits(N) FPMinNormal(bit sign)
    assert N IN {16,32,64};
    constant integer E = (if N == 16 then 5 elsif N == 32 then 8 else 11);
    constant integer F = N - (E + 1);
    exp  = Zeros(E-1):'1';
    frac = Zeros(F);
    return sign : exp : frac;

Library pseudocode for aarch64/functions/sve/FPOne

// FPOne()
// =======

bits(N) FPOne(bit sign)
    assert N IN {16,32,64};
    constant integer E = (if N == 16 then 5 elsif N == 32 then 8 else 11);
    constant integer F = N - (E + 1);
    exp  = '0':Ones(E-1);
    frac = Zeros(F);
    return sign : exp : frac;

Library pseudocode for aarch64/functions/sve/FPPointFive

// FPPointFive()
// =============

bits(N) FPPointFive(bit sign)
    assert N IN {16,32,64};
    constant integer E = (if N == 16 then 5 elsif N == 32 then 8 else 11);
    constant integer F = N - (E + 1);
    exp  = '0':Ones(E-2):'0';
    frac = Zeros(F);
    return sign : exp : frac;
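
FPOne(), FPPointFive() and FPMinNormal() only assemble sign:exponent:fraction fields. For N == 32 (E == 8, F == 23) they reduce to fixed encodings, which this C sketch checks against the float values they denote:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static float from_bits(uint32_t u) { float f; memcpy(&f, &u, sizeof f); return f; }

int main(void)
{
    uint32_t one  = 0x3F800000;  /* FPOne:       exp = 0111 1111, frac = 0 */
    uint32_t half = 0x3F000000;  /* FPPointFive: exp = 0111 1110, frac = 0 */
    uint32_t minn = 0x00800000;  /* FPMinNormal: exp = 0000 0001, frac = 0 */
    printf("%g %g %g\n", from_bits(one), from_bits(half), from_bits(minn));
    /* prints: 1 0.5 1.17549e-38 */
    return 0;
}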

Library pseudocode for aarch64/functions/sve/FPProcess

// FPProcess()
// ===========

bits(N) FPProcess(bits(N) input)
    bits(N) result;
    assert N IN {16,32,64};
    (fptype,sign,value) = FPUnpack(input, FPCR);
    if fptype == FPType_SNaN || fptype == FPType_QNaN then
        result = FPProcessNaN(fptype, input, FPCR);
    elsif fptype == FPType_Infinity then
        result = FPInfinity(sign);
    elsif fptype == FPType_Zero then
        result = FPZero(sign);
    else
        result = FPRound(value, FPCR);
    return result;

Library pseudocode for aarch64/functions/sve/FPScale

// FPScale()
// =========

bits(N) FPScale(bits(N) op, integer scale, FPCRType fpcr)
    assert N IN {16,32,64};
    (fptype,sign,value) = FPUnpack(op, fpcr);
    if fptype == FPType_SNaN || fptype == FPType_QNaN then
        result = FPProcessNaN(fptype, op, fpcr);
    elsif fptype == FPType_Zero then
        result = FPZero(sign);
    elsif fptype == FPType_Infinity then
        result = FPInfinity(sign);
    else
        result = FPRound(value * (2.0^scale), fpcr);
    return result;
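
For normal inputs FPScale() is value * 2^scale under the prevailing rounding mode, and its zero, infinity and NaN cases line up with the IEEE 754 scaleB operation, so C's scalbn() is a reasonable host-side model (a sketch, not a statement about any particular implementation):

#include <math.h>
#include <stdio.h>

int main(void)
{
    printf("%g\n", scalbn(3.0, 4));      /* 3 * 2^4 = 48 */
    printf("%g\n", scalbn(1.0, -1074));  /* underflows to the smallest subnormal double */
    return 0;
}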

Library pseudocode for aarch64/functions/sve/FPTrigMAdd

// FPTrigMAdd()
// ============

bits(N) FPTrigMAdd(integer x, bits(N) op1, bits(N) op2, FPCRType fpcr)
    assert N IN {16,32,64};
    assert x >= 0;
    assert x < 8;
    bits(N) coeff;

    if op2<N-1> == '1' then
        x = x + 8;
        op2<N-1> = '0';

    coeff  = FPTrigMAddCoefficient[x];
    result = FPMulAdd(coeff, op1, op2, fpcr);
    return result;

Library pseudocode for aarch64/functions/sve/FPTrigMAddCoefficient

// FPTrigMAddCoefficient()
// =======================

bits(N) FPTrigMAddCoefficient[integer index]
    assert N IN {16,32,64};
    integer result;

    if N == 16 then
        case index of
            when  0 result = 0x3c00;
            when  1 result = 0xb155;
            when  2 result = 0x2030;
            when  3 result = 0x0000;
            when  4 result = 0x0000;
            when  5 result = 0x0000;
            when  6 result = 0x0000;
            when  7 result = 0x0000;
            when  8 result = 0x3c00;
            when  9 result = 0xb800;
            when 10 result = 0x293a;
            when 11 result = 0x0000;
            when 12 result = 0x0000;
            when 13 result = 0x0000;
            when 14 result = 0x0000;
            when 15 result = 0x0000;
    elsif N == 32 then
        case index of
            when  0 result = 0x3f800000;
            when  1 result = 0xbe2aaaab;
            when  2 result = 0x3c088886;
            when  3 result = 0xb95008b9;
            when  4 result = 0x36369d6d;
            when  5 result = 0x00000000;
            when  6 result = 0x00000000;
            when  7 result = 0x00000000;
            when  8 result = 0x3f800000;
            when  9 result = 0xbf000000;
            when 10 result = 0x3d2aaaa6;
            when 11 result = 0xbab60705;
            when 12 result = 0x37cd37cc;
            when 13 result = 0x00000000;
            when 14 result = 0x00000000;
            when 15 result = 0x00000000;
    else // N == 64
        case index of
            when  0 result = 0x3ff0000000000000;
            when  1 result = 0xbfc5555555555543;
            when  2 result = 0x3f8111111110f30c;
            when  3 result = 0xbf2a01a019b92fc6;
            when  4 result = 0x3ec71de351f3d22b;
            when  5 result = 0xbe5ae5e2b60f7b91;
            when  6 result = 0x3de5d8408868552f;
            when  7 result = 0x0000000000000000;
            when  8 result = 0x3ff0000000000000;
            when  9 result = 0xbfe0000000000000;
            when 10 result = 0x3fa5555555555536;
            when 11 result = 0xbf56c16c16c13a0b;
            when 12 result = 0x3efa01a019b1e8d8;
            when 13 result = 0xbe927e4f7282f468;
            when 14 result = 0x3e21ee96d2641b13;
            when 15 result = 0xbda8f76380fbb401;

    return result<N-1:0>;

Library pseudocode for aarch64/functions/sve/FPTrigSMul

// FPTrigSMul()
// ============

bits(N) FPTrigSMul(bits(N) op1, bits(N) op2, FPCRType fpcr)
    assert N IN {16,32,64};
    result = FPMul(op1, op1, fpcr);
    (fptype, sign, value) = FPUnpack(result, fpcr);
    if (fptype != FPType_QNaN) && (fptype != FPType_SNaN) then
        result<N-1> = op2<0>;
    return result;

Library pseudocode for aarch64/functions/sve/FPTrigSSel

// FPTrigSSel()
// ============

bits(N) FPTrigSSel(bits(N) op1, bits(N) op2)
    assert N IN {16,32,64};
    bits(N) result;

    if op2<0> == '1' then
        result = FPOne(op2<1>);
    else
        result = op1;
        result<N-1> = result<N-1> EOR op2<1>;
    return result;
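
Modelled on C floats, the selection reads: op2<0> picks between 1.0 and op1, and op2<1> supplies a sign flip. A small sketch, using negation as a stand-in for the EOR on the sign bit:

#include <stdio.h>

static float fp_trig_ssel(float op1, unsigned op2)
{
    float r = (op2 & 1) ? 1.0f : op1;   /* result = FPOne(op2<1>) or op1 */
    if (op2 & 2) r = -r;                /* result<N-1> EOR op2<1> */
    return r;
}

int main(void)
{
    printf("%g %g %g\n", fp_trig_ssel(0.75f, 0),   /*  0.75 */
                         fp_trig_ssel(0.75f, 2),   /* -0.75 */
                         fp_trig_ssel(0.75f, 3));  /* -1    */
    return 0;
}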

Library pseudocode for aarch64/functions/sve/FirstActive

// FirstActive()
// =============

bit FirstActive(bits(N) mask, bits(N) x, integer esize)
    integer elements = N DIV (esize DIV 8);
    for e = 0 to elements-1
        if ElemP[mask, e, esize] == '1' then return ElemP[x, e, esize];
    return '0';
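
FirstActive() (and its counterpart LastActive() later in this section) scans the governing predicate and samples the result predicate at the first or last active element; the returned bit feeds the N condition flag after predicate-setting instructions. A C sketch using the byte-per-bit predicate representation from the ElemP example:

#include <stdint.h>
#include <stdio.h>

static int first_active(const uint8_t *mask, const uint8_t *x,
                        int elements, int esize)
{
    for (int e = 0; e < elements; e++)
        if (mask[e * (esize / 8)] & 1)          /* first active element in mask */
            return x[e * (esize / 8)] & 1;      /* report x at that element */
    return 0;                                   /* no active elements */
}

int main(void)
{
    uint8_t mask[8] = {0, 0, 1, 0, 1, 0, 0, 0}; /* elements 2 and 4 active (esize 8) */
    uint8_t x[8]    = {1, 1, 0, 1, 1, 1, 1, 1};
    printf("%d\n", first_active(mask, x, 8, 8)); /* element 2 of x is 0 */
    return 0;
}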

Library pseudocode for aarch64/functions/sve/FloorPow2

// FloorPow2()
// ===========
// For a positive integer X, return the largest power of 2 <= X

integer FloorPow2(integer x)
    assert x >= 0;
    integer n = 1;
    if x == 0 then return 0;
    while x >= 2^n do
        n = n + 1;
    return 2^(n - 1);

Library pseudocode for aarch64/functions/sve/HaveSVE

// HaveSVE()
// =========

boolean HaveSVE()
    return HasArchVersion(ARMv8p2) && boolean IMPLEMENTATION_DEFINED "Have SVE ISA";

Library pseudocode for aarch64/functions/sve/HaveSVEFP32MatMulExt

// HaveSVEFP32MatMulExt()
// ======================
// Returns TRUE if single-precision floating-point matrix multiply instruction support implemented and FALSE otherwise.

boolean HaveSVEFP32MatMulExt()
    return HaveSVE() && boolean IMPLEMENTATION_DEFINED "Have SVE FP32 Matrix Multiply extension";

Library pseudocode for aarch64/functions/sve/HaveSVEFP64MatMulExt

// HaveSVEFP64MatMulExt()
// ======================
// Returns TRUE if double-precision floating-point matrix multiply instruction support implemented and FALSE otherwise.

boolean HaveSVEFP64MatMulExt()
    return HaveSVE() && boolean IMPLEMENTATION_DEFINED "Have SVE FP64 Matrix Multiply extension";

Library pseudocode for aarch64/functions/sve/ImplementedSVEVectorLength

// ImplementedSVEVectorLength()
// ============================
// Reduce SVE vector length to a supported value (e.g. power of two)

integer ImplementedSVEVectorLength(integer nbits)
    return integer IMPLEMENTATION_DEFINED;

Library pseudocode for aarch64/functions/sve/IsEven

// IsEven()
// ========

boolean IsEven(integer val)
    return val MOD 2 == 0;

Library pseudocode for aarch64/functions/sve/IsFPEnabled

// IsFPEnabled()
// =============
// Returns TRUE if accesses to the Advanced SIMD and floating-point
// registers are enabled at the target exception level in the current
// execution state and FALSE otherwise.

boolean IsFPEnabled(bits(2) el)
    if ELUsingAArch32(el) then
        return AArch32.IsFPEnabled(el);
    else
        return AArch64.IsFPEnabled(el);

Library pseudocode for aarch64/functions/sve/IsSVEEnabled

// IsSVEEnabled()
// ==============
// Returns TRUE if access to SVE instructions and System registers is
// enabled at the target exception level and FALSE otherwise.

boolean IsSVEEnabled(bits(2) el)
    if ELUsingAArch32(el) then
        return FALSE;

    // Check if access disabled in CPACR_EL1
    if el IN {EL0, EL1} && !IsInHost() then
        // Check SVE at EL0/EL1
        case CPACR_EL1.ZEN of
            when 'x0' disabled = TRUE;
            when '01' disabled = el == EL0;
            when '11' disabled = FALSE;
        if disabled then return FALSE;

    // Check if access disabled in CPTR_EL2
    if el IN {EL0, EL1, EL2} && EL2Enabled() then
        if HaveVirtHostExt() && HCR_EL2.E2H == '1' then
            case CPTR_EL2.ZEN of
                when 'x0' disabled = TRUE;
                when '01' disabled = el == EL0 && HCR_EL2.TGE == '1';
                when '11' disabled = FALSE;
            if disabled then return FALSE;
        else
            if CPTR_EL2.TZ == '1' then return FALSE;

    // Check if access disabled in CPTR_EL3
    if HaveEL(EL3) then
        if CPTR_EL3.EZ == '0' then return FALSE;

    return TRUE;

Library pseudocode for aarch64/functions/sve/LastActive

// LastActive()
// ============

bit LastActive(bits(N) mask, bits(N) x, integer esize)
    integer elements = N DIV (esize DIV 8);
    for e = elements-1 downto 0
        if ElemP[mask, e, esize] == '1' then return ElemP[x, e, esize];
    return '0';

Library pseudocode for aarch64/functions/sve/LastActiveElement

// LastActiveElement()
// ===================

integer LastActiveElement(bits(N) mask, integer esize)
    assert esize IN {8, 16, 32, 64};
    integer elements = VL DIV esize;
    for e = elements-1 downto 0
        if ElemP[mask, e, esize] == '1' then return e;
    return -1;

Library pseudocode for aarch64/functions/sve/HaveSVEFP64MatMulExt

// HaveSVEFP64MatMulExt()
// ======================
// Returns TRUE if double-precision floating-point matrix multiply instruction support is implemented, and FALSE otherwise.

boolean HaveSVEFP64MatMulExt()
    return HaveSVE() && boolean IMPLEMENTATION_DEFINED "Have SVE FP64 Matrix Multiply extension";

Library pseudocode for aarch64/functions/sve/ImplementedSVEVectorLength

// ImplementedSVEVectorLength()
// ============================
// Reduce SVE vector length to a supported value (e.g. power of two)

integer ImplementedSVEVectorLength(integer nbits)
    return integer IMPLEMENTATION_DEFINED;

Library pseudocode for aarch64/functions/sve/IsEven

// IsEven()
// ========

boolean IsEven(integer val)
    return val MOD 2 == 0;

Library pseudocode for aarch64/functions/sve/IsFPEnabled

// IsFPEnabled()
// =============
// Returns TRUE if accesses to the Advanced SIMD and floating-point
// registers are enabled at the target exception level in the current
// execution state and FALSE otherwise.

boolean IsFPEnabled(bits(2) el)
    if ELUsingAArch32(el) then
        return AArch32.IsFPEnabled(el);
    else
        return AArch64.IsFPEnabled(el);

Library pseudocode for aarch64/functions/sve/IsSVEEnabled

// IsSVEEnabled()
// ==============
// Returns TRUE if access to SVE instructions and System registers is
// enabled at the target exception level and FALSE otherwise.

boolean IsSVEEnabled(bits(2) el)
    if ELUsingAArch32(el) then return FALSE;

    // Check if access disabled in CPACR_EL1
    if el IN {EL0, EL1} && !IsInHost() then
        // Check SVE at EL0/EL1
        case CPACR_EL1.ZEN of
            when 'x0' disabled = TRUE;
            when '01' disabled = el == EL0;
            when '11' disabled = FALSE;
        if disabled then return FALSE;

    // Check if access disabled in CPTR_EL2
    if el IN {EL0, EL1, EL2} && EL2Enabled() then
        if HaveVirtHostExt() && HCR_EL2.E2H == '1' then
            case CPTR_EL2.ZEN of
                when 'x0' disabled = TRUE;
                when '01' disabled = el == EL0 && HCR_EL2.TGE == '1';
                when '11' disabled = FALSE;
            if disabled then return FALSE;
        else
            if CPTR_EL2.TZ == '1' then return FALSE;

    // Check if access disabled in CPTR_EL3
    if HaveEL(EL3) then
        if CPTR_EL3.EZ == '0' then return FALSE;

    return TRUE;

Library pseudocode for aarch64/functions/sve/LastActiveNoneActive

// LastActive() // NoneActive() // ============ bit LastActive(bits(N) mask, bits(N) x, integer esize) NoneActive(bits(N) mask, bits(N) x, integer esize) integer elements = N DIV (esize DIV 8); for e = elements-1 downto 0 for e = 0 to elements-1 if ElemP[mask, e, esize] == '1' then return[mask, e, esize] == '1' && ElemP[x, e, esize]; return '0';[x, e, esize] == '1' then return '0'; return '1';

Library pseudocode for aarch64/functions/sve/LastActiveElementP

// LastActiveElement() // =================== // P[] - non-assignment form // ========================= integerbits(width) LastActiveElement(bits(N) mask, integer esize) assert esize IN {8, 16, 32, 64}; integer elements =P[integer n] assert n >= 0 && n <= 31; assert width == VLPL DIV esize; for e = elements-1 downto 0 if; return _P[n]<width-1:0>; // P[] - assignment form // ===================== P[integer n] = bits(width) value assert n >= 0 && n <= 31; assert width == PL; if ConstrainUnpredictableBool(Unpredictable_SVEZEROUPPER) then _P[n] = ZeroExtendElemP[mask, e, esize] == '1' then return e; return -1;(value); else _P[n]<width-1:0> = value;

Library pseudocode for aarch64/functions/sve/MAX_PLPL

constant integer// PL - non-assignment form // ======================== integer MAX_PL = 256;PL returnVL DIV 8;

Library pseudocode for aarch64/functions/sve/MAX_VLPredTest

constant integer// PredTest() // ========== bits(4) MAX_VL = 2048;PredTest(bits(N) mask, bits(N) result, integer esize) bit n =FirstActive(mask, result, esize); bit z = NoneActive(mask, result, esize); bit c = NOT LastActive(mask, result, esize); bit v = '0'; return n:z:c:v;

Library pseudocode for aarch64/functions/sve/MaybeZeroSVEUppersReducePredicated

// MaybeZeroSVEUppers() // ====================// ReducePredicated() // ================== bits(esize) MaybeZeroSVEUppers(bits(2) target_el) boolean lower_enabled; ifReducePredicated( UIntReduceOp(target_el) <=op, bits(N) input, bits(M) mask, bits(esize) identity) assert(N == M * 8); integer p2bits = UIntCeilPow2(PSTATE.EL) || !(N); bits(p2bits) operand; integer elements = p2bits DIV esize; for e = 0 to elements-1 if e * esize < N &&IsSVEEnabledElemP(target_el) then return; if target_el ==[mask, e, esize] == '1' then EL3Elem then if[operand, e, esize] = EL2EnabledElem() then lower_enabled =[input, e, esize]; else IsFPEnabledElem([operand, e, esize] = identity; returnEL2Reduce); else lower_enabled = IsFPEnabled(EL1); elsif target_el == EL2 then assert !ELUsingAArch32(EL2); if HCR_EL2.TGE == '0' then lower_enabled = IsFPEnabled(EL1); else lower_enabled = IsFPEnabled(EL0); else assert target_el == EL1 && !ELUsingAArch32(EL1); lower_enabled = IsFPEnabled(EL0); if lower_enabled then integer vl = if IsSVEEnabled(PSTATE.EL) then VL else 128; integer pl = vl DIV 8; for n = 0 to 31 if ConstrainUnpredictableBool(Unpredictable_SVEZEROUPPER) then _Z[n] = ZeroExtend(_Z[n]<vl-1:0>); for n = 0 to 15 if ConstrainUnpredictableBool(Unpredictable_SVEZEROUPPER) then _P[n] = ZeroExtend(_P[n]<pl-1:0>); if ConstrainUnpredictableBool(Unpredictable_SVEZEROUPPER) then _FFR = ZeroExtend(_FFR<pl-1:0>);(op, operand, esize);

Library pseudocode for aarch64/functions/sve/MemNFReverse

// MemNF[] - non-assignment form // ============================= // Reverse() // ========= // Reverse subwords of M bits in an N-bit word (bits(8*size), boolean) MemNF[bits(64) address, integer size,bits(N) AccType acctype] assert size IN {1, 2, 4, 8, 16}; bits(8*size) value; aligned = (address ==Reverse(bits(N) word, integer M) bits(N) result; integer sw = N DIV M; assert N == sw * M; for s = 0 to sw-1 AlignElem(address, size)); A =[result, sw - 1 - s, M] = SCTLRElem[].A; if !aligned && (A == '1') then return (bits(8*size) UNKNOWN, TRUE); atomic = aligned || size == 1; if !atomic then (value<7:0>, bad) = MemSingleNF[address, 1, acctype, aligned]; if bad then return (bits(8*size) UNKNOWN, TRUE); // For subsequent bytes it is CONSTRAINED UNPREDICTABLE whether an unaligned Device memory // access will generate an Alignment Fault, as to get this far means the first byte did // not, so we must be changing to a new translation page. if !aligned then c = ConstrainUnpredictable(Unpredictable_DEVPAGE2); assert c IN {Constraint_FAULT, Constraint_NONE}; if c == Constraint_NONE then aligned = TRUE; for i = 1 to size-1 (value<8*i+7:8*i>, bad) = MemSingleNF[address+i, 1, acctype, aligned]; if bad then return (bits(8*size) UNKNOWN, TRUE); else (value, bad) = MemSingleNF[address, size, acctype, aligned]; if bad then return (bits(8*size) UNKNOWN, TRUE); if BigEndian(acctype) then value = BigEndianReverse(value); return (value, FALSE);[word, s, M]; return result;

Library pseudocode for aarch64/functions/sve/MemSingleNFSVEAccessTrap

// MemSingleNF[] - non-assignment form // =================================== (bits(8*size), boolean) MemSingleNF[bits(64) address, integer size,// SVEAccessTrap() // =============== // Trapped access to SVE registers due to CPACR_EL1, CPTR_EL2, or CPTR_EL3. AccType acctype, boolean wasaligned] bits(8*size) value; boolean iswrite = FALSE;SVEAccessTrap(bits(2) target_el) assert AddressDescriptorUInt memaddrdesc; // Implementation may suppress NF load for any reason if(target_el) >= ConstrainUnpredictableBoolUInt((PSTATE.EL) && target_el !=Unpredictable_NONFAULTEL0) then return (bits(8*size) UNKNOWN, TRUE); // MMU or MPU memaddrdesc =&& AArch64.TranslateAddressHaveEL(address, acctype, iswrite, wasaligned, size); // Non-fault load from Device memory must not be performed externally if memaddrdesc.memattrs.memtype ==(target_el); route_to_el2 = target_el == MemType_DeviceEL1 then return (bits(8*size) UNKNOWN, TRUE); // Check for aborts or debug exceptions if&& IsFaultEL2Enabled(memaddrdesc) then return (bits(8*size) UNKNOWN, TRUE); () && HCR_EL2.TGE == '1'; // Memory array access accdesc = exception = CreateAccessDescriptorExceptionSyndrome(acctype); if( HaveMTEExtException_SVEAccessTrap() then if); bits(64) preferred_exception_return = AArch64.AccessIsTagCheckedThisInstrAddr(address, acctype) then bits(4) ptag =(); vect_offset = 0x0; if route_to_el2 then AArch64.PhysicalTagAArch64.TakeException(address); if !(, exception, preferred_exception_return, vect_offset); else AArch64.TakeExceptionAArch64.CheckTagEL2(memaddrdesc, ptag, iswrite) then return (bits(8*size) UNKNOWN, TRUE); value = _Mem[memaddrdesc, size, accdesc, iswrite]; return (value, FALSE);(target_el, exception, preferred_exception_return, vect_offset);

Library pseudocode for aarch64/functions/sve/NoneActive

// NoneActive()
// ============

bit NoneActive(bits(N) mask, bits(N) x, integer esize)
    integer elements = N DIV (esize DIV 8);
    for e = 0 to elements-1
        if ElemP[mask, e, esize] == '1' && ElemP[x, e, esize] == '1' then return '0';
    return '1';

Library pseudocode for aarch64/functions/sve/P

// P[] - non-assignment form
// =========================

bits(width) P[integer n]
    assert n >= 0 && n <= 31;
    assert width == PL;
    return _P[n]<width-1:0>;

// P[] - assignment form
// =====================

P[integer n] = bits(width) value
    assert n >= 0 && n <= 31;
    assert width == PL;
    if ConstrainUnpredictableBool(Unpredictable_SVEZEROUPPER) then
        _P[n] = ZeroExtend(value);
    else
        _P[n]<width-1:0> = value;

Library pseudocode for aarch64/functions/sve/PL

// PL - non-assignment form
// ========================

integer PL
    return VL DIV 8;

Library pseudocode for aarch64/functions/sve/PredTest

// PredTest()
// ==========

bits(4) PredTest(bits(N) mask, bits(N) result, integer esize)
    bit n = FirstActive(mask, result, esize);
    bit z = NoneActive(mask, result, esize);
    bit c = NOT LastActive(mask, result, esize);
    bit v = '0';
    return n:z:c:v;
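For intuition, the flag calculation can be modelled in Python with one boolean per predicate element (a simplification of the per-byte ElemP encoding; these helper names are editorial, not part of the library):

    def pred_test(mask, result):
        active = [m and r for m, r in zip(mask, result)]
        n = next((r for m, r in zip(mask, result) if m), False)      # FirstActive
        z = not any(active)                                          # NoneActive
        c = not next((r for m, r in zip(reversed(mask),
                                        reversed(result)) if m), False)  # NOT LastActive
        return (n, z, c, False)                                      # N:Z:C:V

    # all-true governing predicate; result predicate 0,1,1,0 over four elements
    assert pred_test([1, 1, 1, 1], [0, 1, 1, 0]) == (False, False, True, False)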

Library pseudocode for aarch64/functions/sve/ReducePredicated

// ReducePredicated()
// ==================

bits(esize) ReducePredicated(ReduceOp op, bits(N) input, bits(M) mask, bits(esize) identity)
    assert(N == M * 8);
    integer p2bits = CeilPow2(N);
    bits(p2bits) operand;
    integer elements = p2bits DIV esize;

    for e = 0 to elements-1
        if e * esize < N && ElemP[mask, e, esize] == '1' then
            Elem[operand, e, esize] = Elem[input, e, esize];
        else
            Elem[operand, e, esize] = identity;

    return Reduce(op, operand, esize);
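A sketch of the padding scheme in Python, with integer addition standing in for Reduce() (an assumption for illustration; the real ReduceOp and lane types are richer):

    def ceil_pow2(n: int) -> int:
        return 1 << (n - 1).bit_length()

    def reduce_predicated_add(inp, mask, identity=0):
        # pad the operand to a power-of-two lane count; inactive and
        # out-of-range lanes take the identity value, as above
        lanes = [inp[e] if e < len(inp) and mask[e] else identity
                 for e in range(ceil_pow2(len(inp)))]
        while len(lanes) > 1:   # pairwise tree reduction
            lanes = [lanes[i] + lanes[i + 1] for i in range(0, len(lanes), 2)]
        return lanes[0]

    assert reduce_predicated_add([1, 2, 3], [1, 0, 1]) == 4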

Library pseudocode for aarch64/functions/sve/Reverse

// Reverse()
// =========
// Reverse subwords of M bits in an N-bit word

bits(N) Reverse(bits(N) word, integer M)
    bits(N) result;
    integer sw = N DIV M;
    assert N == sw * M;
    for s = 0 to sw-1
        Elem[result, sw - 1 - s, M] = Elem[word, s, M];
    return result;
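The subword reversal is easy to sanity-check in Python (editorial sketch only):

    def reverse_subwords(word: int, n: int, m: int) -> int:
        # reverse the order of the n/m m-bit subwords of an n-bit word
        assert n % m == 0
        chunks = [(word >> (m * s)) & ((1 << m) - 1) for s in range(n // m)]
        out = 0
        for s, c in enumerate(reversed(chunks)):
            out |= c << (m * s)
        return out

    assert reverse_subwords(0x11223344, 32, 8) == 0x44332211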

Library pseudocode for aarch64/functions/sve/SVEAccessTrap

// SVEAccessTrap()
// ===============
// Trapped access to SVE registers due to CPACR_EL1, CPTR_EL2, or CPTR_EL3.

SVEAccessTrap(bits(2) target_el)
    assert UInt(target_el) >= UInt(PSTATE.EL) && target_el != EL0 && HaveEL(target_el);

    route_to_el2 = target_el == EL1 && EL2Enabled() && HCR_EL2.TGE == '1';
    exception = ExceptionSyndrome(Exception_SVEAccessTrap);
    bits(64) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x0;

    if route_to_el2 then
        AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
    else
        AArch64.TakeException(target_el, exception, preferred_exception_return, vect_offset);

Library pseudocode for aarch64/functions/sve/SVECmp

enumeration SVECmp { Cmp_EQ, Cmp_NE, Cmp_GE, Cmp_GT, Cmp_LT, Cmp_LE, Cmp_UN };

Library pseudocode for aarch64/functions/sve/SVEMoveMaskPreferred

// SVEMoveMaskPreferred()
// ======================
// Return FALSE if a bitmask immediate encoding would generate an immediate
// value that could also be represented by a single DUP instruction.
// Used as a condition for the preferred MOV<-DUPM alias.

boolean SVEMoveMaskPreferred(bits(13) imm13)
    bits(64) imm;
    (imm, -) = DecodeBitMasks(imm13<12>, imm13<5:0>, imm13<11:6>, TRUE);

    // Check for 8 bit immediates
    if !IsZero(imm<7:0>) then
        // Check for 'ffffffffffffffxy' or '00000000000000xy'
        if IsZero(imm<63:7>) || IsOnes(imm<63:7>) then
            return FALSE;
        // Check for 'ffffffxyffffffxy' or '000000xy000000xy'
        if imm<63:32> == imm<31:0> && (IsZero(imm<31:7>) || IsOnes(imm<31:7>)) then
            return FALSE;
        // Check for 'ffxyffxyffxyffxy' or '00xy00xy00xy00xy'
        if imm<63:32> == imm<31:0> && imm<31:16> == imm<15:0> && (IsZero(imm<15:7>) || IsOnes(imm<15:7>)) then
            return FALSE;
        // Check for 'xyxyxyxyxyxyxyxy'
        if imm<63:32> == imm<31:0> && imm<31:16> == imm<15:0> && (imm<15:8> == imm<7:0>) then
            return FALSE;
    // Check for 16 bit immediates
    else
        // Check for 'ffffffffffffxy00' or '000000000000xy00'
        if IsZero(imm<63:15>) || IsOnes(imm<63:15>) then
            return FALSE;
        // Check for 'ffffxy00ffffxy00' or '0000xy000000xy00'
        if imm<63:32> == imm<31:0> && (IsZero(imm<31:7>) || IsOnes(imm<31:7>)) then
            return FALSE;
        // Check for 'xy00xy00xy00xy00'
        if imm<63:32> == imm<31:0> && imm<31:16> == imm<15:0> then
            return FALSE;

    return TRUE;

Library pseudocode for aarch64/functions/sve/System

array bits(MAX_VL) _Z[0..31];
array bits(MAX_PL) _P[0..15];
bits(MAX_PL) _FFR;

Library pseudocode for aarch64/functions/sve/VL

// VL - non-assignment form
// ========================

integer VL
    integer vl;

    if PSTATE.EL == EL1 || (PSTATE.EL == EL0 && !IsInHost()) then
        vl = UInt(ZCR_EL1.LEN);

    if PSTATE.EL == EL2 || (PSTATE.EL == EL0 && IsInHost()) then
        vl = UInt(ZCR_EL2.LEN);
    elsif PSTATE.EL IN {EL0, EL1} && EL2Enabled() then
        vl = Min(vl, UInt(ZCR_EL2.LEN));

    if PSTATE.EL == EL3 then
        vl = UInt(ZCR_EL3.LEN);
    elsif HaveEL(EL3) then
        vl = Min(vl, UInt(ZCR_EL3.LEN));

    vl = (vl + 1) * 128;
    vl = ImplementedSVEVectorLength(vl);

    return vl;
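The EL0/EL1 path of this calculation, sketched in Python with example LEN values (the register fields and the constraining effect of ImplementedSVEVectorLength are modelled loosely here, not quoted from the specification):

    def effective_vl(zcr_el1_len, zcr_el2_len, el2_enabled=True,
                     implemented=lambda vl: vl):
        # running at EL1: start from ZCR_EL1.LEN, then apply the EL2 limit
        vl = zcr_el1_len
        if el2_enabled:
            vl = min(vl, zcr_el2_len)
        vl = (vl + 1) * 128          # LEN encodes multiples of 128 bits
        return implemented(vl)

    # EL1 asks for LEN=3 (512 bits) but EL2 caps it at LEN=1 (256 bits)
    assert effective_vl(3, 1) == 256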

Library pseudocode for aarch64/functions/sve/Z

// Z[] - non-assignment form
// =========================

bits(width) Z[integer n]
    assert n >= 0 && n <= 31;
    assert width == VL;
    return _Z[n]<width-1:0>;

// Z[] - assignment form
// =====================

Z[integer n] = bits(width) value
    assert n >= 0 && n <= 31;
    assert width == VL;
    if ConstrainUnpredictableBool(Unpredictable_SVEZEROUPPER) then
        _Z[n] = ZeroExtend(value);
    else
        _Z[n]<width-1:0> = value;

Library pseudocode for aarch64/functions/sysregisters/CNTKCTL

// CNTKCTL[] - non-assignment form
// ===============================

CNTKCTLType CNTKCTL[]
    bits(32) r;
    if IsInHost() then
        r = CNTHCTL_EL2;
        return r;
    r = CNTKCTL_EL1;
    return r;

Library pseudocode for aarch64/functions/sysregisters/CNTKCTLType

type CNTKCTLType;

Library pseudocode for aarch64/functions/sysregisters/CPACR

// CPACR[] - non-assignment form
// =============================

CPACRType CPACR[]
    bits(64) r;
    if IsInHost() then
        r = CPTR_EL2;
        return r;
    r = CPACR_EL1;
    return r;

Library pseudocode for aarch64/functions/sysregisters/CPACRType

type CPACRType;

Library pseudocode for aarch64/functions/sysregisters/ELR

// ELR[] - non-assignment form
// ===========================

bits(64) ELR[bits(2) el]
    bits(64) r;
    case el of
        when EL1 r = ELR_EL1;
        when EL2 r = ELR_EL2;
        when EL3 r = ELR_EL3;
        otherwise Unreachable();
    return r;

// ELR[] - non-assignment form
// ===========================

bits(64) ELR[]
    assert PSTATE.EL != EL0;
    return ELR[PSTATE.EL];

// ELR[] - assignment form
// =======================

ELR[bits(2) el] = bits(64) value
    bits(64) r = value;
    case el of
        when EL1 ELR_EL1 = r;
        when EL2 ELR_EL2 = r;
        when EL3 ELR_EL3 = r;
        otherwise Unreachable();
    return;

// ELR[] - assignment form
// =======================

ELR[] = bits(64) value
    assert PSTATE.EL != EL0;
    ELR[PSTATE.EL] = value;
    return;

Library pseudocode for aarch64/functions/sysregisters/ESR

// ESR[] - non-assignment form
// ===========================

ESRType ESR[bits(2) regime]
    bits(64) r;
    case regime of
        when EL1 r = ESR_EL1;
        when EL2 r = ESR_EL2;
        when EL3 r = ESR_EL3;
        otherwise Unreachable();
    return r;

// ESR[] - non-assignment form
// ===========================

ESRType ESR[]
    return ESR[S1TranslationRegime()];

// ESR[] - assignment form
// =======================

ESR[bits(2) regime] = ESRType value
    bits(64) r = value;
    case regime of
        when EL1 ESR_EL1 = r;
        when EL2 ESR_EL2 = r;
        when EL3 ESR_EL3 = r;
        otherwise Unreachable();
    return;

// ESR[] - assignment form
// =======================

ESR[] = ESRType value
    ESR[S1TranslationRegime()] = value;

Library pseudocode for aarch64/functions/sysregisters/ESRType

type ESRType;

Library pseudocode for aarch64/functions/sysregisters/FAR

// FAR[] - non-assignment form
// ===========================

bits(64) FAR[bits(2) regime]
    bits(64) r;
    case regime of
        when EL1 r = FAR_EL1;
        when EL2 r = FAR_EL2;
        when EL3 r = FAR_EL3;
        otherwise Unreachable();
    return r;

// FAR[] - non-assignment form
// ===========================

bits(64) FAR[]
    return FAR[S1TranslationRegime()];

// FAR[] - assignment form
// =======================

FAR[bits(2) regime] = bits(64) value
    bits(64) r = value;
    case regime of
        when EL1 FAR_EL1 = r;
        when EL2 FAR_EL2 = r;
        when EL3 FAR_EL3 = r;
        otherwise Unreachable();
    return;

// FAR[] - assignment form
// =======================

FAR[] = bits(64) value
    FAR[S1TranslationRegime()] = value;
    return;

Library pseudocode for aarch64/functions/sysregisters/MAIR

// MAIR[] - non-assignment form
// ============================

MAIRType MAIR[bits(2) regime]
    bits(64) r;
    case regime of
        when EL1 r = MAIR_EL1;
        when EL2 r = MAIR_EL2;
        when EL3 r = MAIR_EL3;
        otherwise Unreachable();
    return r;

// MAIR[] - non-assignment form
// ============================

MAIRType MAIR[]
    return MAIR[S1TranslationRegime()];

Library pseudocode for aarch64/functions/sysregisters/MAIRType

type MAIRType;

Library pseudocode for aarch64/functions/sysregisters/SCTLR

// SCTLR[] - non-assignment form
// =============================

SCTLRType SCTLR[bits(2) regime]
    bits(64) r;
    case regime of
        when EL1 r = SCTLR_EL1;
        when EL2 r = SCTLR_EL2;
        when EL3 r = SCTLR_EL3;
        otherwise Unreachable();
    return r;

// SCTLR[] - non-assignment form
// =============================

SCTLRType SCTLR[]
    return SCTLR[S1TranslationRegime()];

Library pseudocode for aarch64/functions/sysregisters/SCTLRType

type SCTLRType;

Library pseudocode for aarch64/functions/sysregisters/VBAR

// VBAR[] - non-assignment form
// ============================

bits(64) VBAR[bits(2) regime]
    bits(64) r;
    case regime of
        when EL1 r = VBAR_EL1;
        when EL2 r = VBAR_EL2;
        when EL3 r = VBAR_EL3;
        otherwise Unreachable();
    return r;

// VBAR[] - non-assignment form
// ============================

bits(64) VBAR[]
    return VBAR[S1TranslationRegime()];

Library pseudocode for aarch64/functions/system/AArch64.AllocationTagAccessIsEnabled

// AArch64.AllocationTagAccessIsEnabled()
// ======================================
// Check whether access to Allocation Tags is enabled.

boolean AArch64.AllocationTagAccessIsEnabled(AccType acctype)
    bits(2) el = AArch64.AccessUsesEL(acctype);

    if SCR_EL3.ATA == '0' && el IN {EL0, EL1, EL2} then
        return FALSE;
    elsif HCR_EL2.ATA == '0' && el IN {EL0, EL1} && EL2Enabled() && HCR_EL2.<E2H,TGE> != '11' then
        return FALSE;
    elsif SCTLR_EL3.ATA == '0' && el == EL3 then
        return FALSE;
    elsif SCTLR_EL2.ATA == '0' && el == EL2 then
        return FALSE;
    elsif SCTLR_EL1.ATA == '0' && el == EL1 then
        return FALSE;
    elsif SCTLR_EL2.ATA0 == '0' && el == EL0 && EL2Enabled() && HCR_EL2.<E2H,TGE> == '11' then
        return FALSE;
    elsif SCTLR_EL1.ATA0 == '0' && el == EL0 && !(EL2Enabled() && HCR_EL2.<E2H,TGE> == '11') then
        return FALSE;
    else
        return TRUE;

Library pseudocode for aarch64/functions/system/AArch64.CheckSystemAccess

// AArch64.CheckSystemAccess()
// ===========================

AArch64.CheckSystemAccess(bits(2) op0, bits(3) op1, bits(4) crn, bits(4) crm, bits(3) op2, bits(5) rt, bit read)
    return;

Library pseudocode for aarch64/functions/system/AArch64.ChooseNonExcludedTag

// AArch64.ChooseNonExcludedTag()
// ==============================
// Return a tag derived from the start and the offset values, excluding
// any tags in the given mask.

bits(4) AArch64.ChooseNonExcludedTag(bits(4) tag, bits(4) offset, bits(16) exclude)
    if IsOnes(exclude) then return '0000';

    if offset == '0000' then
        while exclude<UInt(tag)> == '1' do
            tag = tag + '0001';

    while offset != '0000' do
        offset = offset - '0001';
        tag = tag + '0001';
        while exclude<UInt(tag)> == '1' do
            tag = tag + '0001';

    return tag;
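A direct Python transliteration, useful for checking the wrap-around behaviour of the 4-bit tag arithmetic (editorial sketch only):

    def choose_non_excluded_tag(tag: int, offset: int, exclude: int) -> int:
        if exclude == 0xFFFF:
            return 0
        if offset == 0:
            while (exclude >> tag) & 1:
                tag = (tag + 1) % 16
        while offset != 0:
            offset -= 1
            tag = (tag + 1) % 16
            while (exclude >> tag) & 1:
                tag = (tag + 1) % 16
        return tag

    # with tags 0..2 excluded, start tag 15 and offset 1 wrap around to tag 3
    assert choose_non_excluded_tag(15, 1, 0b111) == 3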

Library pseudocode for aarch64/functions/system/AArch64.ExecutingATS1xPInstr

// AArch64.ExecutingATS1xPInstr()
// ==============================
// Return TRUE if current instruction is AT S1E1R/WP

boolean AArch64.ExecutingATS1xPInstr()
    if !HavePrivATExt() then return FALSE;

    instr = ThisInstr();
    if instr<22+:10> == '1101010100' then
        op1 = instr<16+:3>;
        CRn = instr<12+:4>;
        CRm = instr<8+:4>;
        op2 = instr<5+:3>;
        return op1 == '000' && CRn == '0111' && CRm == '1001' && op2 IN {'000','001'};
    else
        return FALSE;

Library pseudocode for aarch64/functions/system/AArch64.ExecutingBROrBLROrRetInstr

// AArch64.ExecutingBROrBLROrRetInstr()
// ====================================
// Returns TRUE if current instruction is a BR, BLR, RET, B[L]RA[B][Z], or RETA[B].

boolean AArch64.ExecutingBROrBLROrRetInstr()
    if !HaveBTIExt() then return FALSE;

    instr = ThisInstr();
    if instr<31:25> == '1101011' && instr<20:16> == '11111' then
        opc = instr<24:21>;
        return opc != '0101';
    else
        return FALSE;

Library pseudocode for aarch64/functions/system/AArch64.ExecutingBTIInstr

// AArch64.ExecutingBTIInstr()
// ===========================
// Returns TRUE if current instruction is a BTI.

boolean AArch64.ExecutingBTIInstr()
    if !HaveBTIExt() then return FALSE;

    instr = ThisInstr();
    if instr<31:22> == '1101010100' && instr<21:12> == '0000110010' && instr<4:0> == '11111' then
        CRm = instr<11:8>;
        op2 = instr<7:5>;
        return (CRm == '0100' && op2<0> == '0');
    else
        return FALSE;

Library pseudocode for aarch64/functions/system/AArch64.ExecutingERETInstr

// AArch64.ExecutingERETInstr()
// ============================
// Returns TRUE if current instruction is ERET.

boolean AArch64.ExecutingERETInstr()
    instr = ThisInstr();
    return instr<31:12> == '11010110100111110000';

Library pseudocode for aarch64/functions/system/AArch64.NextRandomTagBit

// AArch64.NextRandomTagBit()
// ==========================
// Generate a random bit suitable for generating a random Allocation Tag.

bit AArch64.NextRandomTagBit()
    bits(16) lfsr = RGSR_EL1.SEED;
    bit top = lfsr<5> EOR lfsr<3> EOR lfsr<2> EOR lfsr<0>;
    RGSR_EL1.SEED = top:lfsr<15:1>;
    return top;
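This is a 16-bit Fibonacci LFSR with taps at bits 5, 3, 2 and 0, shifting the feedback bit in at the top. A Python model of one step (the seed value below is an arbitrary example, not from the specification):

    def next_random_tag_bit(seed: int):
        top = ((seed >> 5) ^ (seed >> 3) ^ (seed >> 2) ^ seed) & 1
        return top, (top << 15) | (seed >> 1)   # (bit, updated 16-bit seed)

    seed, bits = 0xACE1, []
    for _ in range(4):                          # four bits form one tag,
        bit, seed = next_random_tag_bit(seed)   # as in AArch64.RandomTag()
        bits.append(bit)
    tag = sum(b << i for i, b in enumerate(bits))
    assert 0 <= tag <= 15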

Library pseudocode for aarch64/functions/system/AArch64.RandomTag

// AArch64.RandomTag()
// ===================
// Generate a random Allocation Tag.

bits(4) AArch64.RandomTag()
    bits(4) tag;
    for i = 0 to 3
        tag<i> = AArch64.NextRandomTagBit();
    return tag;

Library pseudocode for aarch64/functions/system/AArch64.SysInstr

// Execute a system instruction with write (source operand).
AArch64.SysInstr(integer op0, integer op1, integer crn, integer crm, integer op2, bits(64) val);

Library pseudocode for aarch64/functions/system/AArch64.SysInstrWithResult

// Execute a system instruction with read (result operand).
// Returns the result of the instruction.
bits(64) AArch64.SysInstrWithResult(integer op0, integer op1, integer crn, integer crm, integer op2);

Library pseudocode for aarch64/functions/system/AArch64.SysRegRead

// Read from a system register and return the contents of the register.
bits(64) AArch64.SysRegRead(integer op0, integer op1, integer crn, integer crm, integer op2);

Library pseudocode for aarch64/functions/system/AArch64.SysRegWrite

// Write to a system register.
AArch64.SysRegWrite(integer op0, integer op1, integer crn, integer crm, integer op2, bits(64) val);

Library pseudocode for aarch64/functions/system/BTypeCompatible

boolean BTypeCompatible;

Library pseudocode for aarch64/functions/system/BTypeCompatible_BTI

// BTypeCompatible_BTI
// ===================
// This function determines whether a given hint encoding is compatible with the current value of
// PSTATE.BTYPE. A value of TRUE here indicates a valid Branch Target Identification instruction.

boolean BTypeCompatible_BTI(bits(2) hintcode)
    case hintcode of
        when '00' return FALSE;
        when '01' return PSTATE.BTYPE != '11';
        when '10' return PSTATE.BTYPE != '10';
        when '11' return TRUE;
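The case statement is a small truth table; as a Python check (hint encodings and BTYPE values written as two-character strings, matching the bit patterns above):

    def btype_compatible_bti(hintcode: str, btype: str) -> bool:
        return {'00': False,
                '01': btype != '11',
                '10': btype != '10',
                '11': True}[hintcode]

    assert btype_compatible_bti('11', '10')        # hint '11' accepts any BTYPE
    assert not btype_compatible_bti('00', '01')    # hint '00' accepts none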

Library pseudocode for aarch64/functions/system/BTypeCompatible_PACIXSP

// BTypeCompatible_PACIXSP()
// =========================
// Returns TRUE if a PACIASP or PACIBSP instruction is implicitly compatible with PSTATE.BTYPE,
// FALSE otherwise.

boolean BTypeCompatible_PACIXSP()
    if PSTATE.BTYPE IN {'01', '10'} then
        return TRUE;
    elsif PSTATE.BTYPE == '11' then
        index = if PSTATE.EL == EL0 then 35 else 36;
        return SCTLR[]<index> == '0';
    else
        return FALSE;

Library pseudocode for aarch64/functions/system/BTypeNext

bits(2) BTypeNext;

Library pseudocode for aarch64/functions/system/ChooseRandomNonExcludedTag

// The ChooseRandomNonExcludedTag function is used when GCR_EL1.RRND == '1' to generate random
// Allocation Tags.
//
// The resulting Allocation Tag is selected from the set [0,15], excluding any Allocation Tag where
// exclude[tag_value] == 1. If 'exclude' is all Ones, the returned Allocation Tag is '0000'.
//
// This function is permitted to generate a non-deterministic selection from the set of non-excluded
// Allocation Tags. A reasonable implementation is described by the Pseudocode used when
// GCR_EL1.RRND is 0, but with a non-deterministic implementation of NextRandomTagBit().
// Implementations may choose to behave the same as GCR_EL1.RRND=0.

bits(4) ChooseRandomNonExcludedTag(bits(16) exclude);

Library pseudocode for aarch64/functions/system/InGuardedPage

boolean InGuardedPage;

Library pseudocode for aarch64/functions/system/SetBTypeCompatible

// SetBTypeCompatible()
// ====================
// Sets the value of BTypeCompatible global variable used by BTI

SetBTypeCompatible(boolean x)
    BTypeCompatible = x;

Library pseudocode for aarch64/functions/system/SetBTypeNext

// SetBTypeNext()
// ==============
// Set the value of BTypeNext global variable used by BTI

SetBTypeNext(bits(2) x)
    BTypeNext = x;

Library pseudocode for aarch64/instrs/branch/eret/AArch64.ExceptionReturn

// AArch64.ExceptionReturn()
// =========================

AArch64.ExceptionReturn(bits(64) new_pc, bits(64) spsr)
    SynchronizeContext();

    sync_errors = HaveIESB() && SCTLR[].IESB == '1';
    if HaveDoubleFaultExt() then
        sync_errors = sync_errors || (SCR_EL3.EA == '1' && SCR_EL3.NMEA == '1' && PSTATE.EL == EL3);
    if sync_errors then
        SynchronizeErrors();
        iesb_req = TRUE;
        TakeUnmaskedPhysicalSErrorInterrupts(iesb_req);

    // Attempts to change to an illegal state will invoke the Illegal Execution state mechanism
    bits(2) source_el = PSTATE.EL;
    SetPSTATEFromPSR(spsr);
    ClearExclusiveLocal(ProcessorID());
    SendEventLocal();

    if PSTATE.IL == '1' && spsr<4> == '1' && spsr<20> == '0' then
        // If the exception return is illegal, PC[63:32,1:0] are UNKNOWN
        new_pc<63:32> = bits(32) UNKNOWN;
        new_pc<1:0> = bits(2) UNKNOWN;
    elsif UsingAArch32() then    // Return to AArch32
        // ELR_ELx[1:0] or ELR_ELx[0] are treated as being 0, depending on the target instruction set state
        if PSTATE.T == '1' then
            new_pc<0> = '0';     // T32
        else
            new_pc<1:0> = '00';  // A32
    else                         // Return to AArch64
        // ELR_ELx[63:56] might include a tag
        new_pc = AArch64.BranchAddr(new_pc);

    if UsingAArch32() then
        // 32 most significant bits are ignored.
        BranchTo(new_pc<31:0>, BranchType_ERET);
    else
        BranchToAddr(new_pc, BranchType_ERET);

Library pseudocode for aarch64/instrs/countop/CountOp

enumeration CountOp {CountOp_CLZ, CountOp_CLS, CountOp_CNT};

Library pseudocode for aarch64/instrs/extendreg/DecodeRegExtend

// DecodeRegExtend()
// =================
// Decode a register extension option

ExtendType DecodeRegExtend(bits(3) op)
    case op of
        when '000' return ExtendType_UXTB;
        when '001' return ExtendType_UXTH;
        when '010' return ExtendType_UXTW;
        when '011' return ExtendType_UXTX;
        when '100' return ExtendType_SXTB;
        when '101' return ExtendType_SXTH;
        when '110' return ExtendType_SXTW;
        when '111' return ExtendType_SXTX;

Library pseudocode for aarch64/instrs/extendreg/ExtendReg

// ExtendReg()
// ===========
// Perform a register extension and shift

bits(N) ExtendReg(integer reg, ExtendType exttype, integer shift)
    assert shift >= 0 && shift <= 4;
    bits(N) val = X[reg];
    boolean unsigned;
    integer len;

    case exttype of
        when ExtendType_SXTB unsigned = FALSE; len = 8;
        when ExtendType_SXTH unsigned = FALSE; len = 16;
        when ExtendType_SXTW unsigned = FALSE; len = 32;
        when ExtendType_SXTX unsigned = FALSE; len = 64;
        when ExtendType_UXTB unsigned = TRUE;  len = 8;
        when ExtendType_UXTH unsigned = TRUE;  len = 16;
        when ExtendType_UXTW unsigned = TRUE;  len = 32;
        when ExtendType_UXTX unsigned = TRUE;  len = 64;

    // Note the extended width of the intermediate value and
    // that sign extension occurs from bit <len+shift-1>, not
    // from bit <len-1>. This is equivalent to the instruction
    //   [SU]BFIZ Rtmp, Rreg, #shift, #len
    // It may also be seen as a sign/zero extend followed by a shift:
    //   LSL(Extend(val<len-1:0>, N, unsigned), shift);
    len = Min(len, N - shift);
    return Extend(val<len-1:0> : Zeros(shift), N, unsigned);
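The extend-then-shift can be mimicked with plain integer arithmetic; a Python sketch for N = 64 (editorial, not part of the library):

    def extend_reg(val: int, signed: bool, length: int, shift: int, n: int = 64) -> int:
        length = min(length, n - shift)          # as in the Min(len, N - shift) step
        field = val & ((1 << length) - 1)
        if signed and (field >> (length - 1)):
            field -= 1 << length                 # sign-extend from bit length-1
        return (field << shift) & ((1 << n) - 1)

    # SXTB with shift 2: byte 0xFF is -1, giving -4 modulo 2^64
    assert extend_reg(0xFF, True, 8, 2) == (1 << 64) - 4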

Library pseudocode for aarch64/instrs/extendreg/ExtendType

enumeration ExtendType {ExtendType_SXTB, ExtendType_SXTH, ExtendType_SXTW, ExtendType_SXTX,
                        ExtendType_UXTB, ExtendType_UXTH, ExtendType_UXTW, ExtendType_UXTX};

Library pseudocode for aarch64/instrs/float/arithmetic/max-min/fpmaxminop/FPMaxMinOp

enumeration FPMaxMinOp {FPMaxMinOp_MAX, FPMaxMinOp_MIN, FPMaxMinOp_MAXNUM, FPMaxMinOp_MINNUM};

Library pseudocode for aarch64/instrs/float/arithmetic/unary/fpunaryop/FPUnaryOp

enumeration FPUnaryOp {FPUnaryOp_ABS, FPUnaryOp_MOV, FPUnaryOp_NEG, FPUnaryOp_SQRT};

Library pseudocode for aarch64/instrs/float/convert/fpconvop/FPConvOp

enumeration FPConvOp {FPConvOp_CVT_FtoI, FPConvOp_CVT_ItoF, FPConvOp_MOV_FtoI, FPConvOp_MOV_ItoF,
                      FPConvOp_CVT_FtoI_JS};

Library pseudocode for aarch64/instrs/integer/bitfield/bfxpreferred/BFXPreferred

// BFXPreferred()
// ==============
//
// Return TRUE if UBFX or SBFX is the preferred disassembly of a
// UBFM or SBFM bitfield instruction. Must exclude more specific
// aliases UBFIZ, SBFIZ, UXT[BH], SXT[BHW], LSL, LSR and ASR.

boolean BFXPreferred(bit sf, bit uns, bits(6) imms, bits(6) immr)

    // must not match UBFIZ/SBFIZ alias
    if UInt(imms) < UInt(immr) then
        return FALSE;

    // must not match LSR/ASR/LSL alias (imms == 31 or 63)
    if imms == sf:'11111' then
        return FALSE;

    // must not match UXTx/SXTx alias
    if immr == '000000' then
        // must not match 32-bit UXT[BH] or SXT[BH]
        if sf == '0' && imms IN {'000111', '001111'} then
            return FALSE;
        // must not match 64-bit SXT[BHW]
        if sf:uns == '10' && imms IN {'000111', '001111', '011111'} then
            return FALSE;

    // must be UBFX/SBFX alias
    return TRUE;

Library pseudocode for aarch64/instrs/integer/bitmasks/DecodeBitMasks

// DecodeBitMasks()
// ================
// Decode AArch64 bitfield and logical immediate masks which use a similar encoding structure

(bits(M), bits(M)) DecodeBitMasks(bit immN, bits(6) imms, bits(6) immr, boolean immediate)
    bits(64) tmask, wmask;
    bits(6) tmask_and, wmask_and;
    bits(6) tmask_or, wmask_or;
    bits(6) levels;

    // Compute log2 of element size
    // 2^len must be in range [2, M]
    len = HighestSetBit(immN:NOT(imms));
    if len < 1 then UNDEFINED;
    assert M >= (1 << len);

    // Determine S, R and S - R parameters
    levels = ZeroExtend(Ones(len), 6);

    // For logical immediates an all-ones value of S is reserved
    // since it would generate a useless all-ones result (many times)
    if immediate && (imms AND levels) == levels then
        UNDEFINED;

    S = UInt(imms AND levels);
    R = UInt(immr AND levels);
    diff = S - R;    // 6-bit subtract with borrow

    // From a software perspective, the remaining code is equivalent to:
    //   esize = 1 << len;
    //   d = UInt(diff<len-1:0>);
    //   welem = ZeroExtend(Ones(S + 1), esize);
    //   telem = ZeroExtend(Ones(d + 1), esize);
    //   wmask = Replicate(ROR(welem, R));
    //   tmask = Replicate(telem);
    //   return (wmask, tmask);

    // Compute "top mask"
    tmask_and = diff<5:0> OR NOT(levels);
    tmask_or = diff<5:0> AND levels;

    tmask = Ones(64);
    tmask = ((tmask AND Replicate(Replicate(tmask_and<0>, 1) : Ones(1), 32))
             OR Replicate(Zeros(1) : Replicate(tmask_or<0>, 1), 32));
    // optimization of first step:
    //   tmask = Replicate(tmask_and<0> : '1', 32);
    tmask = ((tmask AND Replicate(Replicate(tmask_and<1>, 2) : Ones(2), 16))
             OR Replicate(Zeros(2) : Replicate(tmask_or<1>, 2), 16));
    tmask = ((tmask AND Replicate(Replicate(tmask_and<2>, 4) : Ones(4), 8))
             OR Replicate(Zeros(4) : Replicate(tmask_or<2>, 4), 8));
    tmask = ((tmask AND Replicate(Replicate(tmask_and<3>, 8) : Ones(8), 4))
             OR Replicate(Zeros(8) : Replicate(tmask_or<3>, 8), 4));
    tmask = ((tmask AND Replicate(Replicate(tmask_and<4>, 16) : Ones(16), 2))
             OR Replicate(Zeros(16) : Replicate(tmask_or<4>, 16), 2));
    tmask = ((tmask AND Replicate(Replicate(tmask_and<5>, 32) : Ones(32), 1))
             OR Replicate(Zeros(32) : Replicate(tmask_or<5>, 32), 1));

    // Compute "wraparound mask"
    wmask_and = immr OR NOT(levels);
    wmask_or = immr AND levels;

    wmask = Zeros(64);
    wmask = ((wmask AND Replicate(Ones(1) : Replicate(wmask_and<0>, 1), 32))
             OR Replicate(Replicate(wmask_or<0>, 1) : Zeros(1), 32));
    // optimization of first step:
    //   wmask = Replicate(wmask_or<0> : '0', 32);
    wmask = ((wmask AND Replicate(Ones(2) : Replicate(wmask_and<1>, 2), 16))
             OR Replicate(Replicate(wmask_or<1>, 2) : Zeros(2), 16));
    wmask = ((wmask AND Replicate(Ones(4) : Replicate(wmask_and<2>, 4), 8))
             OR Replicate(Replicate(wmask_or<2>, 4) : Zeros(4), 8));
    wmask = ((wmask AND Replicate(Ones(8) : Replicate(wmask_and<3>, 8), 4))
             OR Replicate(Replicate(wmask_or<3>, 8) : Zeros(8), 4));
    wmask = ((wmask AND Replicate(Ones(16) : Replicate(wmask_and<4>, 16), 2))
             OR Replicate(Replicate(wmask_or<4>, 16) : Zeros(16), 2));
    wmask = ((wmask AND Replicate(Ones(32) : Replicate(wmask_and<5>, 32), 1))
             OR Replicate(Replicate(wmask_or<5>, 32) : Zeros(32), 1));

    if diff<6> != '0' then    // borrow from S - R
        wmask = wmask AND tmask;
    else
        wmask = wmask OR tmask;

    return (wmask<M-1:0>, tmask<M-1:0>);
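The "software perspective" comment above is the easiest form to experiment with. A Python sketch of the wmask half for M = 64 (editorial; tmask follows the same replicate pattern, and reserved encodings with len < 1 are UNDEFINED and not handled here):

    def ror(x, r, esize):
        r %= esize
        return ((x >> r) | (x << (esize - r))) & ((1 << esize) - 1)

    def decode_wmask(immN, imms, immr, m=64):
        length = ((immN << 6) | (~imms & 0x3F)).bit_length() - 1
        esize = 1 << length
        s, r = imms & (esize - 1), immr & (esize - 1)
        welem = (1 << (s + 1)) - 1           # S+1 ones in an esize-bit element
        elem = ror(welem, r, esize)
        wmask = 0
        for i in range(m // esize):          # Replicate()
            wmask |= elem << (i * esize)
        return wmask

    # esize 2, S=0, R=0: alternating ones
    assert decode_wmask(0, 0b111100, 0b000000) == 0x5555555555555555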

Library pseudocode for aarch64/instrs/integer/ins-ext/insert/movewide/movewideop/MoveWideOp

enumeration MoveWideOp {MoveWideOp_N, MoveWideOp_Z, MoveWideOp_K};

Library pseudocode for aarch64/instrs/integer/logical/movwpreferred/MoveWidePreferred

// MoveWidePreferred()
// ===================
//
// Return TRUE if a bitmask immediate encoding would generate an immediate
// value that could also be represented by a single MOVZ or MOVN instruction.
// Used as a condition for the preferred MOV<-ORR alias.

boolean MoveWidePreferred(bit sf, bit immN, bits(6) imms, bits(6) immr)
    integer S = UInt(imms);
    integer R = UInt(immr);
    integer width = if sf == '1' then 64 else 32;

    // element size must equal total immediate size
    if sf == '1' && immN:imms != '1xxxxxx' then
        return FALSE;
    if sf == '0' && immN:imms != '00xxxxx' then
        return FALSE;

    // for MOVZ must contain no more than 16 ones
    if S < 16 then
        // ones must not span halfword boundary when rotated
        return (-R MOD 16) <= (15 - S);

    // for MOVN must contain no more than 16 zeros
    if S >= width - 15 then
        // zeros must not span halfword boundary when rotated
        return (R MOD 16) <= (S - (width - 15));

    return FALSE;
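The two halfword tests in MoveWidePreferred() above are easy to misread; a small Python sketch of the same checks may help (illustrative only; move_wide_preferred is an invented name and the arguments are plain integers):

    def move_wide_preferred(sf, immN, imms, immr):
        S, R = imms, immr
        width = 64 if sf == 1 else 32
        # element size must equal total immediate size
        if sf == 1 and immN != 1:
            return False
        if sf == 0 and not (immN == 0 and (imms >> 5) == 0):
            return False
        if S < 16:                        # MOVZ: no more than 16 ones
            # ones must not span a halfword boundary when rotated
            return (-R % 16) <= (15 - S)
        if S >= width - 15:               # MOVN: no more than 16 zeros
            return (R % 16) <= (S - (width - 15))
        return False

Python's % already yields a non-negative result, matching the ASL MOD operator used above.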

Library pseudocode for aarch64/instrs/integer/shiftreg/DecodeShift

// DecodeShift()
// =============
// Decode shift encodings

ShiftType DecodeShift(bits(2) op)
    case op of
        when '00' return ShiftType_LSL;
        when '01' return ShiftType_LSR;
        when '10' return ShiftType_ASR;
        when '11' return ShiftType_ROR;

Library pseudocode for aarch64/instrs/integer/shiftreg/ShiftReg

// ShiftReg()
// ==========
// Perform shift of a register operand

bits(N) ShiftReg(integer reg, ShiftType shiftype, integer amount)
    bits(N) result = X[reg];
    case shiftype of
        when ShiftType_LSL result = LSL(result, amount);
        when ShiftType_LSR result = LSR(result, amount);
        when ShiftType_ASR result = ASR(result, amount);
        when ShiftType_ROR result = ROR(result, amount);
    return result;
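For readers without the shared LSL/LSR/ASR/ROR definitions to hand, the four shift primitives used by ShiftReg() above behave as in this Python sketch on N-bit values (illustrative helper names, assuming 0 <= n < N):

    def lsl(x, n, N): return (x << n) & ((1 << N) - 1)
    def lsr(x, n, N): return (x & ((1 << N) - 1)) >> n
    def asr(x, n, N):
        out = x >> n
        if (x >> (N - 1)) & 1:                       # replicate the sign bit
            out |= ((1 << N) - 1) ^ ((1 << (N - n)) - 1)
        return out
    def ror(x, n, N):
        n %= N
        return ((x >> n) | (x << (N - n))) & ((1 << N) - 1)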

Library pseudocode for aarch64/instrs/integer/shiftreg/ShiftType

enumeration ShiftType {ShiftType_LSL, ShiftType_LSR, ShiftType_ASR, ShiftType_ROR};

Library pseudocode for aarch64/instrs/logicalop/LogicalOp

enumeration LogicalOp {LogicalOp_AND, LogicalOp_EOR, LogicalOp_ORR};

Library pseudocode for aarch64/instrs/memory/memop/MemAtomicOp

enumeration MemAtomicOp {MemAtomicOp_ADD, MemAtomicOp_BIC, MemAtomicOp_EOR, MemAtomicOp_ORR,
                         MemAtomicOp_SMAX, MemAtomicOp_SMIN, MemAtomicOp_UMAX, MemAtomicOp_UMIN,
                         MemAtomicOp_SWP};

Library pseudocode for aarch64/instrs/memory/memop/MemOp

enumeration MemOp {MemOp_LOAD, MemOp_STORE, MemOp_PREFETCH};

Library pseudocode for aarch64/instrs/vector/arithmetic/binary/uniform/logical/bsl-eor/vbitop/VBitOp

enumeration VBitOp {VBitOp_VBIF, VBitOp_VBIT, VBitOp_VBSL, VBitOp_VEOR};

Library pseudocode for aarch64/instrs/memory/prefetch/Prefetch

// Prefetch()
// ==========
// Decode and execute the prefetch hint on ADDRESS specified by PRFOP

Prefetch(bits(64) address, bits(5) prfop)
    PrefetchHint hint;
    integer target;
    boolean stream;

    case prfop<4:3> of
        when '00' hint = Prefetch_READ;  // PLD: prefetch for load
        when '01' hint = Prefetch_EXEC;  // PLI: preload instructions
        when '10' hint = Prefetch_WRITE; // PST: prepare for store
        when '11' return;                // unallocated hint
    target = UInt(prfop<2:1>);           // target cache level
    stream = (prfop<0> != '0');          // streaming (non-temporal)
    Hint_Prefetch(address, hint, target, stream);
    return;

Library pseudocode for aarch64/instrs/vector/arithmetic/unary/cmp/compareop/CompareOp

enumeration CompareOp {CompareOp_GT, CompareOp_GE, CompareOp_EQ, CompareOp_LE, CompareOp_LT};
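The 5-bit prfop field decoded by Prefetch() above packs three subfields; a small Python sketch of the same decode (illustrative only; decode_prfop is an invented name, and the "L1..L3" labels reflect the usual assembler naming such as PLDL1KEEP):

    def decode_prfop(prfop):
        hints = {0: "PLD (prefetch for load)",
                 1: "PLI (preload instructions)",
                 2: "PST (prepare for store)"}
        kind = prfop >> 3               # prfop<4:3>
        if kind == 3:
            return None                 # unallocated hint: behaves as a NOP
        target = (prfop >> 1) & 0x3     # prfop<2:1>: target cache level
        stream = bool(prfop & 1)        # prfop<0>: streaming (non-temporal)
        return hints[kind], "L%d" % (target + 1), stream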

Library pseudocode for aarch64/instrs/system/barriers/barrierop/MemBarrierOp

enumeration MemBarrierOp {MemBarrierOp_DSB     // Data Synchronization Barrier
                          , MemBarrierOp_DMB   // Data Memory Barrier
                          , MemBarrierOp_ISB   // Instruction Synchronization Barrier
                          , MemBarrierOp_SSBB  // Speculative Synchronization Barrier to VA
                          , MemBarrierOp_PSSBB // Speculative Synchronization Barrier to PA
                          , MemBarrierOp_SB    // Speculation Barrier
                          };

Library pseudocode for aarch64/instrs/vector/logical/immediateop/ImmediateOp

enumeration ImmediateOp {ImmediateOp_MOVI, ImmediateOp_MVNI, ImmediateOp_ORR, ImmediateOp_BIC};

Library pseudocode for aarch64/instrs/system/hints/syshintop/SystemHintOp

enumeration SystemHintOp {SystemHintOp_NOP, SystemHintOp_YIELD, SystemHintOp_WFE, SystemHintOp_WFI,
                          SystemHintOp_SEV, SystemHintOp_SEVL, SystemHintOp_DGH, SystemHintOp_ESB,
                          SystemHintOp_PSB, SystemHintOp_TSB, SystemHintOp_BTI, SystemHintOp_WFET,
                          SystemHintOp_WFIT, SystemHintOp_CSDB};

Library pseudocode for aarch64/instrs/vector/reduce/reduceop/Reduce

// Reduce()
// ========

bits(esize) Reduce(ReduceOp op, bits(N) input, integer esize)
    integer half;
    bits(esize) hi;
    bits(esize) lo;
    bits(esize) result;

    if N == esize then
        return input<esize-1:0>;

    half = N DIV 2;
    hi = Reduce(op, input<N-1:half>, esize);
    lo = Reduce(op, input<half-1:0>, esize);

    case op of
        when ReduceOp_FMINNUM
            result = FPMinNum(lo, hi, FPCR);
        when ReduceOp_FMAXNUM
            result = FPMaxNum(lo, hi, FPCR);
        when ReduceOp_FMIN
            result = FPMin(lo, hi, FPCR);
        when ReduceOp_FMAX
            result = FPMax(lo, hi, FPCR);
        when ReduceOp_FADD
            result = FPAdd(lo, hi, FPCR);
        when ReduceOp_ADD
            result = lo + hi;

    return result;
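Reduce() above recurses on the two halves of the vector, giving a balanced pairwise reduction tree rather than a linear fold. A minimal Python sketch of the integer ReduceOp_ADD case (illustrative only; reduce_add is an invented name, and the element count is assumed to be a power of two):

    def reduce_add(elements):
        # elements[0] is the least significant element
        n = len(elements)
        if n == 1:
            return elements[0]
        half = n // 2
        hi = reduce_add(elements[half:])
        lo = reduce_add(elements[:half])
        return lo + hi

The tree shape is irrelevant for integer addition, but for the floating-point cases it fixes the architected result, since FP addition is not associative.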

Library pseudocode for aarch64/instrs/system/register/cpsr/pstatefield/PSTATEField

enumeration PSTATEField {PSTATEField_DAIFSet, PSTATEField_DAIFClr,
                         PSTATEField_PAN,  // Armv8.1
                         PSTATEField_UAO,  // Armv8.2
                         PSTATEField_DIT,  // Armv8.4
                         PSTATEField_SSBS,
                         PSTATEField_TCO,  // Armv8.5
                         PSTATEField_SP};

Library pseudocode for aarch64/instrs/vector/reduce/reduceop/ReduceOp

enumeration ReduceOp {ReduceOp_FMINNUM, ReduceOp_FMAXNUM, ReduceOp_FMIN, ReduceOp_FMAX,
                      ReduceOp_FADD, ReduceOp_ADD};

Library pseudocode for aarch64/instrs/system/sysops/sysop/SysOp

// SysOp()
// =======

SystemOp SysOp(bits(3) op1, bits(4) CRn, bits(4) CRm, bits(3) op2)
    case op1:CRn:CRm:op2 of
        when '000 0111 1000 000' return Sys_AT;   // S1E1R
        when '100 0111 1000 000' return Sys_AT;   // S1E2R
        when '110 0111 1000 000' return Sys_AT;   // S1E3R
        when '000 0111 1000 001' return Sys_AT;   // S1E1W
        when '100 0111 1000 001' return Sys_AT;   // S1E2W
        when '110 0111 1000 001' return Sys_AT;   // S1E3W
        when '000 0111 1000 010' return Sys_AT;   // S1E0R
        when '000 0111 1000 011' return Sys_AT;   // S1E0W
        when '100 0111 1000 100' return Sys_AT;   // S12E1R
        when '100 0111 1000 101' return Sys_AT;   // S12E1W
        when '100 0111 1000 110' return Sys_AT;   // S12E0R
        when '100 0111 1000 111' return Sys_AT;   // S12E0W
        when '011 0111 0100 001' return Sys_DC;   // ZVA
        when '000 0111 0110 001' return Sys_DC;   // IVAC
        when '000 0111 0110 010' return Sys_DC;   // ISW
        when '011 0111 1010 001' return Sys_DC;   // CVAC
        when '000 0111 1010 010' return Sys_DC;   // CSW
        when '011 0111 1011 001' return Sys_DC;   // CVAU
        when '011 0111 1110 001' return Sys_DC;   // CIVAC
        when '000 0111 1110 010' return Sys_DC;   // CISW
        when '011 0111 1101 001' return Sys_DC;   // CVADP
        when '000 0111 0001 000' return Sys_IC;   // IALLUIS
        when '000 0111 0101 000' return Sys_IC;   // IALLU
        when '011 0111 0101 001' return Sys_IC;   // IVAU
        when '100 1000 0000 001' return Sys_TLBI; // IPAS2E1IS
        when '100 1000 0000 101' return Sys_TLBI; // IPAS2LE1IS
        when '000 1000 0011 000' return Sys_TLBI; // VMALLE1IS
        when '100 1000 0011 000' return Sys_TLBI; // ALLE2IS
        when '110 1000 0011 000' return Sys_TLBI; // ALLE3IS
        when '000 1000 0011 001' return Sys_TLBI; // VAE1IS
        when '100 1000 0011 001' return Sys_TLBI; // VAE2IS
        when '110 1000 0011 001' return Sys_TLBI; // VAE3IS
        when '000 1000 0011 010' return Sys_TLBI; // ASIDE1IS
        when '000 1000 0011 011' return Sys_TLBI; // VAAE1IS
        when '100 1000 0011 100' return Sys_TLBI; // ALLE1IS
        when '000 1000 0011 101' return Sys_TLBI; // VALE1IS
        when '100 1000 0011 101' return Sys_TLBI; // VALE2IS
        when '110 1000 0011 101' return Sys_TLBI; // VALE3IS
        when '100 1000 0011 110' return Sys_TLBI; // VMALLS12E1IS
        when '000 1000 0011 111' return Sys_TLBI; // VAALE1IS
        when '100 1000 0100 001' return Sys_TLBI; // IPAS2E1
        when '100 1000 0100 101' return Sys_TLBI; // IPAS2LE1
        when '000 1000 0111 000' return Sys_TLBI; // VMALLE1
        when '100 1000 0111 000' return Sys_TLBI; // ALLE2
        when '110 1000 0111 000' return Sys_TLBI; // ALLE3
        when '000 1000 0111 001' return Sys_TLBI; // VAE1
        when '100 1000 0111 001' return Sys_TLBI; // VAE2
        when '110 1000 0111 001' return Sys_TLBI; // VAE3
        when '000 1000 0111 010' return Sys_TLBI; // ASIDE1
        when '000 1000 0111 011' return Sys_TLBI; // VAAE1
        when '100 1000 0111 100' return Sys_TLBI; // ALLE1
        when '000 1000 0111 101' return Sys_TLBI; // VALE1
        when '100 1000 0111 101' return Sys_TLBI; // VALE2
        when '110 1000 0111 101' return Sys_TLBI; // VALE3
        when '100 1000 0111 110' return Sys_TLBI; // VMALLS12E1
        when '000 1000 0111 111' return Sys_TLBI; // VAALE1
    return Sys_SYS;

Library pseudocode for aarch64/translation/attrs/AArch64.CombineS1S2Desc

// AArch64.CombineS1S2Desc()
// =========================
// Combines the address descriptors from stage 1 and stage 2

AddressDescriptor AArch64.CombineS1S2Desc(AddressDescriptor s1desc, AddressDescriptor s2desc,
                                          AccType s2acctype)

    AddressDescriptor result;

    result.paddress = s2desc.paddress;

    apply_force_writeback = HaveStage2MemAttrControl() && HCR_EL2.FWB == '1';

    if IsFault(s1desc) || IsFault(s2desc) then
        result = if IsFault(s1desc) then s1desc else s2desc;
    else
        result.fault = AArch64.NoFault();
        if s2desc.memattrs.memtype == MemType_Device || (
           (apply_force_writeback && s1desc.memattrs.memtype == MemType_Device &&
             s2desc.memattrs.inner.attrs != '10') ||
           (!apply_force_writeback && s1desc.memattrs.memtype == MemType_Device)) then
            result.memattrs.memtype = MemType_Device;
            if s1desc.memattrs.memtype == MemType_Normal then
                result.memattrs.device = s2desc.memattrs.device;
            elsif s2desc.memattrs.memtype == MemType_Normal then
                result.memattrs.device = s1desc.memattrs.device;
            else // Both Device
                result.memattrs.device = CombineS1S2Device(s1desc.memattrs.device,
                                                           s2desc.memattrs.device);
            result.memattrs.tagged = FALSE;
        else // S1 can be either Normal or Device, S2 is Normal.
            result.memattrs.memtype = MemType_Normal;
            result.memattrs.device = DeviceType UNKNOWN;
            result.memattrs.inner = CombineS1S2AttrHints(s1desc.memattrs.inner,
                                                         s2desc.memattrs.inner, s2acctype);
            result.memattrs.outer = CombineS1S2AttrHints(s1desc.memattrs.outer,
                                                         s2desc.memattrs.outer, s2acctype);
            result.memattrs.shareable = (s1desc.memattrs.shareable || s2desc.memattrs.shareable);
            result.memattrs.outershareable = (s1desc.memattrs.outershareable ||
                                              s2desc.memattrs.outershareable);
            result.memattrs.tagged = (s1desc.memattrs.tagged &&
                                      result.memattrs.inner.attrs == MemAttr_WB &&
                                      result.memattrs.inner.hints == MemHint_RWA &&
                                      result.memattrs.outer.attrs == MemAttr_WB &&
                                      result.memattrs.outer.hints == MemHint_RWA);
        result.memattrs = MemAttrDefaults(result.memattrs);

    return result;
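Setting the FWB (HCR_EL2.FWB) special case aside, the memory-type combining rule in AArch64.CombineS1S2Desc() above reduces to "Device wins", as in this one-line Python sketch (illustrative only; combine_memtype is an invented name):

    def combine_memtype(s1_is_device, s2_is_device):
        # Device is stickier than Normal: the result is Device
        # if either stage maps the location as Device memory
        return "Device" if (s1_is_device or s2_is_device) else "Normal"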

Library pseudocode for aarch64/instrs/system/sysops/sysop/SystemOp

enumeration SystemOp {Sys_AT, Sys_DC, Sys_IC, Sys_TLBI, Sys_SYS};

Library pseudocode for aarch64/translation/attrs/AArch64.InstructionDevice

// AArch64.InstructionDevice()
// ===========================
// Instruction fetches from memory marked as Device but not execute-never might generate a
// Permission Fault but are otherwise treated as if from Normal Non-cacheable memory.

AddressDescriptor AArch64.InstructionDevice(AddressDescriptor addrdesc, bits(64) vaddress,
                                            bits(52) ipaddress, integer level, AccType acctype,
                                            boolean iswrite, boolean secondstage, boolean s2fs1walk)

    c = ConstrainUnpredictable(Unpredictable_INSTRDEVICE);
    assert c IN {Constraint_NONE, Constraint_FAULT};

    if c == Constraint_FAULT then
        addrdesc.fault = AArch64.PermissionFault(ipaddress, boolean UNKNOWN, level, acctype,
                                                 iswrite, secondstage, s2fs1walk);
    else
        addrdesc.memattrs.memtype = MemType_Normal;
        addrdesc.memattrs.inner.attrs = MemAttr_NC;
        addrdesc.memattrs.inner.hints = MemHint_No;
        addrdesc.memattrs.outer = addrdesc.memattrs.inner;
        addrdesc.memattrs.tagged = FALSE;
        addrdesc.memattrs = MemAttrDefaults(addrdesc.memattrs);

    return addrdesc;

Library pseudocode for aarch64/instrs/system/sysops/tlbi/ASID_NONE

constant bits(16) ASID_NONE = Zeros();

Library pseudocode for aarch64/translation/attrs/AArch64.S1AttrDecode

// AArch64.S1AttrDecode()
// ======================
// Converts the Stage 1 attribute fields, using the MAIR, to orthogonal
// attributes and hints.

MemoryAttributes AArch64.S1AttrDecode(bits(2) SH, bits(3) attr, AccType acctype)

    MemoryAttributes memattrs;

    mair = MAIR[];
    index = 8 * UInt(attr);
    attrfield = mair<index+7:index>;

    memattrs.tagged = FALSE;
    if ((attrfield<7:4> != '0000' && attrfield<7:4> != '1111' && attrfield<3:0> == '0000') ||
        (attrfield<7:4> == '0000' && attrfield<3:0> != 'xx00')) then
        // Reserved, maps to an allocated value
        (-, attrfield) = ConstrainUnpredictableBits(Unpredictable_RESMAIR);
    if !HaveMTEExt() && attrfield<7:4> == '1111' && attrfield<3:0> == '0000' then
        // Reserved, maps to an allocated value
        (-, attrfield) = ConstrainUnpredictableBits(Unpredictable_RESMAIR);

    if attrfield<7:4> == '0000' then // Device
        memattrs.memtype = MemType_Device;
        case attrfield<3:0> of
            when '0000' memattrs.device = DeviceType_nGnRnE;
            when '0100' memattrs.device = DeviceType_nGnRE;
            when '1000' memattrs.device = DeviceType_nGRE;
            when '1100' memattrs.device = DeviceType_GRE;
            otherwise Unreachable(); // Reserved, handled above
    elsif attrfield<3:0> != '0000' then // Normal
        memattrs.memtype = MemType_Normal;
        memattrs.outer = LongConvertAttrsHints(attrfield<7:4>, acctype);
        memattrs.inner = LongConvertAttrsHints(attrfield<3:0>, acctype);
        memattrs.shareable = SH<1> == '1';
        memattrs.outershareable = SH == '10';
    elsif HaveMTEExt() && attrfield == '11110000' then // Normal, Tagged WB-RWA
        memattrs.memtype = MemType_Normal;
        memattrs.outer = LongConvertAttrsHints('1111', acctype); // WB_RWA
        memattrs.inner = LongConvertAttrsHints('1111', acctype); // WB_RWA
        memattrs.shareable = SH<1> == '1';
        memattrs.outershareable = SH == '10';
        memattrs.tagged = TRUE;
    else
        Unreachable(); // Reserved, handled above

    if ((HCR_EL2.VM == '1' || HCR_EL2.DC == '1') &&
        (PSTATE.EL == EL1 || (PSTATE.EL == EL0 && HCR_EL2.TGE == '0')) &&
        acctype != AccType_NV2REGISTER) then
        return memattrs;
    else
        return MemAttrDefaults(memattrs);
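An 8-bit MAIR attribute field, as consumed by AArch64.S1AttrDecode() above, splits into a high and a low nibble; a Python sketch of the non-reserved decodings (illustrative only; decode_mair_field is an invented name and reserved encodings are not handled):

    def decode_mair_field(attrfield):
        hi, lo = attrfield >> 4, attrfield & 0xF
        if hi == 0b0000:                    # Device memory
            return {0b0000: "Device-nGnRnE", 0b0100: "Device-nGnRE",
                    0b1000: "Device-nGRE",   0b1100: "Device-GRE"}[lo]
        if attrfield == 0b11110000:         # with MTE: Normal Tagged, WB-RWA
            return "Normal Tagged, Write-Back RWA"
        # otherwise Normal: hi nibble = outer attrs/hints, lo nibble = inner
        return "Normal (outer=%s, inner=%s)" % (bin(hi), bin(lo))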

Library pseudocode for aarch64/instrs/system/sysops/tlbi/Broadcast

// Broadcast
// =========
// IMPLEMENTATION DEFINED function to broadcast TLBI operation within the indicated shareability
// domain.

Broadcast(Shareability shareability, TLBIRecord r) IMPLEMENTATION_DEFINED;

Library pseudocode for aarch64/translation/attrs/AArch64.TranslateAddressS1Off

// AArch64.TranslateAddressS1Off()
// ===============================
// Called for stage 1 translations when translation is disabled to supply a default translation.
// Note that there are additional constraints on instruction prefetching that are not described in
// this pseudocode.

TLBRecord AArch64.TranslateAddressS1Off(bits(64) vaddress, AccType acctype, boolean iswrite)
    assert !ELUsingAArch32(S1TranslationRegime());

    TLBRecord result;

    Top = AddrTop(vaddress, (acctype == AccType_IFETCH), PSTATE.EL);
    if !IsZero(vaddress<Top:PAMax()>) then
        level = 0;
        ipaddress = bits(52) UNKNOWN;
        secondstage = FALSE;
        s2fs1walk = FALSE;
        result.addrdesc.fault = AArch64.AddressSizeFault(ipaddress, boolean UNKNOWN, level,
                                                         acctype, iswrite, secondstage, s2fs1walk);
        return result;

    default_cacheable = (HasS2Translation() && HCR_EL2.DC == '1');

    if default_cacheable then
        // Use default cacheable settings
        result.addrdesc.memattrs.memtype = MemType_Normal;
        result.addrdesc.memattrs.inner.attrs = MemAttr_WB; // Write-back
        result.addrdesc.memattrs.inner.hints = MemHint_RWA;
        result.addrdesc.memattrs.shareable = FALSE;
        result.addrdesc.memattrs.outershareable = FALSE;
        result.addrdesc.memattrs.tagged = HCR_EL2.DCT == '1';
    elsif acctype != AccType_IFETCH then
        // Treat data as Device
        result.addrdesc.memattrs.memtype = MemType_Device;
        result.addrdesc.memattrs.device = DeviceType_nGnRnE;
        result.addrdesc.memattrs.inner = MemAttrHints UNKNOWN;
        result.addrdesc.memattrs.tagged = FALSE;
    else
        // Instruction cacheability controlled by SCTLR_ELx.I
        cacheable = SCTLR[].I == '1';
        result.addrdesc.memattrs.memtype = MemType_Normal;
        if cacheable then
            result.addrdesc.memattrs.inner.attrs = MemAttr_WT;
            result.addrdesc.memattrs.inner.hints = MemHint_RA;
        else
            result.addrdesc.memattrs.inner.attrs = MemAttr_NC;
            result.addrdesc.memattrs.inner.hints = MemHint_No;
        result.addrdesc.memattrs.shareable = TRUE;
        result.addrdesc.memattrs.outershareable = TRUE;
        result.addrdesc.memattrs.tagged = FALSE;

    result.addrdesc.memattrs.outer = result.addrdesc.memattrs.inner;
    result.addrdesc.memattrs = MemAttrDefaults(result.addrdesc.memattrs);

    result.perms.ap = bits(3) UNKNOWN;
    result.perms.xn = '0';
    result.perms.pxn = '0';

    result.nG = bit UNKNOWN;
    result.contiguous = boolean UNKNOWN;
    result.domain = bits(4) UNKNOWN;
    result.level = integer UNKNOWN;
    result.blocksize = integer UNKNOWN;
    result.addrdesc.paddress.address = vaddress<51:0>;
    result.addrdesc.paddress.NS = if IsSecure() then '0' else '1';
    result.addrdesc.fault = AArch64.NoFault();
    result.descupdate.AF = FALSE;
    result.descupdate.AP = FALSE;
    result.descupdate.descaddr = result.addrdesc;
    return result;

Library pseudocode for aarch64/instrs/system/sysops/tlbi/HasLargeAddress

// HasLargeAddress()
// =================
// Returns TRUE if the regime is configured for 52 bit addresses, FALSE otherwise.

boolean HasLargeAddress(Regime regime)
    if !Have52BitIPAAndPASpaceExt() then
        return FALSE;
    case regime of
        when Regime_EL3  return TCR_EL3<32> == '1';
        when Regime_EL2  return TCR_EL2<32> == '1';
        when Regime_EL20 return TCR_EL2<59> == '1';
        when Regime_EL10 return TCR_EL1<59> == '1';
        otherwise Unreachable();

Library pseudocode for aarch64/translation/checks/AArch64.AccessIsPrivileged

// AArch64.AccessIsPrivileged()
// ============================

boolean AArch64.AccessIsPrivileged(AccType acctype)

    el = AArch64.AccessUsesEL(acctype);

    if el == EL0 then
        ispriv = FALSE;
    elsif el == EL3 then
        ispriv = TRUE;
    elsif el == EL2 && (!IsInHost() || HCR_EL2.TGE == '0') then
        ispriv = TRUE;
    elsif HaveUAOExt() && PSTATE.UAO == '1' then
        ispriv = TRUE;
    else
        ispriv = (acctype != AccType_UNPRIV);

    return ispriv;

Library pseudocode for aarch64/instrs/system/sysops/tlbi/Regime

enumeration Regime {Regime_EL10, // EL1&0
                    Regime_EL20, // EL2&0
                    Regime_EL2,  // EL2
                    Regime_EL3   // EL3
                   };

Library pseudocode for aarch64/translation/checks/AArch64.AccessUsesEL

// AArch64.AccessUsesEL()
// ======================
// Returns the Exception Level of the regime that will manage the translation for a given access type.

bits(2) AArch64.AccessUsesEL(AccType acctype)
    if acctype == AccType_UNPRIV then
        return EL0;
    elsif acctype == AccType_NV2REGISTER then
        return EL2;
    else
        return PSTATE.EL;

Library pseudocode for aarch64/instrs/system/sysops/tlbi/SecurityState

enumeration SecurityState {SS_NonSecure, SS_Secure};

Library pseudocode for aarch64/translation/checks/AArch64.CheckPermission

// AArch64.CheckPermission()
// =========================
// Function used for permission checking from AArch64 stage 1 translations

FaultRecord AArch64.CheckPermission(Permissions perms, bits(64) vaddress, integer level,
                                    bit NS, AccType acctype, boolean iswrite)
    assert !ELUsingAArch32(S1TranslationRegime());

    wxn = SCTLR[].WXN == '1';

    if (PSTATE.EL == EL0 || IsInHost() || (PSTATE.EL == EL1 && !HaveNV2Ext()) ||
        (PSTATE.EL == EL1 && HaveNV2Ext() &&
         (acctype != AccType_NV2REGISTER || !ELIsInHost(EL2)))) then
        priv_r = TRUE;
        priv_w = perms.ap<2> == '0';
        user_r = perms.ap<1> == '1';
        user_w = perms.ap<2:1> == '01';

        ispriv = AArch64.AccessIsPrivileged(acctype);

        pan = if HavePANExt() then PSTATE.PAN else '0';
        if (EL2Enabled() && ((PSTATE.EL == EL1 && HaveNVExt() && HCR_EL2.<NV, NV1> == '11') ||
            (HaveNV2Ext() && acctype == AccType_NV2REGISTER && HCR_EL2.NV2 == '1'))) then
            pan = '0';
        is_ldst   = !(acctype IN {AccType_DC, AccType_DC_UNPRIV, AccType_AT, AccType_IFETCH});
        is_ats1xp = (acctype == AccType_AT && AArch64.ExecutingATS1xPInstr());
        if pan == '1' && user_r && ispriv && (is_ldst || is_ats1xp) then
            priv_r = FALSE;
            priv_w = FALSE;

        user_xn = perms.xn == '1' || (user_w && wxn);
        priv_xn = perms.pxn == '1' || (priv_w && wxn) || user_w;

        if ispriv then
            (r, w, xn) = (priv_r, priv_w, priv_xn);
        else
            (r, w, xn) = (user_r, user_w, user_xn);
    else
        // Access from EL2 or EL3
        r = TRUE;
        w = perms.ap<2> == '0';
        xn = perms.xn == '1' || (w && wxn);

    // Restriction on Secure instruction fetch
    if HaveEL(EL3) && IsSecure() && NS == '1' && SCR_EL3.SIF == '1' then
        xn = TRUE;

    if acctype == AccType_IFETCH then
        fail = xn;
        failedread = TRUE;
    elsif acctype IN { AccType_ATOMICRW, AccType_ORDEREDRW, AccType_ORDEREDATOMICRW } then
        fail = !r || !w;
        failedread = !r;
    elsif iswrite then
        fail = !w;
        failedread = FALSE;
    elsif acctype == AccType_DC && PSTATE.EL != EL0 then
        // DC maintenance instructions operating by VA, cannot fault from stage 1 translation,
        // other than DC IVAC, which requires write permission, and operations executed at EL0,
        // which require read permission.
        fail = FALSE;
    else
        fail = !r;
        failedread = TRUE;

    if fail then
        secondstage = FALSE;
        s2fs1walk = FALSE;
        ipaddress = bits(52) UNKNOWN;
        return AArch64.PermissionFault(ipaddress, boolean UNKNOWN, level, acctype,
                                       !failedread, secondstage, s2fs1walk);
    else
        return AArch64.NoFault();
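The stage-1 AP-bit decode at the top of AArch64.CheckPermission() above can be read as a small truth table; a Python sketch (illustrative only; stage1_perms is an invented name, and ap holds the 2-bit perms.ap<2:1> field):

    def stage1_perms(ap):
        priv_r = True
        priv_w = (ap >> 1) & 1 == 0    # perms.ap<2> == '0'
        user_r = ap & 1 == 1           # perms.ap<1> == '1'
        user_w = ap == 0b01            # perms.ap<2:1> == '01'
        return priv_r, priv_w, user_r, user_w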

Library pseudocode for aarch64/instrs/system/sysops/tlbi/SecurityStateAtEL

// SecurityStateAtEL()
// ===================
// Returns the effective security state at the exception level based off current settings.

SecurityState SecurityStateAtEL(bits(2) EL)
    if !HaveEL(EL3) then
        if boolean IMPLEMENTATION_DEFINED "Secure-only implementation" then
            return SS_Secure;
        else
            return SS_NonSecure;
    elsif EL == EL3 then
        return SS_Secure;
    else
        // For EL2 call only when EL2 is enabled in current security state
        assert(EL != EL2 || EL2Enabled());
        if !ELUsingAArch32(EL3) then
            return if SCR_EL3.NS == '1' then SS_NonSecure else SS_Secure;
        else
            return if SCR.NS == '1' then SS_NonSecure else SS_Secure;

Library pseudocode for aarch64/translation/checks/AArch64.CheckS2Permission

// AArch64.CheckS2Permission()
// ===========================
// Function used for permission checking from AArch64 stage 2 translations

FaultRecord AArch64.CheckS2Permission(Permissions perms, bits(64) vaddress, bits(52) ipaddress,
                                      integer level, AccType acctype, boolean iswrite,
                                      boolean NS, boolean s2fs1walk, boolean hwupdatewalk)

    assert (IsSecureEL2Enabled() || (HaveEL(EL2) && !IsSecure() && !ELUsingAArch32(EL2))) &&
           HasS2Translation();

    r = perms.ap<1> == '1';
    w = perms.ap<2> == '1';
    if HaveExtendedExecuteNeverExt() then
        case perms.xn:perms.xxn of
            when '00' xn = FALSE;
            when '01' xn = PSTATE.EL == EL1;
            when '10' xn = TRUE;
            when '11' xn = PSTATE.EL == EL0;
    else
        xn = perms.xn == '1';
    // Stage 1 walk is checked as a read, regardless of the original type
    if acctype == AccType_IFETCH && !s2fs1walk then
        fail = xn;
        failedread = TRUE;
    elsif (acctype IN { AccType_ATOMICRW, AccType_ORDEREDRW, AccType_ORDEREDATOMICRW }) && !s2fs1walk then
        fail = !r || !w;
        failedread = !r;
    elsif iswrite && !s2fs1walk then
        fail = !w;
        failedread = FALSE;
    elsif acctype == AccType_DC && PSTATE.EL != EL0 && !s2fs1walk then
        // DC maintenance instructions operating by VA, with the exception of DC IVAC, do
        // not generate Permission faults from stage 2 translation, other than when
        // performing a stage 1 translation table walk.
        fail = FALSE;
    elsif hwupdatewalk then
        fail = !w;
        failedread = !iswrite;
    else
        fail = !r;
        failedread = !iswrite;

    if fail then
        domain = bits(4) UNKNOWN;
        secondstage = TRUE;
        return AArch64.PermissionFault(ipaddress, NS, level, acctype,
                                       !failedread, secondstage, s2fs1walk);
    else
        return AArch64.NoFault();
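When FEAT_XNX (HaveExtendedExecuteNeverExt) is implemented, the stage-2 execute-never decode in AArch64.CheckS2Permission() above uses the combined xn:xxn field; a Python sketch of that case table (illustrative only; stage2_xn is an invented name):

    def stage2_xn(xn, xxn, el):
        # returns True if execution from the location is forbidden at el
        case = (xn << 1) | xxn
        if case == 0b00: return False          # executable at EL1 and EL0
        if case == 0b01: return el == "EL1"    # EL0-only execute
        if case == 0b10: return True           # execute-never
        return el == "EL0"                     # '11': EL1-only execute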

Library pseudocode for aarch64/instrs/system/sysops/tlbi/Shareability

enumeration Shareability {Shareability_None, Shareability_Inner, Shareability_Outer};

Library pseudocode for aarch64/translation/debug/AArch64.CheckBreakpoint

// AArch64.CheckBreakpoint()
// =========================
// Called before executing the instruction of length "size" bytes at "vaddress" in an AArch64
// translation regime, when either debug exceptions are enabled, or halting debug is enabled
// and halting is allowed.

FaultRecord AArch64.CheckBreakpoint(bits(64) vaddress, AccType acctype, integer size)
    assert !ELUsingAArch32(S1TranslationRegime());
    assert (UsingAArch32() && size IN {2,4}) || size == 4;

    match = FALSE;

    for i = 0 to UInt(ID_AA64DFR0_EL1.BRPs)
        match_i = AArch64.BreakpointMatch(i, vaddress, acctype, size);
        match = match || match_i;

    if match && HaltOnBreakpointOrWatchpoint() then
        reason = DebugHalt_Breakpoint;
        Halt(reason);
    elsif match then
        acctype = AccType_IFETCH;
        iswrite = FALSE;
        return AArch64.DebugFault(acctype, iswrite);
    else
        return AArch64.NoFault();

Library pseudocode for aarch64/instrs/system/sysops/tlbi/TLBI

// TLBI
// ====
// IMPLEMENTATION DEFINED TLBI function.

TLBI(TLBIRecord r) IMPLEMENTATION_DEFINED;

Library pseudocode for aarch64/translation/debug/AArch64.CheckDebug

// AArch64.CheckDebug()
// ====================
// Called on each access to check for a debug exception or entry to Debug state.

FaultRecord AArch64.CheckDebug(bits(64) vaddress, AccType acctype, boolean iswrite, integer size)

    FaultRecord fault = AArch64.NoFault();
    d_side = (acctype != AccType_IFETCH);

    if HaveNV2Ext() && acctype == AccType_NV2REGISTER then
        mask = '0';
        generate_exception = (AArch64.GenerateDebugExceptionsFrom(EL2, IsSecure(), mask) &&
                              MDSCR_EL1.MDE == '1');
    else
        generate_exception = AArch64.GenerateDebugExceptions() && MDSCR_EL1.MDE == '1';
    halt = HaltOnBreakpointOrWatchpoint();

    if generate_exception || halt then
        if d_side then
            fault = AArch64.CheckWatchpoint(vaddress, acctype, iswrite, size);
        else
            fault = AArch64.CheckBreakpoint(vaddress, acctype, size);

    return fault;

Library pseudocode for aarch64/instrs/system/sysops/tlbi/TLBILevel

enumeration TLBILevel {TLBILevel_Any, TLBILevel_Last};

Library pseudocode for aarch64/translation/debug/AArch64.CheckWatchpoint

// AArch64.CheckWatchpoint()
// =========================
// Called before accessing the memory location of "size" bytes at "address",
// when either debug exceptions are enabled for the access, or halting debug
// is enabled and halting is allowed.

FaultRecord AArch64.CheckWatchpoint(bits(64) vaddress, AccType acctype,
                                    boolean iswrite, integer size)
    assert !ELUsingAArch32(S1TranslationRegime());

    match = FALSE;
    ispriv = AArch64.AccessIsPrivileged(acctype);

    for i = 0 to UInt(ID_AA64DFR0_EL1.WRPs)
        match = match || AArch64.WatchpointMatch(i, vaddress, size, ispriv, acctype, iswrite);

    if match && HaltOnBreakpointOrWatchpoint() then
        if acctype != AccType_NONFAULT && acctype != AccType_CNOTFIRST then
            reason = DebugHalt_Watchpoint;
            EDWAR = vaddress;
            Halt(reason);
        else
            // Fault will be reported and cancelled
            return AArch64.DebugFault(acctype, iswrite);
    elsif match then
        return AArch64.DebugFault(acctype, iswrite);
    else
        return AArch64.NoFault();

Library pseudocode for aarch64/instrs/system/sysops/tlbi/TLBIOp

enumeration TLBIOp {TLBIOp_ALL, TLBIOp_ASID, TLBIOp_IPAS2, TLBIOp_VAA, TLBIOp_VA,
                    TLBIOp_VMALL, TLBIOp_VMALLS12, TLBIOp_RIPAS2, TLBIOp_RVAA, TLBIOp_RVA};

Library pseudocode for aarch64/translation/faults/AArch64.AccessFlagFault

// AArch64.AccessFlagFault()
// =========================

FaultRecord AArch64.AccessFlagFault(bits(52) ipaddress, boolean NS, integer level,
                                    AccType acctype, boolean iswrite, boolean secondstage,
                                    boolean s2fs1walk)

    extflag = bit UNKNOWN;
    errortype = bits(2) UNKNOWN;
    return AArch64.CreateFaultRecord(Fault_AccessFlag, ipaddress, NS, level, acctype, iswrite,
                                     extflag, errortype, secondstage, s2fs1walk);

Library pseudocode for aarch64/instrs/system/sysops/tlbi/TLBIRecord

type TLBIRecord is (
    TLBIOp        op,
    SecurityState security,
    Regime        regime,
    bits(16)      vmid,
    bits(16)      asid,
    TLBILevel     level,
    TLBI_MemAttr  attr,
    FullAddress   address,     // VA/IPA/BaseAddress
    bits(64)      end_address, // for range operations, end address
    bits(2)       tg,          // for range - the TG parameter
    bits(4)       ttl
)

Library pseudocode for aarch64/translation/faults/AArch64.AddressSizeFault

// AArch64.AddressSizeFault()
// ==========================

FaultRecord AArch64.AddressSizeFault(bits(52) ipaddress, boolean NS, integer level,
                                     AccType acctype, boolean iswrite, boolean secondstage,
                                     boolean s2fs1walk)

    extflag = bit UNKNOWN;
    errortype = bits(2) UNKNOWN;
    return AArch64.CreateFaultRecord(Fault_AddressSize, ipaddress, NS, level, acctype, iswrite,
                                     extflag, errortype, secondstage, s2fs1walk);

Library pseudocode for aarch64/instrs/system/sysops/tlbi/TLBI_ALL

// TLBI_ALL()
// ==========
// Invalidates all entries for the indicated translation regime with the
// indicated security state for all TLBs within the indicated shareability domain.
// Invalidation applies to all applicable stage 1 and stage 2 entries.
// The indicated attr defines the attributes of the memory operations that must be completed in
// order to deem this operation to be completed.
// When attr is TLBI_ExcludeXS, only operations with XS=0 within the scope of this TLB operation
// are required to complete.

TLBI_ALL(SecurityState security, Regime regime, Shareability shareability, TLBI_MemAttr attr)
    assert PSTATE.EL IN {EL3, EL2};
    TLBIRecord r;
    r.op = TLBIOp_ALL;
    r.security = security;
    r.regime = regime;
    r.level = TLBILevel_Any;
    r.attr = attr;
    TLBI(r);
    if shareability != Shareability_None then
        Broadcast(shareability, r);
    return;

Library pseudocode for aarch64/translation/faults/AArch64.AlignmentFault

// AArch64.AlignmentFault()
// ========================

FaultRecord AArch64.AlignmentFault(AccType acctype, boolean iswrite, boolean secondstage)

    ipaddress = bits(52) UNKNOWN;
    level = integer UNKNOWN;
    extflag = bit UNKNOWN;
    errortype = bits(2) UNKNOWN;
    s2fs1walk = boolean UNKNOWN;
    return AArch64.CreateFaultRecord(Fault_Alignment, ipaddress, boolean UNKNOWN, level, acctype,
                                     iswrite, extflag, errortype, secondstage, s2fs1walk);

Library pseudocode for aarch64/instrs/system/sysops/tlbi/TLBI_ASID

// TLBI_ASID()
// ===========
// Invalidates all stage 1 entries matching the indicated VMID (where regime supports)
// and ASID in the parameter Xt in the indicated translation regime with the
// indicated security state for all TLBs within the indicated shareability domain.
// Note: stage 1 and stage 2 combined entries are in the scope of this operation.
// The indicated attr defines the attributes of the memory operations that must be completed in
// order to deem this operation to be completed.
// When attr is TLBI_ExcludeXS, only operations with XS=0 within the scope of this TLB operation
// are required to complete.

TLBI_ASID(SecurityState security, Regime regime, bits(16) vmid, Shareability shareability,
          TLBI_MemAttr attr, bits(64) Xt)
    assert PSTATE.EL IN {EL3, EL2, EL1};
    TLBIRecord r;
    r.op = TLBIOp_ASID;
    r.security = security;
    r.regime = regime;
    r.vmid = vmid;
    r.level = TLBILevel_Any;
    r.attr = attr;
    r.asid = Xt<63:48>;
    TLBI(r);
    if shareability != Shareability_None then
        Broadcast(shareability, r);
    return;

Library pseudocode for aarch64/translation/faults/AArch64.AsynchExternalAbort

// AArch64.AsynchExternalAbort()
// =============================
// Wrapper function for asynchronous external aborts

FaultRecord AArch64.AsynchExternalAbort(boolean parity, bits(2) errortype, bit extflag)

    faulttype = if parity then Fault_AsyncParity else Fault_AsyncExternal;
    ipaddress = bits(52) UNKNOWN;
    level = integer UNKNOWN;
    acctype = AccType_NORMAL;
    iswrite = boolean UNKNOWN;
    secondstage = FALSE;
    s2fs1walk = FALSE;
    return AArch64.CreateFaultRecord(faulttype, ipaddress, boolean UNKNOWN, level, acctype, iswrite,
                                     extflag, errortype, secondstage, s2fs1walk);

Library pseudocode for aarch64/instrs/system/sysops/tlbi/TLBI_IPAS2

// TLBI_IPAS2()
// ============
// Invalidate by IPA all stage 2 only TLB entries in the indicated shareability
// domain matching the indicated VMID in the indicated regime with the indicated security state.
// Note: stage 1 and stage 2 combined entries are not in the scope of this operation.
// IPA and related parameters are derived from Xt.
// When the indicated level is
//   TLBILevel_Any  : this applies to TLB entries at all levels
//   TLBILevel_Last : this applies to TLB entries at last level only
// The indicated attr defines the attributes of the memory operations that must be completed in
// order to deem this operation to be completed.
// When attr is TLBI_ExcludeXS, only operations with XS=0 within the scope of this TLB operation
// are required to complete.

TLBI_IPAS2(SecurityState security, Regime regime, bits(16) vmid, Shareability shareability,
           TLBILevel level, TLBI_MemAttr attr, bits(64) Xt)
    assert PSTATE.EL IN {EL3, EL2};
    TLBIRecord r;
    r.op = TLBIOp_IPAS2;
    r.security = security;
    r.regime = regime;
    r.vmid = vmid;
    r.level = level;
    r.attr = attr;
    r.ttl = Xt<47:44>;
    r.address.address = Xt<39:0> : Zeros(12);
    r.address.NS = if security == SS_NonSecure then '1' else Xt<63>;
    TLBI(r);
    if shareability != Shareability_None then
        Broadcast(shareability, r);
    return;

Library pseudocode for aarch64/translation/faults/AArch64.DebugFault

// AArch64.DebugFault()
// ====================

FaultRecord AArch64.DebugFault(AccType acctype, boolean iswrite)

    ipaddress = bits(52) UNKNOWN;
    errortype = bits(2) UNKNOWN;
    level = integer UNKNOWN;
    extflag = bit UNKNOWN;
    secondstage = FALSE;
    s2fs1walk = FALSE;
    return AArch64.CreateFaultRecord(Fault_Debug, ipaddress, boolean UNKNOWN, level, acctype,
                                     iswrite, extflag, errortype, secondstage, s2fs1walk);

Library pseudocode for aarch64/instrs/system/sysops/tlbi/TLBI_MemAttr

enumeration TLBI_MemAttr {TLBI_AllAttr, TLBI_ExcludeXS};

Library pseudocode for aarch64/translation/faults/AArch64.NoFault

// AArch64.NoFault()
// =================

FaultRecord AArch64.NoFault()

    ipaddress = bits(52) UNKNOWN;
    level = integer UNKNOWN;
    acctype = AccType_NORMAL;
    iswrite = boolean UNKNOWN;
    extflag = bit UNKNOWN;
    errortype = bits(2) UNKNOWN;
    secondstage = FALSE;
    s2fs1walk = FALSE;
    return AArch64.CreateFaultRecord(Fault_None, ipaddress, boolean UNKNOWN, level, acctype,
                                     iswrite, extflag, errortype, secondstage, s2fs1walk);

Library pseudocode for aarch64/instrs/system/sysops/tlbi/TLBI_RIPAS2

// TLBI_RIPAS2()
// =============
// Range invalidate by IPA all stage 2 only TLB entries in the indicated
// shareability domain matching the indicated VMID in the indicated regime with the indicated
// security state.
// Note: stage 1 and stage 2 combined entries are not in the scope of this operation.
// The range of IPA and related parameters are derived from Xt.
// When the indicated level is
//   TLBILevel_Any  : this applies to TLB entries at all levels
//   TLBILevel_Last : this applies to TLB entries at last level only
// The indicated attr defines the attributes of the memory operations that must be completed in
// order to deem this operation to be completed.
// When attr is TLBI_ExcludeXS, only operations with XS=0 within the scope of this TLB operation
// are required to complete.

TLBI_RIPAS2(SecurityState security, Regime regime, bits(16) vmid, Shareability shareability,
            TLBILevel level, TLBI_MemAttr attr, bits(64) Xt)
    assert PSTATE.EL IN {EL3, EL2, EL1};
    TLBIRecord r;
    r.op = TLBIOp_RIPAS2;
    r.security = security;
    r.regime = regime;
    r.vmid = vmid;
    r.level = level;
    r.attr = attr;
    r.ttl = Xt<47:44>;
    bits(64) start_address;
    boolean valid;
    (valid, r.tg, start_address, r.end_address) = TLBI_Range(regime, Xt);
    if !valid then return;
    r.address.address = start_address<51:0>;
    TLBI(r);
    if shareability != Shareability_None then
        Broadcast(shareability, r);
    return;

Library pseudocode for aarch64/translation/faults/AArch64.PermissionFault

// AArch64.PermissionFault()
// =========================

FaultRecord AArch64.PermissionFault(bits(52) ipaddress, boolean NS, integer level,
                                    AccType acctype, boolean iswrite, boolean secondstage,
                                    boolean s2fs1walk)

    extflag = bit UNKNOWN;
    errortype = bits(2) UNKNOWN;
    return AArch64.CreateFaultRecord(Fault_Permission, ipaddress, NS, level, acctype, iswrite,
                                     extflag, errortype, secondstage, s2fs1walk);

Library pseudocode for aarch64/instrs/system/sysops/tlbi/TLBI_RVA

// TLBI_RVA()
// ==========
// Range invalidate by VA range all stage 1 TLB entries in the indicated
// shareability domain matching the indicated VMID and ASID (where regime
// supports VMID, ASID) in the indicated regime with the indicated security state.
// ASID and range-related parameters are derived from Xt.
// Note: stage 1 and stage 2 combined entries are in the scope of this operation.
// When the indicated level is
//   TLBILevel_Any  : this applies to TLB entries at all levels
//   TLBILevel_Last : this applies to TLB entries at last level only
// The indicated attr defines the attributes of the memory operations that must be completed in
// order to deem this operation to be completed.
// When attr is TLBI_ExcludeXS, only operations with XS=0 within the scope of this TLB operation
// are required to complete.

TLBI_RVA(SecurityState security, Regime regime, bits(16) vmid, Shareability shareability,
         TLBILevel level, TLBI_MemAttr attr, bits(64) Xt)
    assert PSTATE.EL IN {EL3, EL2, EL1};
    TLBIRecord r;
    r.op = TLBIOp_RVA;
    r.security = security;
    r.regime = regime;
    r.vmid = vmid;
    r.level = level;
    r.attr = attr;
    r.asid = Xt<63:48>;
    r.ttl = Xt<47:44>;
    bits(64) start_address;
    boolean valid;
    (valid, r.tg, start_address, r.end_address) = TLBI_Range(regime, Xt);
    if !valid then return;
    r.address.address = start_address<51:0>;
    r.address.NS = if security == SS_NonSecure then '1' else Xt<63>;
    TLBI(r);
    if shareability != Shareability_None then
        Broadcast(shareability, r);
    return;

Library pseudocode for aarch64/translation/faults/AArch64.TranslationFault

// AArch64.TranslationFault()
// ==========================

FaultRecord AArch64.TranslationFault(bits(52) ipaddress, boolean NS, integer level,
                                     AccType acctype, boolean iswrite, boolean secondstage,
                                     boolean s2fs1walk)

    extflag = bit UNKNOWN;
    errortype = bits(2) UNKNOWN;
    return AArch64.CreateFaultRecord(Fault_Translation, ipaddress, NS, level, acctype, iswrite,
                                     extflag, errortype, secondstage, s2fs1walk);

Library pseudocode for aarch64/instrs/system/sysops/tlbi/TLBI_RVAA

// TLBI_RVAA()
// ===========
// Range invalidate by VA range all stage 1 TLB entries in the indicated
// shareability domain matching the indicated VMID (where regime supports VMID)
// and all ASID in the indicated regime with the indicated security state.
// VA range related parameters are derived from Xt.
// Note: stage 1 and stage 2 combined entries are in the scope of this operation.
// When the indicated level is
//   TLBILevel_Any  : this applies to TLB entries at all levels
//   TLBILevel_Last : this applies to TLB entries at last level only
// The indicated attr defines the attributes of the memory operations that must be completed in
// order to deem this operation to be completed.
// When attr is TLBI_ExcludeXS, only operations with XS=0 within the scope of this TLB operation
// are required to complete.

TLBI_RVAA(SecurityState security, Regime regime, bits(16) vmid, Shareability shareability,
          TLBILevel level, TLBI_MemAttr attr, bits(64) Xt)
    assert PSTATE.EL IN {EL3, EL2, EL1};
    TLBIRecord r;
    r.op = TLBIOp_RVAA;
    r.security = security;
    r.regime = regime;
    r.vmid = vmid;
    r.level = level;
    r.attr = attr;
    r.ttl = Xt<47:44>;
    bits(64) start_address;
    boolean valid;
    (valid, r.tg, start_address, r.end_address) = TLBI_Range(regime, Xt);
    if !valid then return;
    r.address.address = start_address<51:0>;
    TLBI(r);
    if shareability != Shareability_None then
        Broadcast(shareability, r);
    return;

Library pseudocode for aarch64/translation/translation/AArch64.CheckAndUpdateDescriptor

// AArch64.CheckAndUpdateDescriptor()
// ==================================
// Check and update translation table descriptor if hardware update is configured

FaultRecord AArch64.CheckAndUpdateDescriptor(DescriptorUpdate result, FaultRecord fault,
                                             boolean secondstage, bits(64) vaddress,
                                             AccType acctype, boolean iswrite,
                                             boolean s2fs1walk, boolean hwupdatewalk)

    boolean hw_update_AF = FALSE;
    boolean hw_update_AP = FALSE;
    // Check if access flag can be updated
    // Address translation instructions are permitted to update AF but not required
    if result.AF then
        if fault.statuscode == Fault_None ||
           ConstrainUnpredictable(Unpredictable_AFUPDATE) == Constraint_TRUE then
            hw_update_AF = TRUE;

    if result.AP && fault.statuscode == Fault_None then
        write_perm_req = (iswrite ||
                          acctype IN {AccType_ATOMICRW, AccType_ORDEREDRW,
                                      AccType_ORDEREDATOMICRW}) && !s2fs1walk;
        hw_update_AP = (write_perm_req &&
                        !(acctype IN {AccType_AT, AccType_DC, AccType_DC_UNPRIV})) || hwupdatewalk;

    if hw_update_AF || hw_update_AP then
        if secondstage || !HasS2Translation() then
            descaddr2 = result.descaddr;
        else
            hwupdatewalk = TRUE;
            descaddr2 = AArch64.SecondStageWalk(result.descaddr, vaddress, acctype, iswrite, 8,
                                                hwupdatewalk);
            if IsFault(descaddr2) then
                return descaddr2.fault;

        accdesc = CreateAccessDescriptor(AccType_ATOMICRW);
        desc = _Mem[descaddr2, 8, accdesc];
        el = AArch64.AccessUsesEL(acctype);
        case el of
            when EL3
                reversedescriptors = SCTLR_EL3.EE == '1';
            when EL2
                reversedescriptors = SCTLR_EL2.EE == '1';
            otherwise
                reversedescriptors = SCTLR_EL1.EE == '1';
        if reversedescriptors then
            desc = BigEndianReverse(desc);

        if hw_update_AF then
            desc<10> = '1';
        if hw_update_AP then
            desc<7> = (if secondstage then '1' else '0');

        _Mem[descaddr2, 8, accdesc] = if reversedescriptors then BigEndianReverse(desc) else desc;

    return fault;
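The hardware descriptor update performed by AArch64.CheckAndUpdateDescriptor() above touches two bits of the 64-bit descriptor: bit 10 (the Access Flag) and bit 7 (AP[2] at stage 1, set to 0 for writeable; S2AP[1] at stage 2, set to 1 for writeable). A Python sketch (illustrative only; update_descriptor is an invented name):

    def update_descriptor(desc, hw_update_AF, hw_update_AP, secondstage):
        if hw_update_AF:
            desc |= 1 << 10              # set the Access Flag
        if hw_update_AP:
            if secondstage:
                desc |= 1 << 7           # S2AP[1] = 1: mark writeable/dirty
            else:
                desc &= ~(1 << 7)        # AP[2] = 0: remove write protection
        return desc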

Library pseudocode for aarch64/instrs/system/sysops/tlbi/TLBI_Range

// TLBI_Range()
// ============
// Extract the input address range information from encoded Xt.

(boolean, bits(2), bits(64), bits(64)) TLBI_Range(Regime regime, bits(64) Xt)
    boolean valid = TRUE;
    bits(64) start = Zeros(64);
    bits(64) end = Zeros(64);
    bits(2) tg = Xt<47:46>;
    integer scale = UInt(Xt<45:44>);
    integer num = UInt(Xt<43:39>);
    integer tg_bits;

    if tg == '00' then
        return (FALSE, tg, start, end);

    case tg of
        when '01' // 4KB
            tg_bits = 12;
            if HasLargeAddress(regime) then
                start<52:16> = Xt<36:0>;
                start<63:53> = Replicate(Xt<36>, 11);
            else
                start<48:12> = Xt<36:0>;
                start<63:49> = Replicate(Xt<36>, 15);
        when '10' // 16KB
            tg_bits = 14;
            if HasLargeAddress(regime) then
                start<52:16> = Xt<36:0>;
                start<63:53> = Replicate(Xt<36>, 11);
            else
                start<50:14> = Xt<36:0>;
                start<63:51> = Replicate(Xt<36>, 13);
        when '11' // 64KB
            tg_bits = 16;
            start<52:16> = Xt<36:0>;
            start<63:53> = Replicate(Xt<36>, 11);
        otherwise
            Unreachable();

    integer range = (num+1) << (5*scale + 1 + tg_bits);
    end = start + range<63:0>;

    if end<52> != start<52> then // overflow, saturate it
        end = Replicate(start<52>, 64-52) : Ones(52);

    return (valid, tg, start, end);

Library pseudocode for aarch64/translation/translation/AArch64.FirstStageTranslate

// AArch64.FirstStageTranslate()
// =============================
// Perform a stage 1 translation walk. The function used by Address Translation operations is
// similar except it uses the translation regime specified for the instruction.

AddressDescriptor AArch64.FirstStageTranslate(bits(64) vaddress, AccType acctype, boolean iswrite,
                                              boolean wasaligned, integer size)

    if HaveNV2Ext() && acctype == AccType_NV2REGISTER then
        s1_enabled = SCTLR_EL2.M == '1';
    elsif HasS2Translation() then
        s1_enabled = HCR_EL2.TGE == '0' && HCR_EL2.DC == '0' && SCTLR_EL1.M == '1';
    else
        s1_enabled = SCTLR[].M == '1';

    TLBRecord S1;
    S1.addrdesc.fault = AArch64.NoFault();
    ipaddress = bits(52) UNKNOWN;
    secondstage = FALSE;
    s2fs1walk = FALSE;

    if s1_enabled then // First stage enabled
        S1 = AArch64.TranslationTableWalk(ipaddress, TRUE, vaddress, acctype, iswrite,
                                          secondstage, s2fs1walk, size);
        permissioncheck = TRUE;
        if acctype == AccType_IFETCH then
            InGuardedPage = S1.GP == '1'; // Global state updated on instruction fetch that denotes
                                          // if the fetched instruction is from a guarded page.
    else
        S1 = AArch64.TranslateAddressS1Off(vaddress, acctype, iswrite);
        permissioncheck = FALSE;
        InGuardedPage = FALSE; // No memory is guarded when stage 1 address translation is disabled

    if (!IsFault(S1.addrdesc) && UsingAArch32() && HaveTrapLoadStoreMultipleDeviceExt() &&
        AArch32.ExecutingLSMInstr()) then
        if (S1.addrdesc.memattrs.memtype == MemType_Device &&
            S1.addrdesc.memattrs.device != DeviceType_GRE) then
            nTLSMD = if S1TranslationRegime() == EL2 then SCTLR_EL2.nTLSMD else SCTLR_EL1.nTLSMD;
            if nTLSMD == '0' then
                S1.addrdesc.fault = AArch64.AlignmentFault(acctype, iswrite, secondstage);

    // Check for unaligned data accesses to Device memory
    if (((!wasaligned && acctype != AccType_IFETCH) || (acctype == AccType_DCZVA)) &&
        !IsFault(S1.addrdesc) && S1.addrdesc.memattrs.memtype == MemType_Device) then
        S1.addrdesc.fault = AArch64.AlignmentFault(acctype, iswrite, secondstage);

    if !IsFault(S1.addrdesc) && permissioncheck then
        S1.addrdesc.fault = AArch64.CheckPermission(S1.perms, vaddress, S1.level,
                                                    S1.addrdesc.paddress.NS, acctype, iswrite);

    // Check for instruction fetches from Device memory not marked as execute-never. If there has
    // not been a Permission Fault then the memory is not marked execute-never.
    if (!IsFault(S1.addrdesc) && S1.addrdesc.memattrs.memtype == MemType_Device &&
        acctype == AccType_IFETCH) then
        S1.addrdesc = AArch64.InstructionDevice(S1.addrdesc, vaddress, ipaddress, S1.level,
                                                acctype, iswrite, secondstage, s2fs1walk);

    // Check and update translation table descriptor if required
    hwupdatewalk = FALSE;
    s2fs1walk = FALSE;
    S1.addrdesc.fault = AArch64.CheckAndUpdateDescriptor(S1.descupdate, S1.addrdesc.fault,
                                                         secondstage, vaddress, acctype,
                                                         iswrite, s2fs1walk, hwupdatewalk);

    return S1.addrdesc;
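The size of the range invalidated by TLBI_Range() above is a direct formula in the SCALE and NUM fields of Xt; a quick Python check (illustrative only; tlbi_range_bytes is an invented name):

    def tlbi_range_bytes(num, scale, tg_bits):
        # matches: range = (num+1) << (5*scale + 1 + tg_bits)
        return (num + 1) << (5 * scale + 1 + tg_bits)

    # a 4KB granule (tg_bits = 12) with scale = 0 and num = 0 covers 8KB,
    # i.e. two 4KB pages starting at the decoded base address
    assert tlbi_range_bytes(0, 0, 12) == 2 * 4096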

Library pseudocode for aarch64/instrs/system/sysops/tlbi/TLBI_VA

// TLBI_VA()
// =========
// Invalidate by VA all stage 1 TLB entries in the indicated shareability domain
// matching the indicated VMID and ASID (where regime supports VMID, ASID) in the indicated regime
// with the indicated security state.
// ASID, VA and related parameters are derived from Xt.
// Note: stage 1 and stage 2 combined entries are in the scope of this operation.
// When the indicated level is
//   TLBILevel_Any  : this applies to TLB entries at all levels
//   TLBILevel_Last : this applies to TLB entries at last level only
// The indicated attr defines the attributes of the memory operations that must be completed in
// order to deem this operation to be completed.
// When attr is TLBI_ExcludeXS, only operations with XS=0 within the scope of this TLB operation
// are required to complete.

TLBI_VA(SecurityState security, Regime regime, bits(16) vmid, Shareability shareability,
        TLBILevel level, TLBI_MemAttr attr, bits(64) Xt)
    assert PSTATE.EL IN {EL3, EL2, EL1};
    TLBIRecord r;
    r.op = TLBIOp_VA;
    r.security = security;
    r.regime = regime;
    r.vmid = vmid;
    r.level = level;
    r.attr = attr;
    r.asid = Xt<63:48>;
    r.ttl = Xt<47:44>;
    r.address.address = Xt<39:0> : Zeros(12);
    TLBI(r);
    if shareability != Shareability_None then
        Broadcast(shareability, r);
    return;

Library pseudocode for aarch64/translation/translation/AArch64.FullTranslate

// AArch64.FullTranslate()
// =======================
// Perform both stage 1 and stage 2 translation walks for the current translation regime. The
// function used by Address Translation operations is similar except it uses the translation
// regime specified for the instruction.

AddressDescriptor AArch64.FullTranslate(bits(64) vaddress, AccType acctype, boolean iswrite,
                                        boolean wasaligned, integer size)

    // First Stage Translation
    S1 = AArch64.FirstStageTranslate(vaddress, acctype, iswrite, wasaligned, size);
    if (!IsFault(S1) && !(HaveNV2Ext() && acctype == AccType_NV2REGISTER) &&
        HasS2Translation()) then
        s2fs1walk = FALSE;
        hwupdatewalk = FALSE;
        result = AArch64.SecondStageTranslate(S1, vaddress, acctype, iswrite, wasaligned,
                                              s2fs1walk, size, hwupdatewalk);
    else
        result = S1;

    return result;

Library pseudocode for aarch64/instrs/system/sysops/tlbi/TLBI_VAA

// TLBI_VAA()
// ==========
// Invalidate by VA all stage 1 TLB entries in the indicated shareability domain
// matching the indicated VMID (where regime supports VMID) and all ASID in the indicated regime
// with the indicated security state.
// VA and related parameters are derived from Xt.
// Note: stage 1 and stage 2 combined entries are in the scope of this operation.
// When the indicated level is
//   TLBILevel_Any  : this applies to TLB entries at all levels
//   TLBILevel_Last : this applies to TLB entries at last level only
// The indicated attr defines the attributes of the memory operations that must be completed in
// order to deem this operation to be completed.
// When attr is TLBI_ExcludeXS, only operations with XS=0 within the scope of this TLB operation
// are required to complete.

TLBI_VAA(SecurityState security, Regime regime, bits(16) vmid, Shareability shareability,
         TLBILevel level, TLBI_MemAttr attr, bits(64) Xt)
    assert PSTATE.EL IN {EL3, EL2, EL1};
    TLBIRecord r;
    r.op = TLBIOp_VAA;
    r.security = security;
    r.regime = regime;
    r.vmid = vmid;
    r.level = level;
    r.attr = attr;
    r.ttl = Xt<47:44>;
    r.address.address = Xt<39:0> : Zeros(12);
    TLBI(r);
    if shareability != Shareability_None then
        Broadcast(shareability, r);
    return;

Library pseudocode for aarch64/translation/translation/AArch64.SecondStageTranslate

// AArch64.SecondStageTranslate()
// ==============================
// Perform a stage 2 translation walk. The function used by Address Translation operations is
// similar except it uses the translation regime specified for the instruction.

AddressDescriptor AArch64.SecondStageTranslate(AddressDescriptor S1, bits(64) vaddress,
                                               AccType acctype, boolean iswrite,
                                               boolean wasaligned, boolean s2fs1walk,
                                               integer size, boolean hwupdatewalk)
    assert HasS2Translation();

    s2_enabled = HCR_EL2.VM == '1' || HCR_EL2.DC == '1';
    secondstage = TRUE;

    if s2_enabled then // Second stage enabled
        ipaddress = S1.paddress.address<51:0>;
        NS = S1.paddress.NS == '1';
        S2 = AArch64.TranslationTableWalk(ipaddress, NS, vaddress, acctype, iswrite,
                                          secondstage, s2fs1walk, size);

        // Check for unaligned data accesses to Device memory
        if (((!wasaligned && acctype != AccType_IFETCH) || (acctype == AccType_DCZVA)) &&
            S2.addrdesc.memattrs.memtype == MemType_Device && !IsFault(S2.addrdesc)) then
            S2.addrdesc.fault = AArch64.AlignmentFault(acctype, iswrite, secondstage);

        // Check for permissions on Stage2 translations
        if !IsFault(S2.addrdesc) then
            S2.addrdesc.fault = AArch64.CheckS2Permission(S2.perms, vaddress, ipaddress,
                                                          S2.level, acctype, iswrite, NS,
                                                          s2fs1walk, hwupdatewalk);

        // Check for instruction fetches from Device memory not marked as execute-never. As there
        // has not been a Permission Fault then the memory is not marked execute-never.
        if (!s2fs1walk && !IsFault(S2.addrdesc) &&
            S2.addrdesc.memattrs.memtype == MemType_Device && acctype == AccType_IFETCH) then
            S2.addrdesc = AArch64.InstructionDevice(S2.addrdesc, vaddress, ipaddress, S2.level,
                                                    acctype, iswrite, secondstage, s2fs1walk);

        if (s2fs1walk && !IsFault(S2.addrdesc) &&
            S2.addrdesc.memattrs.memtype == MemType_Device) then
            // Check for protected table walk.
            if HCR_EL2.PTW == '1' then
                S2.addrdesc.fault = AArch64.PermissionFault(ipaddress, NS, S2.level, acctype,
                                                            iswrite, secondstage, s2fs1walk);
            else
                // Translation table walk occurs as Normal Non-cacheable memory.
                S2.addrdesc.memattrs.memtype = MemType_Normal;
                S2.addrdesc.memattrs.inner.attrs = MemAttr_NC;
                S2.addrdesc.memattrs.outer.attrs = MemAttr_NC;
                S2.addrdesc.memattrs.shareable = TRUE;
                S2.addrdesc.memattrs.outershareable = TRUE;

        // Check and update translation table descriptor if required
        S2.addrdesc.fault = AArch64.CheckAndUpdateDescriptor(S2.descupdate, S2.addrdesc.fault,
                                                             secondstage, vaddress, acctype,
                                                             iswrite, s2fs1walk, hwupdatewalk);

        if s2fs1walk then
            result = AArch64.CombineS1S2Desc(S1, S2.addrdesc, AccType_PTW);
        else
            result = AArch64.CombineS1S2Desc(S1, S2.addrdesc, acctype);
    else
        result = S1;

    return result;

Library pseudocode for aarch64/instrs/system/sysops/tlbi/TLBI_VMALL

// TLBI_VMALL()
// ============
// Invalidates all stage 1 entries for the indicated translation regime with the
// indicated security state for all TLBs within the indicated shareability
// domain that match the indicated VMID (where applicable).
// Note: stage 1 and stage 2 combined entries are in the scope of this operation.
// Note: stage 2 only entries are not in the scope of this operation.
// The indicated attr defines the attributes of the memory operations that must be completed in
// order to deem this operation to be completed.
// When attr is TLBI_ExcludeXS, only operations with XS=0 within the scope of this TLB operation
// are required to complete.

TLBI_VMALL(SecurityState security, Regime regime, bits(16) vmid, Shareability shareability,
           TLBI_MemAttr attr)
    assert PSTATE.EL IN {EL3, EL2, EL1};
    TLBIRecord r;
    r.op = TLBIOp_VMALL;
    r.security = security;
    r.regime = regime;
    r.level = TLBILevel_Any;
    r.vmid = vmid;
    r.attr = attr;
    TLBI(r);
    if shareability != Shareability_None then
        Broadcast(shareability, r);
    return;

Library pseudocode for aarch64/translation/translation/AArch64.SecondStageWalk

// AArch64.SecondStageWalk()
// =========================
// Perform a stage 2 translation on a stage 1 translation page table walk access.

AddressDescriptor AArch64.SecondStageWalk(AddressDescriptor S1, bits(64) vaddress,
                                          AccType acctype, boolean iswrite, integer size,
                                          boolean hwupdatewalk)

    assert HasS2Translation();

    s2fs1walk = TRUE;
    wasaligned = TRUE;
    return AArch64.SecondStageTranslate(S1, vaddress, acctype, iswrite, wasaligned, s2fs1walk,
                                        size, hwupdatewalk);

Library pseudocode for aarch64/instrs/system/sysops/tlbi/TLBI_VMALLS12

// TLBI_VMALLS12()
// ===============
// Invalidates all stage 1 and stage 2 entries for the indicated translation
// regime with the indicated security state for all TLBs within the indicated
// shareability domain that match the indicated VMID.
// The indicated attr defines the attributes of the memory operations that must be completed in
// order to deem this operation to be completed.
// When attr is TLBI_ExcludeXS, only operations with XS=0 within the scope of this TLB operation
// are required to complete.

TLBI_VMALLS12(SecurityState security, Regime regime, bits(16) vmid, Shareability shareability,
              TLBI_MemAttr attr)
    assert PSTATE.EL IN {EL3, EL2};
    TLBIRecord r;
    r.op = TLBIOp_VMALLS12;
    r.security = security;
    r.regime = regime;
    r.level = TLBILevel_Any;
    r.vmid = vmid;
    r.attr = attr;
    TLBI(r);
    if shareability != Shareability_None then
        Broadcast(shareability, r);
    return;

Library pseudocode for aarch64/translation/translation/AArch64.TranslateAddress

// AArch64.TranslateAddress()
// ==========================
// Main entry point for translating an address

AddressDescriptor AArch64.TranslateAddress(bits(64) vaddress, AccType acctype, boolean iswrite,
                                           boolean wasaligned, integer size)

    result = AArch64.FullTranslate(vaddress, acctype, iswrite, wasaligned, size);

    if !(acctype IN {AccType_PTW, AccType_IC, AccType_AT}) && !IsFault(result) then
        result.fault = AArch64.CheckDebug(vaddress, acctype, iswrite, size);

    // Update virtual address for abort functions
    result.vaddress = ZeroExtend(vaddress);

    return result;

Library pseudocode for aarch64/instrs/system/sysops/tlbi/VMID

// VMID[] // ====== // Effective VMID. bits(16) VMID[] if EL2Enabled() then return VTTBR_EL2.VMID; elsif HaveEL(EL2) && HaveSecureEL2Ext() then return Zeros(16); else return VMID_NONE;

Library pseudocode for aarch64/translation/walk/AArch64.TranslationTableWalk

// AArch64.TranslationTableWalk() // ============================== // Returns a result of a translation table walk // // Implementations might cache information from memory in any number of non-coherent TLB // caching structures, and so avoid memory accesses that have been expressed in this // pseudocode. The use of such TLBs is not expressed in this pseudocode. TLBRecord AArch64.TranslationTableWalk(bits(52) ipaddress, boolean s1_nonsecure, bits(64) vaddress, AccType acctype, boolean iswrite, boolean secondstage, boolean s2fs1walk, integer size) if !secondstage then assert !ELUsingAArch32(S1TranslationRegime()); else assert (IsSecureEL2Enabled() || (HaveEL(EL2) && !IsSecure() && !ELUsingAArch32(EL2))) && HasS2Translation(); TLBRecord result; AddressDescriptor descaddr; bits(64) baseregister; bits(64) inputaddr; // Input Address is 'vaddress' for stage 1, 'ipaddress' for stage 2 bit nswalk; // Stage 2 translation table walks are to Secure or to Non-secure PA space result.descupdate.AF = FALSE; result.descupdate.AP = FALSE; descaddr.memattrs.memtype = MemType_Normal; // Derived parameters for the page table walk: // grainsize = Log2(Size of Table) - Size of Table is 4KB, 16KB or 64KB in AArch64 // stride = Log2(Address per Level) - Bits of address consumed at each level // firstblocklevel = First level where a block entry is allowed // ps = Physical Address size as encoded in TCR_EL1.IPS or TCR_ELx/VTCR_EL2.PS // inputsize = Log2(Size of Input Address) - Input Address size in bits // level = Level to start walk from // This means that the number of levels after start level = 3-level if !secondstage then // First stage translation inputaddr = ZeroExtend(vaddress); el = AArch64.AccessUsesEL(acctype); isprivileged = AArch64.AccessIsPrivileged(acctype); top = AddrTop(inputaddr, (acctype == AccType_IFETCH), el); if el == EL3 then largegrain = TCR_EL3.TG0 == '01'; midgrain = TCR_EL3.TG0 == '10'; inputsize = 64 - UInt(TCR_EL3.T0SZ); inputsize_max = if Have52BitVAExt() && largegrain then 52 else 48; if !Have52BitVAExt() && inputsize > inputsize_max then c = ConstrainUnpredictable(Unpredictable_RESTnSZ); assert c IN {Constraint_FORCE, Constraint_FAULT}; if c == Constraint_FORCE then inputsize = inputsize_max; inputsize_min = 64 - (if !HaveSmallPageTblExt() then 39 else if largegrain then 47 else 48); if inputsize < inputsize_min then c = ConstrainUnpredictable(Unpredictable_RESTnSZ); assert c IN {Constraint_FORCE, Constraint_FAULT}; if c == Constraint_FORCE then inputsize = inputsize_min; ps = TCR_EL3.PS; basefound = inputsize >= inputsize_min && inputsize <= inputsize_max && IsZero(inputaddr<top:inputsize>); disabled = FALSE; baseregister = TTBR0_EL3; descaddr.memattrs = WalkAttrDecode(TCR_EL3.SH0, TCR_EL3.ORGN0, TCR_EL3.IRGN0, secondstage); reversedescriptors = SCTLR_EL3.EE == '1'; lookupsecure = TRUE; singlepriv = TRUE; update_AF = HaveAccessFlagUpdateExt() && TCR_EL3.HA == '1'; update_AP = HaveDirtyBitModifierExt() && update_AF && TCR_EL3.HD == '1'; hierattrsdisabled = AArch64.HaveHPDExt() && TCR_EL3.HPD == '1'; elsif ELIsInHost(el) then if inputaddr<top> == '0' then largegrain = TCR_EL2.TG0 == '01'; midgrain = TCR_EL2.TG0 == '10'; inputsize = 64 - UInt(TCR_EL2.T0SZ); inputsize_max = if Have52BitVAExt() && largegrain then 52 else 48; if !Have52BitVAExt() && inputsize > inputsize_max then c = ConstrainUnpredictable(Unpredictable_RESTnSZ); assert c IN
{Constraint_FORCE, Constraint_FAULT}; if c == Constraint_FORCE then inputsize = inputsize_max; inputsize_min = 64 - (if !HaveSmallPageTblExt() then 39 else if largegrain then 47 else 48); if inputsize < inputsize_min then c = ConstrainUnpredictable(Unpredictable_RESTnSZ); assert c IN {Constraint_FORCE, Constraint_FAULT}; if c == Constraint_FORCE then inputsize = inputsize_min; basefound = inputsize >= inputsize_min && inputsize <= inputsize_max && IsZero(inputaddr<top:inputsize>); disabled = TCR_EL2.EPD0 == '1' || (!isprivileged && HaveE0PDExt() && TCR_EL2.E0PD0 == '1'); disabled = disabled || (el == EL0 && acctype == AccType_NONFAULT && TCR_EL2.NFD0 == '1'); baseregister = TTBR0_EL2; descaddr.memattrs = WalkAttrDecode(TCR_EL2.SH0, TCR_EL2.ORGN0, TCR_EL2.IRGN0, secondstage); hierattrsdisabled = AArch64.HaveHPDExt() && TCR_EL2.HPD0 == '1'; else inputsize = 64 - UInt(TCR_EL2.T1SZ); largegrain = TCR_EL2.TG1 == '11'; // TG1 and TG0 encodings differ midgrain = TCR_EL2.TG1 == '01'; inputsize_max = if Have52BitVAExt() && largegrain then 52 else 48; if !Have52BitVAExt() && inputsize > inputsize_max then c = ConstrainUnpredictable(Unpredictable_RESTnSZ); assert c IN {Constraint_FORCE, Constraint_FAULT}; if c == Constraint_FORCE then inputsize = inputsize_max; inputsize_min = 64 - (if !HaveSmallPageTblExt() then 39 else if largegrain then 47 else 48); if inputsize < inputsize_min then c = ConstrainUnpredictable(Unpredictable_RESTnSZ); assert c IN {Constraint_FORCE, Constraint_FAULT}; if c == Constraint_FORCE then inputsize = inputsize_min; basefound = inputsize >= inputsize_min && inputsize <= inputsize_max && IsOnes(inputaddr<top:inputsize>); disabled = TCR_EL2.EPD1 == '1' || (!isprivileged && HaveE0PDExt() && TCR_EL2.E0PD1 == '1'); disabled = disabled || (el == EL0 && acctype == AccType_NONFAULT && TCR_EL2.NFD1 == '1'); baseregister = TTBR1_EL2; descaddr.memattrs = WalkAttrDecode(TCR_EL2.SH1, TCR_EL2.ORGN1, TCR_EL2.IRGN1, secondstage); hierattrsdisabled = AArch64.HaveHPDExt() && TCR_EL2.HPD1 == '1'; ps = TCR_EL2.IPS; reversedescriptors = SCTLR_EL2.EE == '1'; lookupsecure = if IsSecureEL2Enabled() then IsSecure() else FALSE; singlepriv = FALSE; update_AF = HaveAccessFlagUpdateExt() && TCR_EL2.HA == '1'; update_AP = HaveDirtyBitModifierExt() && update_AF && TCR_EL2.HD == '1'; elsif el == EL2 then inputsize = 64 - UInt(TCR_EL2.T0SZ); largegrain = TCR_EL2.TG0 == '01'; midgrain = TCR_EL2.TG0 == '10'; inputsize_max = if Have52BitVAExt() && largegrain then 52 else 48; if !Have52BitVAExt() && inputsize > inputsize_max then c = ConstrainUnpredictable(Unpredictable_RESTnSZ); assert c IN {Constraint_FORCE, Constraint_FAULT}; if c == Constraint_FORCE then inputsize = inputsize_max; inputsize_min = 64 - (if !HaveSmallPageTblExt() then 39 else if largegrain then 47 else 48); if inputsize < inputsize_min then c = ConstrainUnpredictable(Unpredictable_RESTnSZ); assert c IN {Constraint_FORCE, Constraint_FAULT}; if c == Constraint_FORCE then inputsize = inputsize_min; ps = TCR_EL2.PS; basefound = inputsize >= inputsize_min && inputsize <= inputsize_max && IsZero(inputaddr<top:inputsize>); disabled = FALSE; baseregister = TTBR0_EL2; descaddr.memattrs = WalkAttrDecode(TCR_EL2.SH0, TCR_EL2.ORGN0, TCR_EL2.IRGN0, secondstage); reversedescriptors = SCTLR_EL2.EE == '1'; lookupsecure = if IsSecureEL2Enabled() then IsSecure() else FALSE; singlepriv = TRUE; update_AF = HaveAccessFlagUpdateExt() && TCR_EL2.HA == '1'; update_AP = HaveDirtyBitModifierExt() && update_AF && TCR_EL2.HD == '1'; hierattrsdisabled = 
AArch64.HaveHPDExt() && TCR_EL2.HPD == '1'; else if inputaddr<top> == '0' then inputsize = 64 - UInt(TCR_EL1.T0SZ); largegrain = TCR_EL1.TG0 == '01'; midgrain = TCR_EL1.TG0 == '10'; inputsize_max = if Have52BitVAExt() && largegrain then 52 else 48; if !Have52BitVAExt() && inputsize > inputsize_max then c = ConstrainUnpredictable(Unpredictable_RESTnSZ); assert c IN {Constraint_FORCE, Constraint_FAULT}; if c == Constraint_FORCE then inputsize = inputsize_max; inputsize_min = 64 - (if !HaveSmallPageTblExt() then 39 else if largegrain then 47 else 48); if inputsize < inputsize_min then c = ConstrainUnpredictable(Unpredictable_RESTnSZ); assert c IN {Constraint_FORCE, Constraint_FAULT}; if c == Constraint_FORCE then inputsize = inputsize_min; basefound = inputsize >= inputsize_min && inputsize <= inputsize_max && IsZero(inputaddr<top:inputsize>); disabled = TCR_EL1.EPD0 == '1' || (!isprivileged && HaveE0PDExt() && TCR_EL1.E0PD0 == '1'); disabled = disabled || (el == EL0 && acctype == AccType_NONFAULT && TCR_EL1.NFD0 == '1'); baseregister = TTBR0_EL1; descaddr.memattrs = WalkAttrDecode(TCR_EL1.SH0, TCR_EL1.ORGN0, TCR_EL1.IRGN0, secondstage); hierattrsdisabled = AArch64.HaveHPDExt() && TCR_EL1.HPD0 == '1'; else inputsize = 64 - UInt(TCR_EL1.T1SZ); largegrain = TCR_EL1.TG1 == '11'; // TG1 and TG0 encodings differ midgrain = TCR_EL1.TG1 == '01'; inputsize_max = if Have52BitVAExt() && largegrain then 52 else 48; if !Have52BitVAExt() && inputsize > inputsize_max then c = ConstrainUnpredictable(Unpredictable_RESTnSZ); assert c IN {Constraint_FORCE, Constraint_FAULT}; if c == Constraint_FORCE then inputsize = inputsize_max; inputsize_min = 64 - (if !HaveSmallPageTblExt() then 39 else if largegrain then 47 else 48); if inputsize < inputsize_min then c = ConstrainUnpredictable(Unpredictable_RESTnSZ); assert c IN {Constraint_FORCE, Constraint_FAULT}; if c == Constraint_FORCE then inputsize = inputsize_min; basefound = inputsize >= inputsize_min && inputsize <= inputsize_max && IsOnes(inputaddr<top:inputsize>); disabled = TCR_EL1.EPD1 == '1' || (!isprivileged && HaveE0PDExt() && TCR_EL1.E0PD1 == '1'); disabled = disabled || (el == EL0 && acctype == AccType_NONFAULT && TCR_EL1.NFD1 == '1'); baseregister = TTBR1_EL1; descaddr.memattrs = WalkAttrDecode(TCR_EL1.SH1, TCR_EL1.ORGN1, TCR_EL1.IRGN1, secondstage); hierattrsdisabled = AArch64.HaveHPDExt() && TCR_EL1.HPD1 == '1'; ps = TCR_EL1.IPS; reversedescriptors = SCTLR_EL1.EE == '1'; lookupsecure = IsSecure(); singlepriv = FALSE; update_AF = HaveAccessFlagUpdateExt() && TCR_EL1.HA == '1'; update_AP = HaveDirtyBitModifierExt() && update_AF && TCR_EL1.HD == '1'; if largegrain then grainsize = 16; // Log2(64KB page size) firstblocklevel = (if Have52BitPAExt() then 1 else 2); // Largest block is 4TB (2^42 bytes) for 52 bit PA // and 512MB (2^29 bytes) otherwise elsif midgrain then grainsize = 14; // Log2(16KB page size) firstblocklevel = 2; // Largest block is 32MB (2^25 bytes) else // Small grain grainsize = 12; // Log2(4KB page size) firstblocklevel = 1; // Largest block is 1GB (2^30 bytes) stride = grainsize - 3; // Log2(page size / 8 bytes) // The starting level is the number of strides needed to consume the input address level = 4 - (1 + ((inputsize - grainsize - 1) DIV stride)); else // Second stage translation inputaddr = ZeroExtend(ipaddress); if IsSecureBelowEL3() then // Second stage for Secure translation regime if s1_nonsecure then // Non-secure IPA space t0size = VTCR_EL2.T0SZ; tg0 = VTCR_EL2.TG0; nswalk = VTCR_EL2.NSW; else // Secure IPA space t0size = 
VSTCR_EL2.T0SZ; tg0 = VSTCR_EL2.TG0; nswalk = VSTCR_EL2.SW; // Stage 2 translation accesses the Non-secure PA space or the Secure PA space if nswalk == '1' then // When walk is Non-secure, access must be to the Non-secure PA space nsaccess = '1'; elsif !s1_nonsecure then // When walk is Secure and in the Secure IPA space, // access is specified by VSTCR_EL2.SA nsaccess = VSTCR_EL2.SA; elsif VSTCR_EL2.SW == '1' || VSTCR_EL2.SA == '1' then // When walk is Secure and in the Non-secure IPA space, // access is Non-secure when VSTCR_EL2.SA specifies the Non-secure PA space nsaccess = '1'; else // When walk is Secure and in the Non-secure IPA space, // if VSTCR_EL2.SA specifies the Secure PA space, access is specified by VTCR_EL2.NSA nsaccess = VTCR_EL2.NSA; else // Second stage for Non-secure translation regime t0size = VTCR_EL2.T0SZ; tg0 = VTCR_EL2.TG0; nswalk = '1'; nsaccess = '1'; inputsize = 64 - UInt(t0size); largegrain = tg0 == '01'; midgrain = tg0 == '10'; inputsize_max = if Have52BitPAExt() && PAMax() == 52 && largegrain then 52 else 48; if !Have52BitPAExt() && inputsize > inputsize_max then c = ConstrainUnpredictable(Unpredictable_RESTnSZ); assert c IN {Constraint_FORCE, Constraint_FAULT}; if c == Constraint_FORCE then inputsize = inputsize_max; inputsize_min = 64 - (if !HaveSmallPageTblExt() then 39 else if largegrain then 47 else 48); if inputsize < inputsize_min then c = ConstrainUnpredictable(Unpredictable_RESTnSZ); assert c IN {Constraint_FORCE, Constraint_FAULT}; if c == Constraint_FORCE then inputsize = inputsize_min; ps = VTCR_EL2.PS; basefound = inputsize >= inputsize_min && inputsize <= inputsize_max && IsZero(inputaddr<63:inputsize>); disabled = FALSE; descaddr.memattrs = WalkAttrDecode(VTCR_EL2.SH0, VTCR_EL2.ORGN0, VTCR_EL2.IRGN0, secondstage); reversedescriptors = SCTLR_EL2.EE == '1'; singlepriv = TRUE; update_AF = HaveAccessFlagUpdateExt() && VTCR_EL2.HA == '1'; update_AP = HaveDirtyBitModifierExt() && update_AF && VTCR_EL2.HD == '1'; if IsSecureEL2Enabled() then lookupsecure = !s1_nonsecure; else lookupsecure = FALSE; if lookupsecure then baseregister = VSTTBR_EL2; startlevel = UInt(VSTCR_EL2.SL0); else baseregister = VTTBR_EL2; startlevel = UInt(VTCR_EL2.SL0); if largegrain then grainsize = 16; // Log2(64KB page size) level = 3 - startlevel; firstblocklevel = (if Have52BitPAExt() then 1 else 2); // Largest block is 4TB (2^42 bytes) for 52 bit PA // and 512MB (2^29 bytes) otherwise elsif midgrain then grainsize = 14; // Log2(16KB page size) level = 3 - startlevel; firstblocklevel = 2; // Largest block is 32MB (2^25 bytes) else // Small grain grainsize = 12; // Log2(4KB page size) if HaveSmallPageTblExt() && startlevel == 3 then level = startlevel; // Startlevel 3 (VTCR_EL2.SL0 or VSCTR_EL2.SL0 == 0b11) for 4KB granule else level = 2 - startlevel; firstblocklevel = 1; // Largest block is 1GB (2^30 bytes) stride = grainsize - 3; // Log2(page size / 8 bytes) // Limits on IPA controls based on implemented PA size. 
Level 0 is only supported by small grain translations if largegrain then // 64KB pages // Level 1 only supported if implemented PA size is greater than 2^42 bytes if level == 0 || (level == 1 && PAMax() <= 42) then basefound = FALSE; elsif midgrain then // 16KB pages // Level 1 only supported if implemented PA size is greater than 2^40 bytes if level == 0 || (level == 1 && PAMax() <= 40) then basefound = FALSE; else // Small grain, 4KB pages // Level 0 only supported if implemented PA size is greater than 2^42 bytes if level < 0 || (level == 0 && PAMax() <= 42) then basefound = FALSE; // If the inputsize exceeds the PAMax value, the behavior is CONSTRAINED UNPREDICTABLE inputsizecheck = inputsize; if inputsize > PAMax() && (!ELUsingAArch32(EL1) || inputsize > 40) then case ConstrainUnpredictable(Unpredictable_LARGEIPA) of when Constraint_FORCE // Restrict the inputsize to the PAMax value inputsize = PAMax(); inputsizecheck = PAMax(); when Constraint_FORCENOSLCHECK // As FORCE, except use the configured inputsize in the size checks below inputsize = PAMax(); when Constraint_FAULT // Generate a translation fault basefound = FALSE; otherwise Unreachable(); // Number of entries in the starting level table = // (Size of Input Address)/((Address per level)^(Num levels remaining)*(Size of Table)) startsizecheck = inputsizecheck - ((3 - level)*stride + grainsize); // Log2(Num of entries) // Check for starting level table with fewer than 2 entries or longer than 16 pages. // Lower bound check is: startsizecheck < Log2(2 entries) // Upper bound check is: startsizecheck > Log2(pagesize/8*16) if startsizecheck < 1 || startsizecheck > stride + 4 then basefound = FALSE; if !basefound || disabled then level = 0; // AArch32 reports this as a level 1 fault result.addrdesc.fault = AArch64.TranslationFault(ipaddress, s1_nonsecure, level, acctype, iswrite, secondstage, s2fs1walk); return result; case ps of when '000' outputsize = 32; when '001' outputsize = 36; when '010' outputsize = 40; when '011' outputsize = 42; when '100' outputsize = 44; when '101' outputsize = 48; when '110' outputsize = (if Have52BitPAExt() && largegrain then 52 else 48); otherwise outputsize = integer IMPLEMENTATION_DEFINED "Reserved Intermediate Physical Address size value"; if outputsize > PAMax() then outputsize = PAMax(); if outputsize < 48 && !IsZero(baseregister<47:outputsize>) then level = 0; result.addrdesc.fault = AArch64.AddressSizeFault(ipaddress, s1_nonsecure, level, acctype, iswrite, secondstage, s2fs1walk); return result; // Bottom bound of the Base address is: // Log2(8 bytes per entry)+Log2(Number of entries in starting level table) // Number of entries in starting level table = // (Size of Input Address)/((Address per level)^(Num levels remaining)*(Size of Table)) baselowerbound = 3 + inputsize - ((3-level)*stride + grainsize); // Log2(Num of entries*8) if outputsize == 52 then z = (if baselowerbound < 6 then 6 else baselowerbound); baseaddress = baseregister<5:2>:baseregister<47:z>:Zeros(z); else baseaddress = ZeroExtend(baseregister<47:baselowerbound>:Zeros(baselowerbound)); ns_table = if lookupsecure then '0' else '1'; ap_table = '00'; xn_table = '0'; pxn_table = '0'; addrselecttop = inputsize - 1; apply_nvnv1_effect = HaveNVExt() && EL2Enabled() && HCR_EL2.<NV,NV1> == '11' && S1TranslationRegime() == EL1 && !secondstage; repeat addrselectbottom = (3-level)*stride + grainsize; bits(52) index = ZeroExtend(inputaddr<addrselecttop:addrselectbottom>:'000'); descaddr.paddress.address = baseaddress OR index;
descaddr.paddress.NS = if secondstage then nswalk else ns_table; // If there are two stages of translation, then the first stage table walk addresses // are themselves subject to translation if secondstage || !HasS2Translation() || (HaveNV2Ext() && acctype == AccType_NV2REGISTER) then descaddr2 = descaddr; else hwupdatewalk = FALSE; descaddr2 = AArch64.SecondStageWalk(descaddr, vaddress, acctype, iswrite, 8, hwupdatewalk); // Check for a fault on the stage 2 walk if IsFault(descaddr2) then result.addrdesc.fault = descaddr2.fault; return result; // Update virtual address for abort functions descaddr2.vaddress = ZeroExtend(vaddress); accdesc = CreateAccessDescriptorPTW(acctype, secondstage, s2fs1walk, level); desc = _Mem[descaddr2, 8, accdesc]; if reversedescriptors then desc = BigEndianReverse(desc); if desc<0> == '0' || (desc<1:0> == '01' && (level == 3 || (HaveBlockBBM() && IsBlockDescriptorNTBitValid() && desc<16> == '1'))) then // Fault (00), Reserved (10), Block (01) at level 3, or Block(01) with nT bit set. result.addrdesc.fault = AArch64.TranslationFault(ipaddress, s1_nonsecure, level, acctype, iswrite, secondstage, s2fs1walk); return result; // Valid Block, Page, or Table entry if desc<1:0> == '01' || level == 3 then // Block (01) or Page (11) blocktranslate = TRUE; else // Table (11) if (outputsize < 52 && largegrain && (PAMax() == 52 || boolean IMPLEMENTATION_DEFINED "Address Size Fault on LPA descriptor bits [15:12]") && !IsZero(desc<15:12>)) || (outputsize < 48 && !IsZero(desc<47:outputsize>)) then result.addrdesc.fault = AArch64.AddressSizeFault(ipaddress,s1_nonsecure, level, acctype, iswrite, secondstage, s2fs1walk); return result; if outputsize == 52 then baseaddress = desc<15:12>:desc<47:grainsize>:Zeros(grainsize); else baseaddress = ZeroExtend(desc<47:grainsize>:Zeros(grainsize)); if !secondstage then // Unpack the upper and lower table attributes ns_table = ns_table OR desc<63>; if !secondstage && !hierattrsdisabled then ap_table<1> = ap_table<1> OR desc<62>; // read-only if apply_nvnv1_effect then pxn_table = pxn_table OR desc<60>; else xn_table = xn_table OR desc<60>; // pxn_table and ap_table[0] apply in EL1&0 or EL2&0 translation regimes if !singlepriv then if !apply_nvnv1_effect then pxn_table = pxn_table OR desc<59>; ap_table<0> = ap_table<0> OR desc<61>; // privileged level = level + 1; addrselecttop = addrselectbottom - 1; blocktranslate = FALSE; until blocktranslate; // Check block size is supported at this level if level < firstblocklevel then result.addrdesc.fault = AArch64.TranslationFault(ipaddress, s1_nonsecure, level, acctype, iswrite, secondstage, s2fs1walk); return result; // Check for misprogramming of the contiguous bit if largegrain then num_ch_entries = 5; elsif midgrain then num_ch_entries = if level == 3 then 7 else 5; else num_ch_entries = 4; contiguousbitcheck = inputsize < (addrselectbottom + num_ch_entries); if contiguousbitcheck && desc<52> == '1' then if boolean IMPLEMENTATION_DEFINED "Translation fault on misprogrammed contiguous bit" then result.addrdesc.fault = AArch64.TranslationFault(ipaddress, s1_nonsecure, level, acctype, iswrite, secondstage, s2fs1walk); return result; // Unpack the descriptor into address and upper and lower block attributes if largegrain then outputaddress = desc<15:12>:desc<47:addrselectbottom>:inputaddr<addrselectbottom-1:0>; else outputaddress = ZeroExtend(desc<47:addrselectbottom>:inputaddr<addrselectbottom-1:0>); // When 52-bit PA is supported, for 64 Kbyte translation granule, // block size might be larger than 
the supported output address size if ((outputsize < 52 && !IsZero(outputaddress<51:48>) && largegrain && (PAMax() == 52 || boolean IMPLEMENTATION_DEFINED "Address Size Fault on LPA descriptor bits [15:12]")) || (outputsize < 48 && !IsZero(outputaddress<47:outputsize>))) then result.addrdesc.fault = AArch64.AddressSizeFault(ipaddress, s1_nonsecure, level, acctype, iswrite, secondstage, s2fs1walk); return result; // Check Access Flag if desc<10> == '0' then if !update_AF then result.addrdesc.fault = AArch64.AccessFlagFault(ipaddress, s1_nonsecure, level, acctype, iswrite, secondstage, s2fs1walk); return result; else result.descupdate.AF = TRUE; if update_AP && desc<51> == '1' then // If hw update of access permission field is configured consider AP[2] as '0' / S2AP[2] as '1' if !secondstage && desc<7> == '1' then desc<7> = '0'; result.descupdate.AP = TRUE; elsif secondstage && desc<7> == '0' then desc<7> = '1'; result.descupdate.AP = TRUE; // Required descriptor if AF or AP[2]/S2AP[2] needs update result.descupdate.descaddr = descaddr; if apply_nvnv1_effect then pxn = desc<54>; // Bit[54] of the block/page descriptor holds PXN instead of UXN xn = '0'; // XN is '0' ap = desc<7>:'01'; // Bit[6] of the block/page descriptor is treated as '0' regardless of value programmed else xn = desc<54>; // Bit[54] of the block/page descriptor holds UXN pxn = desc<53>; // Bit[53] of the block/page descriptor holds PXN ap = desc<7:6>:'1'; // Bits[7:6] of the block/page descriptor hold AP[2:1] contiguousbit = desc<52>; nG = desc<11>; sh = desc<9:8>; memattr = desc<5:2>; // AttrIndx and NS bit in stage 1 result.domain = bits(4) UNKNOWN; // Domains not used result.level = level; result.blocksize = 2^((3-level)*stride + grainsize); // Stage 1 translation regimes also inherit attributes from the tables if !secondstage then result.perms.xn = xn OR xn_table; result.perms.ap<2> = ap<2> OR ap_table<1>; // Force read-only // PXN, nG and AP[1] apply in EL1&0 or EL2&0 stage 1 translation regimes if !singlepriv then result.perms.ap<1> = ap<1> AND NOT(ap_table<0>); // Force privileged only result.perms.pxn = pxn OR pxn_table; // Pages from Non-secure tables are marked non-global in Secure EL1&0 if IsSecure() then result.nG = nG OR ns_table; else result.nG = nG; else result.perms.ap<1> = '1'; result.perms.pxn = '0'; result.nG = '0'; result.GP = desc<50>; // Stage 1 block or pages might be guarded result.perms.ap<0> = '1'; result.addrdesc.memattrs = AArch64.S1AttrDecode(sh, memattr<2:0>, acctype); result.addrdesc.paddress.NS = memattr<3> OR ns_table; else result.perms.ap<2:1> = ap<2:1>; result.perms.ap<0> = '1'; result.perms.xn = xn; if HaveExtendedExecuteNeverExt() then result.perms.xxn = desc<53>; result.perms.pxn = '0'; result.nG = '0'; if s2fs1walk then result.addrdesc.memattrs = S2AttrDecode(sh, memattr, AccType_PTW); else result.addrdesc.memattrs = S2AttrDecode(sh, memattr, acctype); result.addrdesc.paddress.NS = nsaccess; result.addrdesc.paddress.address = outputaddress; result.addrdesc.fault = AArch64.NoFault(); result.contiguous = contiguousbit == '1'; if HaveCommonNotPrivateTransExt() then result.CnP = baseregister<0>; return result;
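For the stage 1 case, the starting level falls out of the configured input-address size and translation granule via level = 4 - (1 + ((inputsize - grainsize - 1) DIV stride)). A runnable Python sketch of just that arithmetic, assuming the stage 1 formula above (the helper name is hypothetical):

def start_level(t0sz, grain_kb):
    """Return (inputsize, start level) for a stage 1 walk; grain_kb in {4, 16, 64}."""
    inputsize = 64 - t0sz
    grainsize = {4: 12, 16: 14, 64: 16}[grain_kb]   # Log2(page size)
    stride = grainsize - 3                           # Log2(entries per table)
    level = 4 - (1 + (inputsize - grainsize - 1) // stride)
    return inputsize, level

# Example: a 48-bit VA with a 4KB granule starts at level 0; a 39-bit VA starts at level 1.
assert start_level(16, 4) == (48, 0)
assert start_level(25, 4) == (39, 1)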

Library pseudocode for aarch64/instrs/system/sysops/tlbi/VMID_NONE

constant bits(16) VMID_NONE = Zeros();

Library pseudocode for shared/debug/ClearStickyErrors/ClearStickyErrors

// ClearStickyErrors() // =================== ClearStickyErrors() EDSCR.TXU = '0'; // Clear TX underrun flag EDSCR.RXO = '0'; // Clear RX overrun flag if Halted() then // in Debug state EDSCR.ITO = '0'; // Clear ITR overrun flag // If halted and the ITR is not empty then it is UNPREDICTABLE whether the EDSCR.ERR is cleared. // The UNPREDICTABLE behavior also affects the instructions in flight, but this is not described // in the pseudocode. if Halted() && EDSCR.ITE == '0' && ConstrainUnpredictableBool(Unpredictable_CLEARERRITEZERO) then return; EDSCR.ERR = '0'; // Clear cumulative error flag return;

Library pseudocode for aarch64/instrs/vector/arithmetic/binary/uniform/logical/bsl-eor/vbitop/VBitOp

enumeration VBitOp {VBitOp_VBIF, VBitOp_VBIT, VBitOp_VBSL, VBitOp_VEOR};

Library pseudocode for shared/debug/DebugTarget/DebugTarget

// DebugTarget() // ============= // Returns the debug exception target Exception level bits(2) DebugTarget() secure = IsSecure(); return DebugTargetFrom(secure);

Library pseudocode for aarch64/instrs/vector/arithmetic/unary/cmp/compareop/CompareOp

enumeration CompareOp {CompareOp_GT, CompareOp_GE, CompareOp_EQ, CompareOp_LE, CompareOp_LT};

Library pseudocode for shared/debug/DebugTarget/DebugTargetFrom

// DebugTargetFrom() // ================= bits(2) DebugTargetFrom(boolean secure) if HaveEL(EL2) && (!secure || (HaveSecureEL2Ext() && (!HaveEL(EL3) || SCR_EL3.EEL2 == '1'))) then if ELUsingAArch32(EL2) then route_to_el2 = (HDCR.TDE == '1' || HCR.TGE == '1'); else route_to_el2 = (MDCR_EL2.TDE == '1' || HCR_EL2.TGE == '1'); else route_to_el2 = FALSE; if route_to_el2 then target = EL2; elsif HaveEL(EL3) && HighestELUsingAArch32() && secure then target = EL3; else target = EL1; return target;
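A condensed Python sketch of the routing decision in DebugTargetFrom(), assuming an AArch64 EL2 (so MDCR_EL2.TDE and HCR_EL2.TGE are the relevant controls) and ignoring the Secure EL2 qualification for brevity:

def debug_target(have_el2, have_el3, secure, tde, tge, highest_el_aarch32):
    # Debug exceptions route to EL2 when the hypervisor traps them (TDE) or
    # owns EL0 (TGE); EL3 is the target only for Secure state with AArch32 EL3.
    route_to_el2 = have_el2 and not secure and (tde or tge)
    if route_to_el2:
        return 'EL2'
    if have_el3 and highest_el_aarch32 and secure:
        return 'EL3'
    return 'EL1'

assert debug_target(True, True, False, True, False, False) == 'EL2'
assert debug_target(True, True, True, True, False, False) == 'EL1'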

Library pseudocode for aarch64/instrs/vector/logical/immediateop/ImmediateOp

enumeration ImmediateOp {ImmediateOp_MOVI, ImmediateOp_MVNI, ImmediateOp_ORR, ImmediateOp_BIC};

Library pseudocode for shared/debug/DoubleLockStatus/DoubleLockStatus

// DoubleLockStatus() // ================== // Returns the state of the OS Double Lock. // FALSE if OSDLR_EL1.DLK == 0 or DBGPRCR_EL1.CORENPDRQ == 1 or the PE is in Debug state. // TRUE if OSDLR_EL1.DLK == 1 and DBGPRCR_EL1.CORENPDRQ == 0 and the PE is in Non-debug state. boolean DoubleLockStatus() if !HaveDoubleLock() then return FALSE; elsif ELUsingAArch32(EL1) then return DBGOSDLR.DLK == '1' && DBGPRCR.CORENPDRQ == '0' && !Halted(); else return OSDLR_EL1.DLK == '1' && DBGPRCR_EL1.CORENPDRQ == '0' && !Halted();

Library pseudocode for aarch64/instrs/vector/reduce/reduceop/Reduce

// Reduce() // ======== bits(esize) Reduce(ReduceOp op, bits(N) input, integer esize) boolean altfp = HaveAltFP() && !UsingAArch32() && FPCR.AH == '1'; return Reduce(op, input, esize, altfp); // Reduce() // ======== // Perform the operation 'op' on pairs of elements from the input vector, // reducing the vector to a scalar result. The 'altfp' argument controls // alternative floating-point behaviour. bits(esize) Reduce(ReduceOp op, bits(N) input, integer esize, boolean altfp) integer half; bits(esize) hi; bits(esize) lo; bits(esize) result; if N == esize then return input<esize-1:0>; half = N DIV 2; hi = Reduce(op, input<N-1:half>, esize, altfp); lo = Reduce(op, input<half-1:0>, esize, altfp); case op of when ReduceOp_FMINNUM result = FPMinNum(lo, hi, FPCR[]); when ReduceOp_FMAXNUM result = FPMaxNum(lo, hi, FPCR[]); when ReduceOp_FMIN result = FPMin(lo, hi, FPCR[], altfp); when ReduceOp_FMAX result = FPMax(lo, hi, FPCR[], altfp); when ReduceOp_FADD result = FPAdd(lo, hi, FPCR[]); when ReduceOp_ADD result = lo + hi; return result;

Library pseudocode for shared/debug/authentication/AllowExternalDebugAccess

// AllowExternalDebugAccess() // ========================== // Returns TRUE if an external debug interface access to the External debug registers // is allowed, FALSE otherwise. boolean AllowExternalDebugAccess() // The access may also be subject to OS Lock, power-down, etc. if HaveSecureExtDebugView() then return AllowExternalDebugAccess(IsAccessSecure()); else return AllowExternalDebugAccess(ExternalSecureInvasiveDebugEnabled()); // AllowExternalDebugAccess() // ========================== // Returns TRUE if an external debug interface access to the External debug registers // is allowed for the given Security state, FALSE otherwise. boolean AllowExternalDebugAccess(boolean allow_secure) // The access may also be subject to OS Lock, power-down, etc. if HaveSecureExtDebugView() || ExternalInvasiveDebugEnabled() then if allow_secure then return TRUE; elsif HaveEL(EL3) then if ELUsingAArch32(EL3) then return SDCR.EDAD == '0'; else return MDCR_EL3.EDAD == '0'; else return !IsSecure(); else return FALSE;
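The recursive halving in Reduce() is worth seeing in isolation: each call splits the vector, reduces both halves, and combines them as op(lo, hi). A runnable Python sketch for the integer ADD case (the FP cases additionally depend on the FPMin/FPMax/FPAdd helpers and the rounding mode, so the pairing order matters there):

def reduce_pairwise(op, elems):
    # Base case: a single element reduces to itself.
    if len(elems) == 1:
        return elems[0]
    half = len(elems) // 2
    lo = reduce_pairwise(op, elems[:half])   # lower half, like input<half-1:0>
    hi = reduce_pairwise(op, elems[half:])   # upper half, like input<N-1:half>
    return op(lo, hi)

assert reduce_pairwise(lambda a, b: a + b, [1, 2, 3, 4]) == 10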

Library pseudocode for aarch64/instrs/vector/reduce/reduceop/ReduceOp

enumeration ReduceOp {ReduceOp_FMINNUM, ReduceOp_FMAXNUM, ReduceOp_FMIN, ReduceOp_FMAX, ReduceOp_FADD, ReduceOp_ADD};

Library pseudocode for shared/debug/authentication/AllowExternalPMUAccess

// AllowExternalPMUAccess() // ======================== // Returns TRUE if an external debug interface access to the PMU registers is allowed, FALSE otherwise. boolean AllowExternalPMUAccess() // The access may also be subject to OS Lock, power-down, etc. if HaveSecureExtDebugView() then return AllowExternalPMUAccess(IsAccessSecure()); else return AllowExternalPMUAccess(ExternalSecureNoninvasiveDebugEnabled()); // AllowExternalPMUAccess() // ======================== // Returns TRUE if an external debug interface access to the PMU registers is allowed for the given // Security state, FALSE otherwise. boolean AllowExternalPMUAccess(boolean allow_secure) // The access may also be subject to OS Lock, power-down, etc. if HaveSecureExtDebugView() || ExternalNoninvasiveDebugEnabled() then if allow_secure then return TRUE; elsif HaveEL(EL3) then if ELUsingAArch32(EL3) then return SDCR.EPMAD == '0'; else return MDCR_EL3.EPMAD == '0'; else return !IsSecure(); else return FALSE;

Library pseudocode for aarch64/translation/attrs/AArch64.CombineS1S2Desc

// AArch64.CombineS1S2Desc() // ========================= // Combines the address descriptors from stage 1 and stage 2 AddressDescriptor AArch64.CombineS1S2Desc(AddressDescriptor s1desc, AddressDescriptor s2desc, AccType s2acctype) AddressDescriptor result; result.paddress = s2desc.paddress; apply_force_writeback = HaveStage2MemAttrControl() && HCR_EL2.FWB == '1'; if IsFault(s1desc) || IsFault(s2desc) then result = if IsFault(s1desc) then s1desc else s2desc; else result.fault = AArch64.NoFault(); if s2desc.memattrs.memtype == MemType_Device || ( (apply_force_writeback && s1desc.memattrs.memtype == MemType_Device && s2desc.memattrs.inner.attrs != '10') || (!apply_force_writeback && s1desc.memattrs.memtype == MemType_Device) ) then result.memattrs.memtype = MemType_Device; if s1desc.memattrs.memtype == MemType_Normal then result.memattrs.device = s2desc.memattrs.device; elsif s2desc.memattrs.memtype == MemType_Normal then result.memattrs.device = s1desc.memattrs.device; else // Both Device result.memattrs.device = CombineS1S2Device(s1desc.memattrs.device, s2desc.memattrs.device); result.memattrs.tagged = FALSE; // S1 can be either Normal or Device, S2 is Normal. else result.memattrs.memtype = MemType_Normal; result.memattrs.device = DeviceType UNKNOWN; result.memattrs.inner = CombineS1S2AttrHints(s1desc.memattrs.inner, s2desc.memattrs.inner, s2acctype); result.memattrs.outer = CombineS1S2AttrHints(s1desc.memattrs.outer, s2desc.memattrs.outer, s2acctype); result.memattrs.shareable = (s1desc.memattrs.shareable || s2desc.memattrs.shareable); result.memattrs.outershareable = (s1desc.memattrs.outershareable || s2desc.memattrs.outershareable); result.memattrs.tagged = (s1desc.memattrs.tagged && result.memattrs.inner.attrs == MemAttr_WB && result.memattrs.inner.hints == MemHint_RWA && result.memattrs.outer.attrs == MemAttr_WB && result.memattrs.outer.hints == MemHint_RWA); result.memattrs = MemAttrDefaults(result.memattrs); return result;

Library pseudocode for shared/debug/authentication/Debug_authentication

signal DBGEN; signal NIDEN; signal SPIDEN; signal SPNIDEN;
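Two rules above do most of the work: Device "wins" over Normal when the two stages disagree on memory type, and the combined descriptor is shareable if either stage is. A toy Python sketch of just those two rules, assuming FWB is not in use (function and value names are illustrative):

def combine_s1_s2(s1_memtype, s2_memtype, s1_shareable, s2_shareable):
    # Device is "stickier" than Normal; shareability is the OR of the two stages.
    memtype = 'Device' if 'Device' in (s1_memtype, s2_memtype) else 'Normal'
    return memtype, (s1_shareable or s2_shareable)

assert combine_s1_s2('Normal', 'Device', False, True) == ('Device', True)
assert combine_s1_s2('Normal', 'Normal', False, False) == ('Normal', False)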

Library pseudocode for aarch64/translation/attrs/AArch64.InstructionDevice

// AArch64.InstructionDevice() // =========================== // Instruction fetches from memory marked as Device but not execute-never might generate a // Permission Fault but are otherwise treated as if from Normal Non-cacheable memory. AddressDescriptor AArch64.InstructionDevice(AddressDescriptor addrdesc, bits(64) vaddress, bits(52) ipaddress, integer level, AccType acctype, boolean iswrite, boolean secondstage, boolean s2fs1walk) c = ConstrainUnpredictable(Unpredictable_INSTRDEVICE); assert c IN {Constraint_NONE, Constraint_FAULT}; if c == Constraint_FAULT then addrdesc.fault = AArch64.PermissionFault(ipaddress, boolean UNKNOWN, level, acctype, iswrite, secondstage, s2fs1walk); else addrdesc.memattrs.memtype = MemType_Normal; addrdesc.memattrs.inner.attrs = MemAttr_NC; addrdesc.memattrs.inner.hints = MemHint_No; addrdesc.memattrs.outer = addrdesc.memattrs.inner; addrdesc.memattrs.tagged = FALSE; addrdesc.memattrs = MemAttrDefaults(addrdesc.memattrs); return addrdesc;

Library pseudocode for shared/debug/authentication/ExternalInvasiveDebugEnabled

// ExternalInvasiveDebugEnabled() // ============================== // The definition of this function is IMPLEMENTATION DEFINED. // In the recommended interface, this function returns the state of the DBGEN signal. boolean ExternalInvasiveDebugEnabled() return DBGEN == HIGH;

Library pseudocode for aarch64/translation/attrs/AArch64.S1AttrDecode

// AArch64.S1AttrDecode() // ====================== // Converts the Stage 1 attribute fields, using the MAIR, to orthogonal // attributes and hints. MemoryAttributes AArch64.S1AttrDecode(bits(2) SH, bits(3) attr, AccType acctype) MemoryAttributes memattrs; mair = MAIR[]; index = 8 * UInt(attr); attrfield = mair<index+7:index>; memattrs.tagged = FALSE; if ((attrfield<7:4> != '0000' && attrfield<7:4> != '1111' && attrfield<3:0> == '0000') || (attrfield<7:4> == '0000' && attrfield<3:0> != 'xx00')) then // Reserved, maps to an allocated value (-, attrfield) = ConstrainUnpredictableBits(Unpredictable_RESMAIR); if !HaveMTEExt() && attrfield<7:4> == '1111' && attrfield<3:0> == '0000' then // Reserved, maps to an allocated value (-, attrfield) = ConstrainUnpredictableBits(Unpredictable_RESMAIR); if attrfield<7:4> == '0000' then // Device memattrs.memtype = MemType_Device; case attrfield<3:0> of when '0000' memattrs.device = DeviceType_nGnRnE; when '0100' memattrs.device = DeviceType_nGnRE; when '1000' memattrs.device = DeviceType_nGRE; when '1100' memattrs.device = DeviceType_GRE; otherwise Unreachable(); // Reserved, handled above elsif attrfield<3:0> != '0000' then // Normal memattrs.memtype = MemType_Normal; memattrs.outer = LongConvertAttrsHints(attrfield<7:4>, acctype); memattrs.inner = LongConvertAttrsHints(attrfield<3:0>, acctype); memattrs.shareable = SH<1> == '1'; memattrs.outershareable = SH == '10'; elsif HaveMTEExt() && attrfield == '11110000' then // Normal, Tagged WB-RWA memattrs.memtype = MemType_Normal; memattrs.outer = LongConvertAttrsHints('1111', acctype); // WB_RWA memattrs.inner = LongConvertAttrsHints('1111', acctype); // WB_RWA memattrs.shareable = SH<1> == '1'; memattrs.outershareable = SH == '10'; memattrs.tagged = TRUE; else Unreachable(); // Reserved, handled above if ((HCR_EL2.VM == '1' || HCR_EL2.DC == '1') && (PSTATE.EL == EL1 || (PSTATE.EL == EL0 && HCR_EL2.TGE == '0')) && acctype != AccType_NV2REGISTER) then return memattrs; else return MemAttrDefaults(memattrs);

Library pseudocode for shared/debug/authentication/ExternalNoninvasiveDebugAllowed

// ExternalNoninvasiveDebugAllowed() // ================================= // Returns TRUE if Trace and PC Sample-based Profiling are allowed boolean ExternalNoninvasiveDebugAllowed() return (ExternalNoninvasiveDebugEnabled() && (!IsSecure() || ExternalSecureNoninvasiveDebugEnabled() || (ELUsingAArch32(EL1) && PSTATE.EL == EL0 && SDER.SUNIDEN == '1')));
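The MAIR attribute byte decode above splits into a high (outer) and low (inner) nibble, with a high nibble of 0000 selecting Device memory and the low nibble then picking the Device sub-type. A hedged Python sketch of that split; reserved encodings and the tagged-memory case are omitted, and the helper name is illustrative:

def decode_attr(attrfield):
    # attrfield is one 8-bit field extracted from MAIR at 8*AttrIndx.
    hi, lo = attrfield >> 4, attrfield & 0xF
    if hi == 0b0000:
        return ('Device', {0b0000: 'nGnRnE', 0b0100: 'nGnRE',
                           0b1000: 'nGRE', 0b1100: 'GRE'}.get(lo, 'reserved'))
    # Otherwise Normal: high nibble encodes outer, low nibble inner cacheability.
    return ('Normal', {'outer': hi, 'inner': lo})

assert decode_attr(0x00) == ('Device', 'nGnRnE')
assert decode_attr(0xFF) == ('Normal', {'outer': 0xF, 'inner': 0xF})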

Library pseudocode for aarch64/translation/attrs/AArch64.TranslateAddressS1Off

// AArch64.TranslateAddressS1Off() // =============================== // Called for stage 1 translations when translation is disabled to supply a default translation. // Note that there are additional constraints on instruction prefetching that are not described in // this pseudocode. TLBRecord AArch64.TranslateAddressS1Off(bits(64) vaddress, AccType acctype, boolean iswrite) assert !ELUsingAArch32(S1TranslationRegime()); TLBRecord result; result.descupdate.AF = FALSE; result.descupdate.AP = FALSE; Top = AddrTop(vaddress, (acctype == AccType_IFETCH), PSTATE.EL); if !IsZero(vaddress<Top:PAMax()>) then level = 0; ipaddress = bits(52) UNKNOWN; secondstage = FALSE; s2fs1walk = FALSE; result.addrdesc.fault = AArch64.AddressSizeFault(ipaddress, boolean UNKNOWN, level, acctype, iswrite, secondstage, s2fs1walk); return result; default_cacheable = (HasS2Translation() && HCR_EL2.DC == '1'); if default_cacheable then // Use default cacheable settings result.addrdesc.memattrs.memtype = MemType_Normal; result.addrdesc.memattrs.inner.attrs = MemAttr_WB; // Write-back result.addrdesc.memattrs.inner.hints = MemHint_RWA; result.addrdesc.memattrs.shareable = FALSE; result.addrdesc.memattrs.outershareable = FALSE; result.addrdesc.memattrs.tagged = HCR_EL2.DCT == '1'; elsif acctype != AccType_IFETCH then // Treat data as Device result.addrdesc.memattrs.memtype = MemType_Device; result.addrdesc.memattrs.device = DeviceType_nGnRnE; result.addrdesc.memattrs.inner = MemAttrHints UNKNOWN; result.addrdesc.memattrs.tagged = FALSE; else // Instruction cacheability controlled by SCTLR_ELx.I cacheable = SCTLR[].I == '1'; result.addrdesc.memattrs.memtype = MemType_Normal; if cacheable then result.addrdesc.memattrs.inner.attrs = MemAttr_WT; result.addrdesc.memattrs.inner.hints = MemHint_RA; else result.addrdesc.memattrs.inner.attrs = MemAttr_NC; result.addrdesc.memattrs.inner.hints = MemHint_No; result.addrdesc.memattrs.shareable = TRUE; result.addrdesc.memattrs.outershareable = TRUE; result.addrdesc.memattrs.tagged = FALSE; result.addrdesc.memattrs.outer = result.addrdesc.memattrs.inner; result.addrdesc.memattrs = MemAttrDefaults(result.addrdesc.memattrs); result.perms.ap = bits(3) UNKNOWN; result.perms.xn = '0'; result.perms.pxn = '0'; result.nG = bit UNKNOWN; result.contiguous = boolean UNKNOWN; result.domain = bits(4) UNKNOWN; result.level = integer UNKNOWN; result.blocksize = integer UNKNOWN; result.addrdesc.paddress.address = vaddress<51:0>; result.addrdesc.paddress.NS = if IsSecure() then '0' else '1'; result.addrdesc.fault = AArch64.NoFault(); result.descupdate.descaddr = result.addrdesc; return result;

Library pseudocode for shared/debug/authentication/ExternalNoninvasiveDebugEnabled

// ExternalNoninvasiveDebugEnabled() // ================================= // This function returns TRUE if FEAT_Debugv8p4 is implemented, otherwise this // function is IMPLEMENTATION DEFINED. // In the recommended interface, ExternalNoninvasiveDebugEnabled returns the state of the (DBGEN // OR NIDEN) signal. boolean ExternalNoninvasiveDebugEnabled() return !HaveNoninvasiveDebugAuth() || ExternalInvasiveDebugEnabled() || NIDEN == HIGH;

Library pseudocode for aarch64/translation/checks/AArch64.AccessUsesEL

// AArch64.AccessUsesEL() // ====================== // Returns the Exception Level of the regime that will manage the translation for a given access type. bits(2) AArch64.AccessUsesEL(AccType acctype) if acctype == AccType_UNPRIV then return EL0; elsif acctype == AccType_NV2REGISTER then return EL2; else return PSTATE.EL;

Library pseudocode for shared/debug/authentication/ExternalSecureInvasiveDebugEnabled

// ExternalSecureInvasiveDebugEnabled() // ==================================== // The definition of this function is IMPLEMENTATION DEFINED. // In the recommended interface, this function returns the state of the (DBGEN AND SPIDEN) signal. // CoreSight allows asserting SPIDEN without also asserting DBGEN, but this is not recommended. boolean ExternalSecureInvasiveDebugEnabled() if !HaveEL(EL3) && !IsSecure() then return FALSE; return ExternalInvasiveDebugEnabled() && SPIDEN == HIGH;

Library pseudocode for aarch64/translation/checks/AArch64.CheckPermission

// AArch64.CheckPermission() // ========================= // Function used for permission checking from AArch64 stage 1 translations FaultRecord AArch64.CheckPermission(Permissions perms, bits(64) vaddress, integer level, bit NS, AccType acctype, boolean iswrite) assert !ELUsingAArch32(S1TranslationRegime()); wxn = SCTLR[].WXN == '1'; if (PSTATE.EL == EL0 || IsInHost() || (PSTATE.EL == EL1 && !HaveNV2Ext()) || (PSTATE.EL == EL1 && HaveNV2Ext() && (acctype != AccType_NV2REGISTER || !ELIsInHost(EL2)))) then priv_r = TRUE; priv_w = perms.ap<2> == '0'; user_r = perms.ap<1> == '1'; user_w = perms.ap<2:1> == '01'; ispriv = AArch64.AccessUsesEL(acctype) != EL0; user_xn = perms.xn == '1' || (user_w && wxn); priv_xn = perms.pxn == '1' || (priv_w && wxn) || user_w; pan = if HavePANExt() then PSTATE.PAN else '0'; epan = if HavePAN3Ext() then SCTLR[].EPAN else '0'; // Restriction on Secure instruction fetch if boolean IMPLEMENTATION_DEFINED "SCR_EL3.SIF affects PAN3 execute permission check" then if HaveEL(EL3) && IsSecure() && NS == '1' && SCR_EL3.SIF == '1' then user_xn = TRUE; priv_xn = TRUE; if (EL2Enabled() && ((PSTATE.EL == EL1 && HaveNVExt() && HCR_EL2.<NV, NV1> == '11') || (HaveNV2Ext() && acctype == AccType_NV2REGISTER && HCR_EL2.NV2 == '1'))) then pan = '0'; is_ldst = !(acctype IN {AccType_DC, AccType_DC_UNPRIV, AccType_AT, AccType_IFETCH}); is_ats1xp = (acctype == AccType_AT && AArch64.ExecutingATS1xPInstr()); if (pan == '1' && (user_r || (epan == '1' && !user_xn)) && ispriv && (is_ldst || is_ats1xp)) then priv_r = FALSE; priv_w = FALSE; if ispriv then (r, w, xn) = (priv_r, priv_w, priv_xn); else (r, w, xn) = (user_r, user_w, user_xn); else // Access from EL2 or EL3 r = TRUE; w = perms.ap<2> == '0'; xn = perms.xn == '1' || (w && wxn); // Restriction on Secure instruction fetch if !boolean IMPLEMENTATION_DEFINED "SCR_EL3.SIF affects PAN3 execute permission check" then if HaveEL(EL3) && IsSecure() && NS == '1' && SCR_EL3.SIF == '1' then xn = TRUE; if acctype == AccType_IFETCH then fail = xn; failedread = TRUE; elsif acctype IN { AccType_ATOMICRW, AccType_ORDEREDRW, AccType_ORDEREDATOMICRW } then fail = !r || !w; failedread = !r; elsif iswrite then fail = !w; failedread = FALSE; elsif acctype == AccType_DC && PSTATE.EL != EL0 then // DC maintenance instructions operating by VA, cannot fault from stage 1 translation, // other than DC IVAC, which requires write permission, and operations executed at EL0, // which require read permission. fail = FALSE; else fail = !r; failedread = TRUE; if fail then secondstage = FALSE; s2fs1walk = FALSE; ipaddress = bits(52) UNKNOWN; return AArch64.PermissionFault(ipaddress, boolean UNKNOWN, level, acctype, !failedread, secondstage, s2fs1walk); else return AArch64.NoFault();

Library pseudocode for shared/debug/authentication/ExternalSecureNoninvasiveDebugEnabled

// ExternalSecureNoninvasiveDebugEnabled() // ======================================= // This function returns the value of ExternalSecureInvasiveDebugEnabled() when FEAT_Debugv8p4 // is implemented. Otherwise, the definition of this function is IMPLEMENTATION DEFINED. // In the recommended interface, this function returns the state of the (DBGEN OR NIDEN) AND // (SPIDEN OR SPNIDEN) signal. boolean ExternalSecureNoninvasiveDebugEnabled() if !HaveEL(EL3) && !IsSecure() then return FALSE; if HaveNoninvasiveDebugAuth() then return ExternalNoninvasiveDebugEnabled() && (SPIDEN == HIGH || SPNIDEN == HIGH); else return ExternalSecureInvasiveDebugEnabled();

Library pseudocode for aarch64/translation/checks/AArch64.CheckS2Permission

// AArch64.CheckS2Permission() // =========================== // Function used for permission checking from AArch64 stage 2 translations FaultRecord AArch64.CheckS2Permission(Permissions perms, bits(64) vaddress, bits(52) ipaddress, integer level, AccType acctype, boolean iswrite, boolean NS, boolean s2fs1walk, boolean hwupdatewalk) assert (IsSecureEL2Enabled() || (HaveEL(EL2) && !IsSecure() && !ELUsingAArch32(EL2))) && HasS2Translation(); r = perms.ap<1> == '1'; w = perms.ap<2> == '1'; if HaveExtendedExecuteNeverExt() then case perms.xn:perms.xxn of when '00' xn = FALSE; when '01' xn = PSTATE.EL == EL1; when '10' xn = TRUE; when '11' xn = PSTATE.EL == EL0; else xn = perms.xn == '1'; // Stage 1 walk is checked as a read, regardless of the original type if acctype == AccType_IFETCH && !s2fs1walk then fail = xn; failedread = TRUE; elsif (acctype IN { AccType_ATOMICRW, AccType_ORDEREDRW, AccType_ORDEREDATOMICRW }) && !s2fs1walk then fail = !r || !w; failedread = !r; elsif iswrite && !s2fs1walk then fail = !w; failedread = FALSE; elsif acctype == AccType_DC && PSTATE.EL != EL0 && !s2fs1walk then // DC maintenance instructions operating by VA, with the exception of DC IVAC, do // not generate Permission faults from stage 2 translation, other than when // performing a stage 1 translation table walk. fail = FALSE; elsif hwupdatewalk then fail = !w; failedread = !iswrite; else fail = !r; failedread = !iswrite; if fail then domain = bits(4) UNKNOWN; secondstage = TRUE; return AArch64.PermissionFault(ipaddress, NS, level, acctype, !failedread, secondstage, s2fs1walk); else return AArch64.NoFault();

Library pseudocode for shared/debug/authentication/IsAccessSecure

// Returns TRUE when an access is Secure boolean IsAccessSecure();

Library pseudocode for aarch64/translation/debug/AArch64.CheckBreakpoint

// AArch64.CheckBreakpoint() // ========================= // Called before executing the instruction of length "size" bytes at "vaddress" in an AArch64 // translation regime, when either debug exceptions are enabled, or halting debug is enabled // and halting is allowed. FaultRecord AArch64.CheckBreakpoint(bits(64) vaddress, AccType acctype, integer size) assert !ELUsingAArch32(S1TranslationRegime()); assert (UsingAArch32() && size IN {2,4}) || size == 4; match = FALSE; for i = 0 to UInt(ID_AA64DFR0_EL1.BRPs) match_i = AArch64.BreakpointMatch(i, vaddress, acctype, size); match = match || match_i; if match && HaltOnBreakpointOrWatchpoint() then reason = DebugHalt_Breakpoint; Halt(reason); elsif match then acctype = AccType_IFETCH; iswrite = FALSE; return AArch64.DebugFault(acctype, iswrite); else return AArch64.NoFault();

Library pseudocode for shared/debug/authentication/IsCorePowered

// Returns TRUE if the Core power domain is powered on, FALSE otherwise. boolean IsCorePowered();

Library pseudocode for aarch64/translation/debug/AArch64.CheckDebug

// AArch64.CheckDebug() // ==================== // Called on each access to check for a debug exception or entry to Debug state. FaultRecord AArch64.CheckDebug(bits(64) vaddress, AccType acctype, boolean iswrite, integer size) FaultRecord fault = AArch64.NoFault(); d_side = (acctype != AccType_IFETCH); if HaveNV2Ext() && acctype == AccType_NV2REGISTER then mask = '0'; generate_exception = AArch64.GenerateDebugExceptionsFrom(EL2, IsSecure(), mask) && MDSCR_EL1.MDE == '1'; else generate_exception = AArch64.GenerateDebugExceptions() && MDSCR_EL1.MDE == '1'; halt = HaltOnBreakpointOrWatchpoint(); if generate_exception || halt then if d_side then fault = AArch64.CheckWatchpoint(vaddress, acctype, iswrite, size); else fault = AArch64.CheckBreakpoint(vaddress, acctype, size); return fault;

Library pseudocode for shared/debug/breakpoint/CheckValidStateMatch

// CheckValidStateMatch() // ====================== // Checks for an invalid state match that will generate Constrained Unpredictable behaviour, otherwise // returns Constraint_NONE. (Constraint, bits(2), bit, bits(2)) CheckValidStateMatch(bits(2) SSC, bit HMC, bits(2) PxC, boolean isbreakpnt) boolean reserved = FALSE; // Match 'Usr/Sys/Svc' only valid for AArch32 breakpoints if (!isbreakpnt || !HaveAArch32EL(EL1)) && HMC:PxC == '000' && SSC != '11' then reserved = TRUE; // Both EL3 and EL2 are not implemented if !HaveEL(EL3) && !HaveEL(EL2) && (HMC != '0' || SSC != '00') then reserved = TRUE; // EL3 is not implemented if !HaveEL(EL3) && SSC IN {'01','10'} && HMC:SSC:PxC != '10100' then reserved = TRUE; // EL3 using AArch64 only if (!HaveEL(EL3) || HighestELUsingAArch32()) && HMC:SSC:PxC == '11000' then reserved = TRUE; // EL2 is not implemented if !HaveEL(EL2) && HMC:SSC:PxC == '11100' then reserved = TRUE; // Secure EL2 is not implemented if !HaveSecureEL2Ext() && (HMC:SSC:PxC) IN {'01100','10100','x11x1'} then reserved = TRUE; // Values that are not allocated in any architecture version if (HMC:SSC:PxC) IN {'01110','100x0','10110','11x10'} then reserved = TRUE; if reserved then // If parameters are set to a reserved type, behaves as either disabled or a defined type (c, <HMC,SSC,PxC>) = ConstrainUnpredictableBits(Unpredictable_RESBPWPCTRL); assert c IN {Constraint_DISABLED, Constraint_UNKNOWN}; if c == Constraint_DISABLED then return (c, bits(2) UNKNOWN, bit UNKNOWN, bits(2) UNKNOWN); // Otherwise the value returned by ConstrainUnpredictableBits must be a not-reserved value return (Constraint_NONE, SSC, HMC, PxC);
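The last screen in CheckValidStateMatch() rejects HMC:SSC:PxC patterns that no architecture version allocates. A small Python sketch of just that pattern match, assuming a configuration where EL3, EL2 and AArch32 EL1 are all implemented so the configuration-dependent checks pass ('x' is a don't-care bit):

def is_reserved(hmc, ssc, pxc):
    word = f"{hmc}{ssc}{pxc}"   # HMC:SSC:PxC as a 5-bit pattern string
    def match(pat):             # compare pattern to word, 'x' matches anything
        return all(p in ('x', c) for p, c in zip(pat, word))
    return any(match(p) for p in ('01110', '100x0', '10110', '11x10'))

assert is_reserved('0', '11', '10')        # '01110' is never allocated
assert not is_reserved('0', '00', '11')    # a valid match-all/privileged encoding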

Library pseudocode for aarch64/translation/debug/AArch64.CheckWatchpoint

// AArch64.CheckWatchpoint() // ========================= // Called before accessing the memory location of "size" bytes at "address", // when either debug exceptions are enabled for the access, or halting debug // is enabled and halting is allowed. FaultRecord AArch64.CheckWatchpoint(bits(64) vaddress, AccType acctype, boolean iswrite, integer size) assert !ELUsingAArch32(S1TranslationRegime()); if acctype IN {AccType_TTW, AccType_IC, AccType_AT} then return AArch64.NoFault(); if acctype == AccType_DC then if !iswrite then return AArch64.NoFault(); match = FALSE; ispriv = AArch64.AccessUsesEL(acctype) != EL0; for i = 0 to UInt(ID_AA64DFR0_EL1.WRPs) match = match || AArch64.WatchpointMatch(i, vaddress, size, ispriv, acctype, iswrite); if match && HaltOnBreakpointOrWatchpoint() then if acctype != AccType_NONFAULT && acctype != AccType_CNOTFIRST then reason = DebugHalt_Watchpoint; EDWAR = vaddress; Halt(reason); else // Fault will be reported and cancelled return AArch64.DebugFault(acctype, iswrite); elsif match then return AArch64.DebugFault(acctype, iswrite); else return AArch64.NoFault();

Library pseudocode for shared/debug/cti/CTI_SetEventLevel

// Set a Cross Trigger multi-cycle input event trigger to the specified level. CTI_SetEventLevel(CrossTriggerIn id, signal level);

Library pseudocode for aarch64/translation/faults/AArch64.AccessFlagFault

// AArch64.AccessFlagFault() // ========================= FaultRecord AArch64.AccessFlagFault(bits(52) ipaddress, boolean NS, integer level, AccType acctype, boolean iswrite, boolean secondstage, boolean s2fs1walk) extflag = bit UNKNOWN; errortype = bits(2) UNKNOWN; return AArch64.CreateFaultRecord(Fault_AccessFlag, ipaddress, NS, level, acctype, iswrite, extflag, errortype, secondstage, s2fs1walk);

Library pseudocode for shared/debug/cti/CTI_SignalEvent

// Signal a discrete event on a Cross Trigger input event trigger. CTI_SignalEvent(CrossTriggerIn id);

Library pseudocode for aarch64/translation/faults/AArch64.AddressSizeFault

// AArch64.AddressSizeFault() // ========================== FaultRecord AArch64.AddressSizeFault(bits(52) ipaddress, boolean NS, integer level, AccType acctype, boolean iswrite, boolean secondstage, boolean s2fs1walk) extflag = bit UNKNOWN; errortype = bits(2) UNKNOWN; return AArch64.CreateFaultRecord(Fault_AddressSize, ipaddress, NS, level, acctype, iswrite, extflag, errortype, secondstage, s2fs1walk);

Library pseudocode for shared/debug/cti/CrossTrigger

enumeration CrossTriggerOut {CrossTriggerOut_DebugRequest, CrossTriggerOut_RestartRequest, CrossTriggerOut_IRQ, CrossTriggerOut_RSVD3, CrossTriggerOut_TraceExtIn0, CrossTriggerOut_TraceExtIn1, CrossTriggerOut_TraceExtIn2, CrossTriggerOut_TraceExtIn3}; enumeration CrossTriggerIn {CrossTriggerIn_CrossHalt, CrossTriggerIn_PMUOverflow, CrossTriggerIn_RSVD2, CrossTriggerIn_RSVD3, CrossTriggerIn_TraceExtOut0, CrossTriggerIn_TraceExtOut1, CrossTriggerIn_TraceExtOut2, CrossTriggerIn_TraceExtOut3};

Library pseudocode for aarch64/translation/faults/AArch64.AlignmentFault

// AArch64.AlignmentFault() // ======================== FaultRecord AArch64.AlignmentFault(AccType acctype, boolean iswrite, boolean secondstage) ipaddress = bits(52) UNKNOWN; level = integer UNKNOWN; extflag = bit UNKNOWN; errortype = bits(2) UNKNOWN; s2fs1walk = boolean UNKNOWN; return AArch64.CreateFaultRecord(Fault_Alignment, ipaddress, boolean UNKNOWN, level, acctype, iswrite, extflag, errortype, secondstage, s2fs1walk);

Library pseudocode for shared/debug/dccanditr/CheckForDCCInterrupts

// CheckForDCCInterrupts() // ======================= CheckForDCCInterrupts() commrx = (EDSCR.RXfull == '1'); commtx = (EDSCR.TXfull == '0'); // COMMRX and COMMTX support is optional and not recommended for new designs. // SetInterruptRequestLevel(InterruptID_COMMRX, if commrx then HIGH else LOW); // SetInterruptRequestLevel(InterruptID_COMMTX, if commtx then HIGH else LOW); // The value to be driven onto the common COMMIRQ signal. if ELUsingAArch32(EL1) then commirq = ((commrx && DBGDCCINT.RX == '1') || (commtx && DBGDCCINT.TX == '1')); else commirq = ((commrx && MDCCINT_EL1.RX == '1') || (commtx && MDCCINT_EL1.TX == '1')); SetInterruptRequestLevel(InterruptID_COMMIRQ, if commirq then HIGH else LOW); return;
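The COMMIRQ computation reduces to: pend the interrupt when the RX buffer is full and RX interrupts are enabled, or the TX buffer is empty and TX interrupts are enabled (MDCCINT_EL1.RX/TX supply the enables in AArch64 state). A one-function Python model of that logic:

def commirq(rxfull, txfull, int_rx, int_tx):
    # commrx = RX buffer full; commtx = TX buffer empty (ready for more data).
    return (rxfull and int_rx) or (not txfull and int_tx)

assert commirq(True, True, True, False)       # RX full, RX interrupt enabled
assert not commirq(False, True, False, True)  # TX still busy, nothing to signal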

Library pseudocode for aarch64/translation/faults/AArch64.AsynchExternalAbort

// AArch64.AsynchExternalAbort() // ============================= // Wrapper function for asynchronous external aborts FaultRecord AArch64.AsynchExternalAbort(boolean parity, bits(2) errortype, bit extflag) faulttype = if parity then Fault_AsyncParity else Fault_AsyncExternal; ipaddress = bits(52) UNKNOWN; level = integer UNKNOWN; acctype = AccType_NORMAL; iswrite = boolean UNKNOWN; secondstage = FALSE; s2fs1walk = FALSE; return AArch64.CreateFaultRecord(faulttype, ipaddress, boolean UNKNOWN, level, acctype, iswrite, extflag, errortype, secondstage, s2fs1walk);

Library pseudocode for shared/debug/dccanditr/DBGDTRRX_EL0

// DBGDTRRX_EL0[] (external write) // =============================== // Called on writes to debug register 0x08C. DBGDTRRX_EL0[boolean memory_mapped] = bits(32) value if EDPRSR<6:5,0> != '001' then // Check DLK, OSLK and PU bits IMPLEMENTATION_DEFINED "generate error response"; return; if EDSCR.ERR == '1' then return; // Error flag set: ignore write // The Software lock is OPTIONAL. if memory_mapped && EDLSR.SLK == '1' then return; // Software lock locked: ignore write if EDSCR.RXfull == '1' || (Halted() && EDSCR.MA == '1' && EDSCR.ITE == '0') then EDSCR.RXO = '1'; EDSCR.ERR = '1'; // Overrun condition: ignore write return; EDSCR.RXfull = '1'; DTRRX = value; if Halted() && EDSCR.MA == '1' then EDSCR.ITE = '0'; // See comments in EDITR[] (external write) if !UsingAArch32() then ExecuteA64(0xD5330501<31:0>); // A64 "MRS X1,DBGDTRRX_EL0" ExecuteA64(0xB8004401<31:0>); // A64 "STR W1,[X0],#4" X[1] = bits(64) UNKNOWN; else ExecuteT32(0xEE10<15:0> /*hw1*/, 0x1E15<15:0> /*hw2*/); // T32 "MRS R1,DBGDTRRXint" ExecuteT32(0xF840<15:0> /*hw1*/, 0x1B04<15:0> /*hw2*/); // T32 "STR R1,[R0],#4" R[1] = bits(32) UNKNOWN; // If the store aborts, the Data Abort exception is taken and EDSCR.ERR is set to 1 if EDSCR.ERR == '1' then EDSCR.RXfull = bit UNKNOWN; DBGDTRRX_EL0 = bits(32) UNKNOWN; else // "MRS X1,DBGDTRRX_EL0" calls DBGDTR_EL0[] (read) which clears RXfull. assert EDSCR.RXfull == '0'; EDSCR.ITE = '1'; // See comments in EDITR[] (external write) return; // DBGDTRRX_EL0[] (external read) // ============================== bits(32) DBGDTRRX_EL0[boolean memory_mapped] return DTRRX;

Library pseudocode for aarch64/translation/faults/AArch64.DebugFault

// AArch64.DebugFault() // ==================== FaultRecord AArch64.DebugFault(AccType acctype, boolean iswrite) ipaddress = bits(52) UNKNOWN; errortype = bits(2) UNKNOWN; level = integer UNKNOWN; extflag = bit UNKNOWN; secondstage = FALSE; s2fs1walk = FALSE; return AArch64.CreateFaultRecord(Fault_Debug, ipaddress, boolean UNKNOWN, level, acctype, iswrite, extflag, errortype, secondstage, s2fs1walk);

Library pseudocode for shared/debug/dccanditr/DBGDTRTX_EL0

// DBGDTRTX_EL0[] (external read) // ============================== // Called on reads of debug register 0x080. bits(32) DBGDTRTX_EL0[boolean memory_mapped] if EDPRSR<6:5,0> != '001' then // Check DLK, OSLK and PU bits IMPLEMENTATION_DEFINED "generate error response"; return bits(32) UNKNOWN; underrun = EDSCR.TXfull == '0' || (Halted() && EDSCR.MA == '1' && EDSCR.ITE == '0'); value = if underrun then bits(32) UNKNOWN else DTRTX; if EDSCR.ERR == '1' then return value; // Error flag set: no side-effects // The Software lock is OPTIONAL. if memory_mapped && EDLSR.SLK == '1' then // Software lock locked: no side-effects return value; if underrun then EDSCR.TXU = '1'; EDSCR.ERR = '1'; // Underrun condition: block side-effects return value; // Return UNKNOWN EDSCR.TXfull = '0'; if Halted() && EDSCR.MA == '1' then EDSCR.ITE = '0'; // See comments in EDITR[] (external write) if !UsingAArch32() then ExecuteA64(0xB8404401<31:0>); // A64 "LDR W1,[X0],#4" else ExecuteT32(0xF850<15:0> /*hw1*/, 0x1B04<15:0> /*hw2*/); // T32 "LDR R1,[R0],#4" // If the load aborts, the Data Abort exception is taken and EDSCR.ERR is set to 1 if EDSCR.ERR == '1' then EDSCR.TXfull = bit UNKNOWN; DBGDTRTX_EL0 = bits(32) UNKNOWN; else if !UsingAArch32() then ExecuteA64(0xD5130501<31:0>); // A64 "MSR DBGDTRTX_EL0,X1" else ExecuteT32(0xEE00<15:0> /*hw1*/, 0x1E15<15:0> /*hw2*/); // T32 "MSR DBGDTRTXint,R1" // "MSR DBGDTRTX_EL0,X1" calls DBGDTR_EL0[] (write) which sets TXfull. assert EDSCR.TXfull == '1'; if !UsingAArch32() then X[1] = bits(64) UNKNOWN; else R[1] = bits(32) UNKNOWN; EDSCR.ITE = '1'; // See comments in EDITR[] (external write) return value; // DBGDTRTX_EL0[] (external write) // =============================== DBGDTRTX_EL0[boolean memory_mapped] = bits(32) value // The Software lock is OPTIONAL. if memory_mapped && EDLSR.SLK == '1' then return; // Software lock locked: ignore write DTRTX = value; return;

Library pseudocode for shared/debug/dccanditr/DBGDTR_EL0

// DBGDTR_EL0[] (write)
// ====================
// System register writes to DBGDTR_EL0, DBGDTRTX_EL0 (AArch64) and DBGDTRTXint (AArch32)

DBGDTR_EL0[] = bits(N) value
    // For MSR DBGDTRTX_EL0,<Rt>  N=32, value=X[t]<31:0>, X[t]<63:32> is ignored
    // For MSR DBGDTR_EL0,<Xt>    N=64, value=X[t]<63:0>
    assert N IN {32,64};
    if EDSCR.TXfull == '1' then
        value = bits(N) UNKNOWN;
    // On a 64-bit write, implement a half-duplex channel
    if N == 64 then DTRRX = value<63:32>;
    DTRTX = value<31:0>;                            // 32-bit or 64-bit write
    EDSCR.TXfull = '1';
    return;

// DBGDTR_EL0[] (read)
// ===================
// System register reads of DBGDTR_EL0, DBGDTRRX_EL0 (AArch64) and DBGDTRRXint (AArch32)

bits(N) DBGDTR_EL0[]
    // For MRS <Rt>,DBGDTRTX_EL0  N=32, X[t]=Zeros(32):result
    // For MRS <Xt>,DBGDTR_EL0    N=64, X[t]=result
    assert N IN {32,64};
    bits(N) result;
    if EDSCR.RXfull == '0' then
        result = bits(N) UNKNOWN;
    else
        // On a 64-bit read, implement a half-duplex channel
        // NOTE: the word order is reversed on reads with regards to writes
        if N == 64 then result<63:32> = DTRTX;
        result<31:0> = DTRRX;
    EDSCR.RXfull = '0';
    return result;
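Note the word-order reversal in the half-duplex channel: a 64-bit write sends value<63:32> to DTRRX and value<31:0> to DTRTX, while a 64-bit read returns DTRTX:DTRRX. The C sketch below demonstrates the resulting half-swap on a write/read round trip; it models only the data path and omits the TXfull/RXfull flag handling.

#include <assert.h>
#include <stdint.h>

/* Hypothetical model of the two 32-bit transfer registers. */
static uint32_t dtrrx, dtrtx;

/* MSR DBGDTR_EL0, <Xt>: high word to DTRRX, low word to DTRTX. */
void dbgdtr_write64(uint64_t value)
{
    dtrrx = (uint32_t)(value >> 32);
    dtrtx = (uint32_t)value;
}

/* MRS <Xt>, DBGDTR_EL0: word order is reversed with respect to writes. */
uint64_t dbgdtr_read64(void)
{
    return ((uint64_t)dtrtx << 32) | dtrrx;
}

int main(void)
{
    dbgdtr_write64(0x1111222233334444ull);
    /* The two 32-bit halves come back swapped. */
    assert(dbgdtr_read64() == 0x3333444411112222ull);
    return 0;
}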

Library pseudocode for shared/debug/dccanditr/DTR

bits(32) DTRRX;
bits(32) DTRTX;

Library pseudocode for shared/debug/dccanditr/EDITR

// EDITR[] (external write)
// ========================
// Called on writes to debug register 0x084.

EDITR[boolean memory_mapped] = bits(32) value
    if EDPRSR<6:5,0> != '001' then                  // Check DLK, OSLK and PU bits
        IMPLEMENTATION_DEFINED "generate error response";
        return;

    if EDSCR.ERR == '1' then return;                // Error flag set: ignore write

    // The Software lock is OPTIONAL.
    if memory_mapped && EDLSR.SLK == '1' then return;   // Software lock locked: ignore write

    if !Halted() then return;                       // Non-debug state: ignore write

    if EDSCR.ITE == '0' || EDSCR.MA == '1' then
        EDSCR.ITO = '1';  EDSCR.ERR = '1';          // Overrun condition: block write
        return;

    // ITE indicates whether the processor is ready to accept another instruction; the processor
    // may support multiple outstanding instructions. Unlike the "InstrCompl" flag in [v7A] there
    // is no indication that the pipeline is empty (all instructions have completed). In this
    // pseudocode, the assumption is that only one instruction can be executed at a time,
    // meaning ITE acts like "InstrCompl".
    EDSCR.ITE = '0';

    if !UsingAArch32() then
        ExecuteA64(value);
    else
        ExecuteT32(value<15:0>/*hw1*/, value<31:16> /*hw2*/);

    EDSCR.ITE = '1';

    return;
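From the debugger's side, the EDSCR.ITE handshake means each EDITR write must wait until the PE has retired the previous instruction, otherwise the write is blocked and EDSCR.ITO/ERR are set. A hypothetical host-side polling loop in C; edscr_read() and editr_write() stand in for real MMIO accesses, and the EDSCR bit positions assumed here are ITE at [24] and ERR at [6].

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical accessors for the memory-mapped external debug registers. */
extern uint32_t edscr_read(void);
extern void editr_write(uint32_t instr);

#define EDSCR_ITE (1u << 24)   /* assumed: ITE flag, EDSCR bit [24] */
#define EDSCR_ERR (1u << 6)    /* assumed: cumulative error flag, EDSCR bit [6] */

/* Issue one A64 opcode through EDITR, waiting for the PE to be ready. */
bool issue_instruction(uint32_t opcode)
{
    while ((edscr_read() & EDSCR_ITE) == 0) {
        if (edscr_read() & EDSCR_ERR)
            return false;      /* A previous step failed; clear errors first */
    }
    editr_write(opcode);
    return true;
}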

Library pseudocode for shared/debug/halting/DCPSInstruction

// DCPSInstruction()
// =================
// Operation of the DCPS instruction in Debug state

DCPSInstruction(bits(2) target_el)

    SynchronizeContext();

    case target_el of
        when EL1
            if PSTATE.EL == EL2 || (PSTATE.EL == EL3 && !UsingAArch32()) then handle_el = PSTATE.EL;
            elsif EL2Enabled() && HCR_EL2.TGE == '1' then UNDEFINED;
            else handle_el = EL1;
        when EL2
            if !HaveEL(EL2) then UNDEFINED;
            elsif PSTATE.EL == EL3 && !UsingAArch32() then handle_el = EL3;
            elsif !IsSecureEL2Enabled() && IsSecure() then UNDEFINED;
            else handle_el = EL2;
        when EL3
            if EDSCR.SDD == '1' || !HaveEL(EL3) then UNDEFINED;
            handle_el = EL3;
        otherwise
            Unreachable();

    from_secure = IsSecure();
    if ELUsingAArch32(handle_el) then
        if PSTATE.M == M32_Monitor then SCR.NS = '0';
        assert UsingAArch32();                      // Cannot move from AArch64 to AArch32
        case handle_el of
            when EL1
                AArch32.WriteMode(M32_Svc);
                if HavePANExt() && SCTLR.SPAN == '0' then
                    PSTATE.PAN = '1';
            when EL2  AArch32.WriteMode(M32_Hyp);
            when EL3
                AArch32.WriteMode(M32_Monitor);
                if HavePANExt() then
                    if !from_secure then
                        PSTATE.PAN = '0';
                    elsif SCTLR.SPAN == '0' then
                        PSTATE.PAN = '1';
        if handle_el == EL2 then
            ELR_hyp = bits(32) UNKNOWN;  HSR = bits(32) UNKNOWN;
        else
            LR = bits(32) UNKNOWN;
        SPSR[] = bits(32) UNKNOWN;
        PSTATE.E = SCTLR[].EE;
        DLR = bits(32) UNKNOWN;  DSPSR = bits(32) UNKNOWN;
    else                                            // Targeting AArch64
        if UsingAArch32() then
            AArch64.MaybeZeroRegisterUppers();
        MaybeZeroSVEUppers(target_el);
        PSTATE.nRW = '0';  PSTATE.SP = '1';  PSTATE.EL = handle_el;
        if (HavePANExt() && ((handle_el == EL1 && SCTLR_EL1.SPAN == '0') ||
                             (handle_el == EL2 && HCR_EL2.E2H == '1' &&
                              HCR_EL2.TGE == '1' && SCTLR_EL2.SPAN == '0'))) then
            PSTATE.PAN = '1';
        ELR[] = bits(64) UNKNOWN;  SPSR[] = bits(32) UNKNOWN;  ESR[] = bits(32) UNKNOWN;
        DLR_EL0 = bits(64) UNKNOWN;  DSPSR_EL0 = bits(32) UNKNOWN;
        if HaveUAOExt() then PSTATE.UAO = '0';
        if HaveMTEExt() then PSTATE.TCO = '1';

    UpdateEDSCRFields();                            // Update EDSCR PE state flags

    sync_errors = HaveIESB() && SCTLR[].IESB == '1';
    if HaveDoubleFaultExt() && !UsingAArch32() then
        sync_errors = sync_errors || (SCR_EL3.EA == '1' && SCR_EL3.NMEA == '1' && PSTATE.EL == EL3);
    // SCTLR[].IESB might be ignored in Debug state.
    if !ConstrainUnpredictableBool(Unpredictable_IESBinDebug) then sync_errors = FALSE;
    if sync_errors then SynchronizeErrors();

    return;

Library pseudocode for shared/debug/halting/DRPSInstruction

// DRPSInstruction()
// =================
// Operation of the A64 DRPS and T32 ERET instructions in Debug state

DRPSInstruction()

    SynchronizeContext();

    sync_errors = HaveIESB() && SCTLR[].IESB == '1';
    if HaveDoubleFaultExt() && !UsingAArch32() then
        sync_errors = sync_errors || (SCR_EL3.EA == '1' && SCR_EL3.NMEA == '1' && PSTATE.EL == EL3);
    // SCTLR[].IESB might be ignored in Debug state.
    if !ConstrainUnpredictableBool(Unpredictable_IESBinDebug) then sync_errors = FALSE;
    if sync_errors then SynchronizeErrors();

    SetPSTATEFromPSR(SPSR[]);

    // PSTATE.{N,Z,C,V,Q,GE,SS,D,A,I,F} are not observable and ignored in Debug state, so
    // behave as if UNKNOWN.
    if UsingAArch32() then
        PSTATE.<N,Z,C,V,Q,GE,SS,A,I,F> = bits(13) UNKNOWN;
        // In AArch32, all instructions are T32 and unconditional.
        PSTATE.IT = '00000000';  PSTATE.T = '1';    // PSTATE.J is RES0
        DLR = bits(32) UNKNOWN;  DSPSR = bits(32) UNKNOWN;
    else
        PSTATE.<N,Z,C,V,SS,D,A,I,F> = bits(9) UNKNOWN;
        DLR_EL0 = bits(64) UNKNOWN;  DSPSR_EL0 = bits(32) UNKNOWN;

    UpdateEDSCRFields();                            // Update EDSCR PE state flags

    return;

Library pseudocode for shared/debug/halting/DebugHalt

constant bits(6) DebugHalt_Breakpoint      = '000111';
constant bits(6) DebugHalt_EDBGRQ          = '010011';
constant bits(6) DebugHalt_Step_Normal     = '011011';
constant bits(6) DebugHalt_Step_Exclusive  = '011111';
constant bits(6) DebugHalt_OSUnlockCatch   = '100011';
constant bits(6) DebugHalt_ResetCatch      = '100111';
constant bits(6) DebugHalt_Watchpoint      = '101011';
constant bits(6) DebugHalt_HaltInstruction = '101111';
constant bits(6) DebugHalt_SoftwareAccess  = '110011';
constant bits(6) DebugHalt_ExceptionCatch  = '110111';
constant bits(6) DebugHalt_Step_NoSyndrome = '111011';
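These 6-bit codes are written to EDSCR.STATUS by Halt(); together with the two non-halted codes '000001' (restarting) and '000010' (PE in Non-debug state), they let a debugger decode why the PE stopped. An illustrative C decoder:

#include <stdint.h>

/* EDSCR.STATUS halt-reason codes from the constants above. */
const char *halt_reason(uint32_t edscr)
{
    switch (edscr & 0x3Fu) {                        /* STATUS is EDSCR[5:0] */
    case 0x01: return "restarting (not halted)";
    case 0x02: return "non-debug state (not halted)";
    case 0x07: return "breakpoint";                       /* 000111 */
    case 0x13: return "external debug request";           /* 010011 */
    case 0x1B: return "halting step (normal)";            /* 011011 */
    case 0x1F: return "halting step (exclusive)";         /* 011111 */
    case 0x23: return "OS unlock catch";                  /* 100011 */
    case 0x27: return "reset catch";                      /* 100111 */
    case 0x2B: return "watchpoint";                       /* 101011 */
    case 0x2F: return "HLT instruction";                  /* 101111 */
    case 0x33: return "software access to debug register";/* 110011 */
    case 0x37: return "exception catch";                  /* 110111 */
    case 0x3B: return "halting step (no syndrome)";       /* 111011 */
    default:   return "reserved";
    }
}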

Library pseudocode for shared/debug/halting/DisableITRAndResumeInstructionPrefetch

DisableITRAndResumeInstructionPrefetch();

Library pseudocode for shared/debug/halting/ExecuteA64

// Execute an A64 instruction in Debug state.
ExecuteA64(bits(32) instr);

Library pseudocode for shared/debug/halting/ExecuteT32

// Execute a T32 instruction in Debug state.
ExecuteT32(bits(16) hw1, bits(16) hw2);

Library pseudocode for shared/debug/halting/ExitDebugState

// ExitDebugState()
// ================

ExitDebugState()
    assert Halted();
    SynchronizeContext();

    // Although EDSCR.STATUS signals that the PE is restarting, debuggers must use EDPRSR.SDR to
    // detect that the PE has restarted.
    EDSCR.STATUS = '000001';                        // Signal restarting
    EDESR<2:0> = '000';                             // Clear any pending Halting debug events

    bits(64) new_pc;
    bits(32) spsr;

    if UsingAArch32() then
        new_pc = ZeroExtend(DLR);
        spsr = DSPSR;
    else
        new_pc = DLR_EL0;
        spsr = DSPSR_EL0;

    // If this is an illegal return, SetPSTATEFromPSR() will set PSTATE.IL.
    SetPSTATEFromPSR(spsr);                         // Can update privileged bits, even at EL0

    if UsingAArch32() then
        if ConstrainUnpredictableBool(Unpredictable_RESTARTALIGNPC) then new_pc<0> = '0';
        BranchTo(new_pc<31:0>, BranchType_DBGEXIT); // AArch32 branch
    else
        // If targeting AArch32 then possibly zero the 32 most significant bits of the target PC
        if spsr<4> == '1' && ConstrainUnpredictableBool(Unpredictable_RESTARTZEROUPPERPC) then
            new_pc<63:32> = Zeros();
        BranchTo(new_pc, BranchType_DBGEXIT);       // A type of branch that is never predicted

    (EDSCR.STATUS,EDPRSR.SDR) = ('000010','1');     // Atomically signal restarted
    UpdateEDSCRFields();                            // Stop signalling PE state
    DisableITRAndResumeInstructionPrefetch();

    return;
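As the comment above notes, EDSCR.STATUS momentarily reads as '000001' while the PE is restarting, so a debugger must poll the sticky EDPRSR.SDR flag to confirm that the restart completed. A hypothetical host-side wait loop in C; edprsr_read() stands in for an MMIO read of EDPRSR, and the SDR bit position at [11] is an assumption of this sketch.

#include <stdint.h>

/* Hypothetical MMIO accessor for EDPRSR. */
extern uint32_t edprsr_read(void);

#define EDPRSR_SDR (1u << 11)   /* assumed: Sticky Debug Restart flag */

/* After requesting a restart, wait for the PE to leave Debug state.
 * EDPRSR.SDR is sticky, so observing it set once is sufficient. */
void wait_for_restart(void)
{
    while ((edprsr_read() & EDPRSR_SDR) == 0) {
        /* spin; a real debugger would bound this with a timeout */
    }
}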

Library pseudocode for shared/debug/halting/Halt

// Halt()
// ======

Halt(bits(6) reason)

    CTI_SignalEvent(CrossTriggerIn_CrossHalt);      // Trigger other cores to halt

    bits(64) preferred_restart_address = ThisInstrAddr();
    spsr = GetPSRFromPSTATE();

    if UsingAArch32() then
        // If entering from AArch32 state, spsr<21> is the DIT bit which has to be moved for DSPSR
        spsr<24> = spsr<21>;
        spsr<21> = PSTATE.SS;                       // Always save the SS bit

    if (HaveBTIExt() &&
          !(reason IN {DebugHalt_Step_Normal, DebugHalt_Step_Exclusive, DebugHalt_Step_NoSyndrome,
                       DebugHalt_Breakpoint, DebugHalt_HaltInstruction}) &&
          ConstrainUnpredictableBool(Unpredictable_ZEROBTYPE)) then
        spsr<11:10> = '00';                         // Zero the BTYPE bits saved to DSPSR

    if UsingAArch32() then
        DLR = preferred_restart_address<31:0>;
        DSPSR = spsr;
    else
        DLR_EL0 = preferred_restart_address;
        DSPSR_EL0 = spsr;

    EDSCR.ITE = '1';
    EDSCR.ITO = '0';
    if IsSecure() then
        EDSCR.SDD = '0';                            // If entered in Secure state, allow debug
    elsif HaveEL(EL3) then
        EDSCR.SDD = if ExternalSecureInvasiveDebugEnabled() then '0' else '1';
    else
        assert EDSCR.SDD == '1';                    // Otherwise EDSCR.SDD is RES1
    EDSCR.MA = '0';

    // In Debug state:
    // * PSTATE.{SS,SSBS,D,A,I,F} are not observable and ignored so behave-as-if UNKNOWN.
    // * PSTATE.{N,Z,C,V,Q,GE,E,M,nRW,EL,SP,DIT} are also not observable, but since these
    //   are not changed on exception entry, this function also leaves them unchanged.
    // * PSTATE.{IT,T} are ignored.
    // * PSTATE.IL is ignored and behave-as-if 0.
    // * PSTATE.{UAO,PAN} are observable and not changed on entry into Debug state.
    if UsingAArch32() then
        PSTATE.<IT,SS,SSBS,A,I,F,T> = bits(14) UNKNOWN;
    else
        PSTATE.<SS,SSBS,D,A,I,F> = bits(6) UNKNOWN;
    PSTATE.BTYPE = '00';
    PSTATE.IL = '0';

    StopInstructionPrefetchAndEnableITR();
    EDSCR.STATUS = reason;                          // Signal entered Debug state
    UpdateEDSCRFields();                            // Update EDSCR PE state flags.

    return;

Library pseudocode for shared/debug/halting/HaltOnBreakpointOrWatchpoint

// HaltOnBreakpointOrWatchpoint()
// ==============================
// Returns TRUE if the Breakpoint and Watchpoint debug events should be considered for Debug
// state entry, FALSE if they should be considered for a debug exception.

boolean HaltOnBreakpointOrWatchpoint()
    return HaltingAllowed() && EDSCR.HDE == '1' && OSLSR_EL1.OSLK == '0';

Library pseudocode for shared/debug/halting/Halted

// Halted()
// ========

boolean Halted()
    return !(EDSCR.STATUS IN {'000001', '000010'}); // Halted
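Expressed as bit tests on a raw EDSCR value, Halted() and the Restarting() check below reduce to comparisons on the low six bits, as in this C sketch:

#include <stdbool.h>
#include <stdint.h>

/* EDSCR.STATUS is EDSCR[5:0]; see Halted() above and Restarting() below. */
bool edscr_halted(uint32_t edscr)
{
    uint32_t status = edscr & 0x3Fu;
    return status != 0x01 && status != 0x02;   /* '000001'/'000010' => not halted */
}

bool edscr_restarting(uint32_t edscr)
{
    return (edscr & 0x3Fu) == 0x01;            /* '000001' */
}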

Library pseudocode for shared/debug/halting/HaltingAllowed

// HaltingAllowed()
// ================
// Returns TRUE if halting is currently allowed, FALSE if halting is prohibited.

boolean HaltingAllowed()
    if Halted() || DoubleLockStatus() then
        return FALSE;
    elsif IsSecure() then
        return ExternalSecureInvasiveDebugEnabled();
    else
        return ExternalInvasiveDebugEnabled();

Library pseudocode for shared/debug/halting/Restarting

// Restarting()
// ============

boolean Restarting()
    return EDSCR.STATUS == '000001';                // Restarting

Library pseudocode for shared/debug/halting/StopInstructionPrefetchAndEnableITR

StopInstructionPrefetchAndEnableITR();

Library pseudocode for shared/debug/halting/UpdateEDSCRFields

// UpdateEDSCRFields()
// ===================
// Update EDSCR PE state fields

UpdateEDSCRFields()

    if !Halted() then
        EDSCR.EL = '00';
        EDSCR.NS = bit UNKNOWN;
        EDSCR.RW = '1111';
    else
        EDSCR.EL = PSTATE.EL;
        EDSCR.NS = if IsSecure() then '0' else '1';

        bits(4) RW;
        RW<1> = if ELUsingAArch32(EL1) then '0' else '1';
        if PSTATE.EL != EL0 then
            RW<0> = RW<1>;
        else
            RW<0> = if UsingAArch32() then '0' else '1';
        if !HaveEL(EL2) || (HaveEL(EL3) && SCR_GEN[].NS == '0' && !IsSecureEL2Enabled()) then
            RW<2> = RW<1>;
        else
            RW<2> = if ELUsingAArch32(EL2) then '0' else '1';
        if !HaveEL(EL3) then
            RW<3> = RW<2>;
        else
            RW<3> = if ELUsingAArch32(EL3) then '0' else '1';

        // The least-significant bits of EDSCR.RW are UNKNOWN if any higher EL is using AArch32.
        if RW<3> == '0' then RW<2:0> = bits(3) UNKNOWN;
        elsif RW<2> == '0' then RW<1:0> = bits(2) UNKNOWN;
        elsif RW<1> == '0' then RW<0> = bit UNKNOWN;
        EDSCR.RW = RW;
    return;
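The RW nibble records, from EL0 up to EL3, whether each Exception level is using AArch64, with a bit copied from the next level up when the corresponding level is out of scope, and bits below the first AArch32 level made UNKNOWN. A C sketch of this computation for the common configuration with EL2 and EL3 implemented and Secure EL2 disabled (UNKNOWN bits are modelled as zero):

#include <stdbool.h>
#include <stdint.h>

/* Compute the EDSCR.RW nibble; each elN_aarch32 flag says whether that EL
 * is using AArch32. This mirrors the pseudocode above under the stated
 * configuration assumptions. */
uint8_t edscr_rw(bool el0_aarch32, bool el1_aarch32, bool el2_aarch32,
                 bool el3_aarch32, bool at_el0, bool secure)
{
    bool rw1 = !el1_aarch32;
    bool rw0 = at_el0 ? !el0_aarch32 : rw1;
    bool rw2 = secure ? rw1 : !el2_aarch32;  /* Secure, no Secure EL2: RW<2> = RW<1> */
    bool rw3 = !el3_aarch32;

    uint8_t rw = (uint8_t)((rw3 << 3) | (rw2 << 2) | (rw1 << 1) | rw0);
    /* Lower bits are UNKNOWN when a higher EL uses AArch32; model UNKNOWN as 0. */
    if (!rw3)      rw &= 0x8;
    else if (!rw2) rw &= 0xC;
    else if (!rw1) rw &= 0xE;
    return rw;
}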

Library pseudocode for shared/debug/haltingevents/CheckExceptionCatch

// CheckExceptionCatch()
// =====================
// Check whether an Exception Catch debug event is set on the current Exception level

CheckExceptionCatch(boolean exception_entry)
    // Called after an exception entry or exit, that is, such that IsSecure() and PSTATE.EL are
    // correct for the exception target.
    base = if IsSecure() then 0 else 4;
    if HaltingAllowed() then
        if HaveExtendedECDebugEvents() then
            exception_exit = !exception_entry;
            ctrl = EDECCR<UInt(PSTATE.EL) + base + 8>:EDECCR<UInt(PSTATE.EL) + base>;
            case ctrl of
                when '00' halt = FALSE;
                when '01' halt = TRUE;
                when '10' halt = (exception_exit == TRUE);
                when '11' halt = (exception_entry == TRUE);
        else
            halt = (EDECCR<UInt(PSTATE.EL) + base> == '1');
        if halt then Halt(DebugHalt_ExceptionCatch);
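In the extended form, the two control bits for a given Exception level sit 8 bits apart in EDECCR, with Secure ELs indexed from bit 0 and Non-secure ELs from bit 4. An illustrative C helper that extracts the 2-bit control field tested by the case statement above:

#include <stdbool.h>
#include <stdint.h>

/* Extract the 2-bit Exception Catch control for a given EL and Security state,
 * mirroring the EDECCR indexing above (extended debug events assumed). */
unsigned exception_catch_ctrl(uint32_t edeccr, unsigned el, bool secure)
{
    unsigned base = secure ? 0 : 4;
    unsigned lo = (edeccr >> (el + base)) & 1u;       /* lower control bit */
    unsigned hi = (edeccr >> (el + base + 8)) & 1u;   /* extended control bit */
    return (hi << 1) | lo;  /* 00 disabled, 01 entry and exit, 10 exit, 11 entry */
}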

Library pseudocode for shared/debug/haltingevents/CheckHaltingStep

// CheckHaltingStep()
// ==================
// Check whether EDESR.SS has been set by Halting Step

CheckHaltingStep()
    if HaltingAllowed() && EDESR.SS == '1' then
        // The STATUS code depends on how we arrived at the state where EDESR.SS == 1.
        if HaltingStep_DidNotStep() then
            Halt(DebugHalt_Step_NoSyndrome);
        elsif HaltingStep_SteppedEX() then
            Halt(DebugHalt_Step_Exclusive);
        else
            Halt(DebugHalt_Step_Normal);

Library pseudocode for shared/debug/haltingevents/CheckOSUnlockCatch

// CheckOSUnlockCatch()
// ====================
// Called on unlocking the OS Lock to pend an OS Unlock Catch debug event

CheckOSUnlockCatch()
    if (HaveDoPD() && CTIDEVCTL.OSUCE == '1') || (!HaveDoPD() && EDECR.OSUCE == '1') then
        if !Halted() then EDESR.OSUC = '1';

Library pseudocode for shared/debug/haltingevents/CheckPendingOSUnlockCatch

// CheckPendingOSUnlockCatch()
// ===========================
// Check whether EDESR.OSUC has been set by an OS Unlock Catch debug event

CheckPendingOSUnlockCatch()
    if HaltingAllowed() && EDESR.OSUC == '1' then
        Halt(DebugHalt_OSUnlockCatch);

Library pseudocode for shared/debug/haltingevents/CheckPendingResetCatch

// CheckPendingResetCatch()
// ========================
// Check whether EDESR.RC has been set by a Reset Catch debug event

CheckPendingResetCatch()
    if HaltingAllowed() && EDESR.RC == '1' then
        Halt(DebugHalt_ResetCatch);

Library pseudocode for shared/debug/haltingevents/CheckResetCatch

// CheckResetCatch()
// =================
// Called after reset

CheckResetCatch()
    if (HaveDoPD() && CTIDEVCTL.RCE == '1') || (!HaveDoPD() && EDECR.RCE == '1') then
        EDESR.RC = '1';
        // If halting is allowed then halt immediately
        if HaltingAllowed() then Halt(DebugHalt_ResetCatch);

Library pseudocode for shared/debug/haltingevents/CheckSoftwareAccessToDebugRegisters

// CheckSoftwareAccessToDebugRegisters()
// =====================================
// Check for access to Breakpoint and Watchpoint registers.

CheckSoftwareAccessToDebugRegisters()
    os_lock = (if ELUsingAArch32(EL1) then DBGOSLSR.OSLK else OSLSR_EL1.OSLK);
    if HaltingAllowed() && EDSCR.TDA == '1' && os_lock == '0' then
        Halt(DebugHalt_SoftwareAccess);

Library pseudocode for shared/debug/authentication/IsCorePowered

// Returns TRUE if the Core power domain is powered on, FALSE otherwise.
boolean IsCorePowered();

Library pseudocode for shared/debug/breakpoint/CheckValidStateMatch

// CheckValidStateMatch()
// ======================
// Checks for an invalid state match that will generate Constrained Unpredictable behaviour, otherwise
// returns Constraint_NONE.

(Constraint, bits(2), bit, bits(2)) CheckValidStateMatch(bits(2) SSC, bit HMC, bits(2) PxC, boolean isbreakpnt)
    boolean reserved = FALSE;

    // Match 'Usr/Sys/Svc' only valid for AArch32 breakpoints
    if (!isbreakpnt || !HaveAArch32EL(EL1)) && HMC:PxC == '000' && SSC != '11' then
        reserved = TRUE;

    // Both EL3 and EL2 are not implemented
    if !HaveEL(EL3) && !HaveEL(EL2) && (HMC != '0' || SSC != '00') then
        reserved = TRUE;

    // EL3 is not implemented
    if !HaveEL(EL3) && SSC IN {'01','10'} && HMC:SSC:PxC != '10100' then
        reserved = TRUE;

    // EL3 using AArch64 only
    if (!HaveEL(EL3) || HighestELUsingAArch32()) && HMC:SSC:PxC == '11000' then
        reserved = TRUE;

    // EL2 is not implemented
    if !HaveEL(EL2) && HMC:SSC:PxC == '11100' then
        reserved = TRUE;

    // Secure EL2 is not implemented
    if !HaveSecureEL2Ext() && (HMC:SSC:PxC) IN {'01100','10100','x11x1'} then
        reserved = TRUE;

    // Values that are not allocated in any architecture version
    if (HMC:SSC:PxC) IN {'01110','100x0','10110','11x10'} then
        reserved = TRUE;

    if reserved then
        // If parameters are set to a reserved type, behaves as either disabled or a defined type
        (c, <HMC,SSC,PxC>) = ConstrainUnpredictableBits(Unpredictable_RESBPWPCTRL);
        assert c IN {Constraint_DISABLED, Constraint_UNKNOWN};
        if c == Constraint_DISABLED then
            return (c, bits(2) UNKNOWN, bit UNKNOWN, bits(2) UNKNOWN);
        // Otherwise the value returned by ConstrainUnpredictableBits must be a not-reserved value

    return (Constraint_NONE, SSC, HMC, PxC);
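
The reserved-value checks above compare the concatenation HMC:SSC:PxC against 5-bit patterns in which 'x' is a don't-care bit. A minimal C sketch of that wildcard comparison (the function name and pattern encoding are illustrative assumptions, not part of the architecture):

    #include <stdbool.h>
    #include <stdint.h>

    // Compare a 5-bit HMC:SSC:PxC value against a pattern such as "x11x1",
    // where 'x' matches either bit value. MSB first, as in the pseudocode.
    static bool matches_pattern(uint8_t value, const char *pattern)
    {
        for (int i = 0; i < 5; i++) {
            char p = pattern[i];
            int bit = (value >> (4 - i)) & 1;
            if (p != 'x' && (p - '0') != bit)
                return false;
        }
        return true;
    }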

Library pseudocode for shared/debug/cti/CTI_SetEventLevel

// Set a Cross Trigger multi-cycle input event trigger to the specified level.
CTI_SetEventLevel(CrossTriggerIn id, signal level);

Library pseudocode for shared/debug/cti/CTI_SignalEvent

// Signal a discrete event on a Cross Trigger input event trigger.
CTI_SignalEvent(CrossTriggerIn id);

Library pseudocode for shared/debug/cti/CrossTrigger

enumeration CrossTriggerOut {CrossTriggerOut_DebugRequest, CrossTriggerOut_RestartRequest,
                             CrossTriggerOut_IRQ,          CrossTriggerOut_RSVD3,
                             CrossTriggerOut_TraceExtIn0,  CrossTriggerOut_TraceExtIn1,
                             CrossTriggerOut_TraceExtIn2,  CrossTriggerOut_TraceExtIn3};

enumeration CrossTriggerIn  {CrossTriggerIn_CrossHalt,     CrossTriggerIn_PMUOverflow,
                             CrossTriggerIn_RSVD2,         CrossTriggerIn_RSVD3,
                             CrossTriggerIn_TraceExtOut0,  CrossTriggerIn_TraceExtOut1,
                             CrossTriggerIn_TraceExtOut2,  CrossTriggerIn_TraceExtOut3};

Library pseudocode for shared/debug/dccanditr/CheckForDCCInterrupts

// CheckForDCCInterrupts()
// =======================

CheckForDCCInterrupts()
    commrx = (EDSCR.RXfull == '1');
    commtx = (EDSCR.TXfull == '0');

    // COMMRX and COMMTX support is optional and not recommended for new designs.
    // SetInterruptRequestLevel(InterruptID_COMMRX, if commrx then HIGH else LOW);
    // SetInterruptRequestLevel(InterruptID_COMMTX, if commtx then HIGH else LOW);

    // The value to be driven onto the common COMMIRQ signal.
    if ELUsingAArch32(EL1) then
        commirq = ((commrx && DBGDCCINT.RX == '1') ||
                   (commtx && DBGDCCINT.TX == '1'));
    else
        commirq = ((commrx && MDCCINT_EL1.RX == '1') ||
                   (commtx && MDCCINT_EL1.TX == '1'));
    SetInterruptRequestLevel(InterruptID_COMMIRQ, if commirq then HIGH else LOW);

    return;
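
As a rough model of the COMMIRQ computation above, the driven level is simply the OR of the enabled receive and transmit conditions. A hedged C sketch, with hypothetical boolean inputs standing in for the EDSCR flags and the DBGDCCINT/MDCCINT_EL1 enable bits:

    #include <stdbool.h>

    // Illustrative only: rxfull/txfull mirror EDSCR.RXfull/TXfull,
    // rx_enable/tx_enable mirror the DCC interrupt-enable bits.
    static bool commirq_level(bool rxfull, bool txfull,
                              bool rx_enable, bool tx_enable)
    {
        bool commrx = rxfull;    // data waiting to be read by software
        bool commtx = !txfull;   // transmit register empty
        return (commrx && rx_enable) || (commtx && tx_enable);
    }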

Library pseudocode for shared/debug/dccanditr/DBGDTRRX_EL0

// DBGDTRRX_EL0[] (external write)
// ===============================
// Called on writes to debug register 0x08C.

DBGDTRRX_EL0[boolean memory_mapped] = bits(32) value
    if EDPRSR<6:5,0> != '001' then    // Check DLK, OSLK and PU bits
        IMPLEMENTATION_DEFINED "generate error response";
        return;

    if EDSCR.ERR == '1' then return;  // Error flag set: ignore write

    // The Software lock is OPTIONAL.
    if memory_mapped && EDLSR.SLK == '1' then return;  // Software lock locked: ignore write

    if EDSCR.RXfull == '1' || (Halted() && EDSCR.MA == '1' && EDSCR.ITE == '0') then
        EDSCR.RXO = '1';  EDSCR.ERR = '1';             // Overrun condition: ignore write
        return;

    EDSCR.RXfull = '1';
    DTRRX = value;

    if Halted() && EDSCR.MA == '1' then
        EDSCR.ITE = '0';                  // See comments in EDITR[] (external write)
        if !UsingAArch32() then
            ExecuteA64(0xD5330501<31:0>); // A64 "MRS X1,DBGDTRRX_EL0"
            ExecuteA64(0xB8004401<31:0>); // A64 "STR W1,[X0],#4"
            X[1] = bits(64) UNKNOWN;
        else
            ExecuteT32(0xEE10<15:0> /*hw1*/, 0x1E15<15:0> /*hw2*/);  // T32 "MRS R1,DBGDTRRXint"
            ExecuteT32(0xF840<15:0> /*hw1*/, 0x1B04<15:0> /*hw2*/);  // T32 "STR R1,[R0],#4"
            R[1] = bits(32) UNKNOWN;
        // If the store aborts, the Data Abort exception is taken and EDSCR.ERR is set to 1
        if EDSCR.ERR == '1' then
            EDSCR.RXfull = bit UNKNOWN;
            DBGDTRRX_EL0 = bits(64) UNKNOWN;
        else
            // "MRS X1,DBGDTRRX_EL0" calls DBGDTR_EL0[] (read) which clears RXfull.
            assert EDSCR.RXfull == '0';
        EDSCR.ITE = '1';                  // See comments in EDITR[] (external write)
    return;

// DBGDTRRX_EL0[] (external read)
// ==============================

bits(32) DBGDTRRX_EL0[boolean memory_mapped]
    return DTRRX;

Library pseudocode for shared/debug/dccanditr/DBGDTRTX_EL0

// DBGDTRTX_EL0[] (external read)
// ==============================
// Called on reads of debug register 0x080.

bits(32) DBGDTRTX_EL0[boolean memory_mapped]
    if EDPRSR<6:5,0> != '001' then    // Check DLK, OSLK and PU bits
        IMPLEMENTATION_DEFINED "generate error response";
        return bits(32) UNKNOWN;

    underrun = EDSCR.TXfull == '0' || (Halted() && EDSCR.MA == '1' && EDSCR.ITE == '0');
    value = if underrun then bits(32) UNKNOWN else DTRTX;

    if EDSCR.ERR == '1' then return value;       // Error flag set: no side-effects

    // The Software lock is OPTIONAL.
    if memory_mapped && EDLSR.SLK == '1' then    // Software lock locked: no side-effects
        return value;

    if underrun then
        EDSCR.TXU = '1';  EDSCR.ERR = '1';       // Underrun condition: block side-effects
        return value;                            // Return UNKNOWN

    EDSCR.TXfull = '0';
    if Halted() && EDSCR.MA == '1' then
        EDSCR.ITE = '0';                  // See comments in EDITR[] (external write)

        if !UsingAArch32() then
            ExecuteA64(0xB8404401<31:0>); // A64 "LDR W1,[X0],#4"
        else
            ExecuteT32(0xF850<15:0> /*hw1*/, 0x1B04<15:0> /*hw2*/);  // T32 "LDR R1,[R0],#4"

        // If the load aborts, the Data Abort exception is taken and EDSCR.ERR is set to 1
        if EDSCR.ERR == '1' then
            EDSCR.TXfull = bit UNKNOWN;
            DBGDTRTX_EL0 = bits(64) UNKNOWN;
        else
            if !UsingAArch32() then
                ExecuteA64(0xD5130501<31:0>);  // A64 "MSR DBGDTRTX_EL0,X1"
            else
                ExecuteT32(0xEE00<15:0> /*hw1*/, 0x1E15<15:0> /*hw2*/);  // T32 "MSR DBGDTRTXint,R1"
            // "MSR DBGDTRTX_EL0,X1" calls DBGDTR_EL0[] (write) which sets TXfull.
            assert EDSCR.TXfull == '1';
        if !UsingAArch32() then
            X[1] = bits(64) UNKNOWN;
        else
            R[1] = bits(32) UNKNOWN;
        EDSCR.ITE = '1';                  // See comments in EDITR[] (external write)

    return value;

// DBGDTRTX_EL0[] (external write)
// ===============================

DBGDTRTX_EL0[boolean memory_mapped] = bits(32) value
    // The Software lock is OPTIONAL.
    if memory_mapped && EDLSR.SLK == '1' then return;  // Software lock locked: ignore write
    DTRTX = value;
    return;

Library pseudocode for shared/debug/dccanditr/DBGDTR_EL0

// DBGDTR_EL0[] (write)
// ====================
// System register writes to DBGDTR_EL0, DBGDTRTX_EL0 (AArch64) and DBGDTRTXint (AArch32)

DBGDTR_EL0[] = bits(N) value
    // For MSR DBGDTRTX_EL0,<Rt>  N=32, value=X[t]<31:0>, X[t]<63:32> is ignored
    // For MSR DBGDTR_EL0,<Xt>    N=64, value=X[t]<63:0>
    assert N IN {32,64};
    if EDSCR.TXfull == '1' then
        value = bits(N) UNKNOWN;
    // On a 64-bit write, implement a half-duplex channel
    if N == 64 then DTRRX = value<63:32>;
    DTRTX = value<31:0>;                 // 32-bit or 64-bit write
    EDSCR.TXfull = '1';
    return;

// DBGDTR_EL0[] (read)
// ===================
// System register reads of DBGDTR_EL0, DBGDTRRX_EL0 (AArch64) and DBGDTRRXint (AArch32)

bits(N) DBGDTR_EL0[]
    // For MRS <Rt>,DBGDTRTX_EL0  N=32, X[t]=Zeros(32):result
    // For MRS <Xt>,DBGDTR_EL0    N=64, X[t]=result
    assert N IN {32,64};
    bits(N) result;
    if EDSCR.RXfull == '0' then
        result = bits(N) UNKNOWN;
    else
        // On a 64-bit read, implement a half-duplex channel
        // NOTE: the word order is reversed on reads with regards to writes
        if N == 64 then result<63:32> = DTRTX;
        result<31:0> = DTRRX;
    EDSCR.RXfull = '0';
    return result;
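
The two accessors above implement a half-duplex channel over the DTRRX/DTRTX pair, with the word order reversed between 64-bit writes and 64-bit reads. A toy C model of just that packing (names are illustrative, not an architected interface):

    #include <stdint.h>

    static uint32_t dtrrx, dtrtx;

    // 64-bit write fills both 32-bit halves: high word to DTRRX, low to DTRTX.
    static void dbgdtr_write64(uint64_t value)
    {
        dtrrx = (uint32_t)(value >> 32);
        dtrtx = (uint32_t)value;
    }

    // NOTE: reads pack DTRTX into the high word and DTRRX into the low word,
    // mirroring the reversed word order in the pseudocode.
    static uint64_t dbgdtr_read64(void)
    {
        return ((uint64_t)dtrtx << 32) | dtrrx;
    }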

Library pseudocode for shared/debug/dccanditr/DTR

bits(32) DTRRX;
bits(32) DTRTX;

Library pseudocode for shared/debug/dccanditr/EDITR

// EDITR[] (external write)
// ========================
// Called on writes to debug register 0x084.

EDITR[boolean memory_mapped] = bits(32) value
    if EDPRSR<6:5,0> != '001' then    // Check DLK, OSLK and PU bits
        IMPLEMENTATION_DEFINED "generate error response";
        return;

    if EDSCR.ERR == '1' then return;  // Error flag set: ignore write

    // The Software lock is OPTIONAL.
    if memory_mapped && EDLSR.SLK == '1' then return;  // Software lock locked: ignore write

    if !Halted() then return;         // Non-debug state: ignore write

    if EDSCR.ITE == '0' || EDSCR.MA == '1' then
        EDSCR.ITO = '1';  EDSCR.ERR = '1';             // Overrun condition: block write
        return;

    // ITE indicates whether the processor is ready to accept another instruction; the processor
    // may support multiple outstanding instructions. Unlike the "InstrCompl" flag in [v7A] there
    // is no indication that the pipeline is empty (all instructions have completed). In this
    // pseudocode, the assumption is that only one instruction can be executed at a time,
    // meaning ITE acts like "InstrCompl".
    EDSCR.ITE = '0';

    if !UsingAArch32() then
        ExecuteA64(value);
    else
        ExecuteT32(value<15:0>/*hw1*/, value<31:16> /*hw2*/);

    EDSCR.ITE = '1';

    return;

Library pseudocode for shared/debug/halting/DCPSInstruction

// DCPSInstruction()
// =================
// Operation of the DCPS instruction in Debug state

DCPSInstruction(bits(2) target_el)

    SynchronizeContext();

    case target_el of
        when EL1
            if PSTATE.EL == EL2 || (PSTATE.EL == EL3 && !UsingAArch32()) then handle_el = PSTATE.EL;
            elsif EL2Enabled() && HCR_EL2.TGE == '1' then UNDEFINED;
            else handle_el = EL1;
        when EL2
            if !HaveEL(EL2) then UNDEFINED;
            elsif PSTATE.EL == EL3 && !UsingAArch32() then handle_el = EL3;
            elsif !IsSecureEL2Enabled() && IsSecure() then UNDEFINED;
            else handle_el = EL2;
        when EL3
            if EDSCR.SDD == '1' || !HaveEL(EL3) then UNDEFINED;
            handle_el = EL3;
        otherwise
            Unreachable();

    from_secure = IsSecure();
    if ELUsingAArch32(handle_el) then
        if PSTATE.M == M32_Monitor then SCR.NS = '0';
        assert UsingAArch32();                 // Cannot move from AArch64 to AArch32
        case handle_el of
            when EL1
                AArch32.WriteMode(M32_Svc);
                if HavePANExt() && SCTLR.SPAN == '0' then
                    PSTATE.PAN = '1';
            when EL2  AArch32.WriteMode(M32_Hyp);
            when EL3
                AArch32.WriteMode(M32_Monitor);
                if HavePANExt() then
                    if !from_secure then
                        PSTATE.PAN = '0';
                    elsif SCTLR.SPAN == '0' then
                        PSTATE.PAN = '1';
        if handle_el == EL2 then
            ELR_hyp = bits(32) UNKNOWN;  HSR = bits(32) UNKNOWN;
        else
            LR = bits(32) UNKNOWN;
        SPSR[] = bits(32) UNKNOWN;
        PSTATE.E = SCTLR[].EE;
        DLR = bits(32) UNKNOWN;  DSPSR = bits(32) UNKNOWN;

    else                                       // Targeting AArch64
        if UsingAArch32() then
            AArch64.MaybeZeroRegisterUppers();
        MaybeZeroSVEUppers(target_el);
        PSTATE.nRW = '0';  PSTATE.SP = '1';  PSTATE.EL = handle_el;
        if (HavePANExt() && ((handle_el == EL1 && SCTLR_EL1.SPAN == '0') ||
                             (handle_el == EL2 && HCR_EL2.E2H == '1' &&
                              HCR_EL2.TGE == '1' && SCTLR_EL2.SPAN == '0'))) then
            PSTATE.PAN = '1';
        ELR[] = bits(64) UNKNOWN;  SPSR[] = bits(64) UNKNOWN;  ESR[] = bits(64) UNKNOWN;
        DLR_EL0 = bits(64) UNKNOWN;  DSPSR_EL0 = bits(64) UNKNOWN;
        if HaveUAOExt() then PSTATE.UAO = '0';
        if HaveMTEExt() then PSTATE.TCO = '1';

    UpdateEDSCRFields();                       // Update EDSCR PE state flags

    sync_errors = HaveIESB() && SCTLR[].IESB == '1';
    if HaveDoubleFaultExt() && !UsingAArch32() then
        sync_errors = sync_errors || (SCR_EL3.EA == '1' && SCR_EL3.NMEA == '1' && PSTATE.EL == EL3);
    // SCTLR[].IESB might be ignored in Debug state.
    if !ConstrainUnpredictableBool(Unpredictable_IESBinDebug) then
        sync_errors = FALSE;
    if sync_errors then
        SynchronizeErrors();

    return;

Library pseudocode for shared/debug/halting/DRPSInstruction

// DRPSInstruction()
// =================
// Operation of the A64 DRPS and T32 ERET instructions in Debug state

DRPSInstruction()

    SynchronizeContext();

    sync_errors = HaveIESB() && SCTLR[].IESB == '1';
    if HaveDoubleFaultExt() && !UsingAArch32() then
        sync_errors = sync_errors || (SCR_EL3.EA == '1' && SCR_EL3.NMEA == '1' && PSTATE.EL == EL3);
    // SCTLR[].IESB might be ignored in Debug state.
    if !ConstrainUnpredictableBool(Unpredictable_IESBinDebug) then
        sync_errors = FALSE;
    if sync_errors then
        SynchronizeErrors();

    bits(64) spsr = SPSR[];

    SetPSTATEFromPSR(spsr);

    // PSTATE.{N,Z,C,V,Q,GE,SS,D,A,I,F} are not observable and ignored in Debug state, so
    // behave as if UNKNOWN.
    if UsingAArch32() then
        PSTATE.<N,Z,C,V,Q,GE,SS,A,I,F> = bits(13) UNKNOWN;
        // In AArch32, all instructions are T32 and unconditional.
        PSTATE.IT = '00000000';  PSTATE.T = '1';       // PSTATE.J is RES0
        DLR = bits(32) UNKNOWN;  DSPSR = bits(32) UNKNOWN;
    else
        PSTATE.<N,Z,C,V,SS,D,A,I,F> = bits(9) UNKNOWN;
        DLR_EL0 = bits(64) UNKNOWN;  DSPSR_EL0 = bits(64) UNKNOWN;

    UpdateEDSCRFields();                               // Update EDSCR PE state flags

    return;

Library pseudocode for shared/debug/halting/DebugHalt

constant bits(6) DebugHalt_Breakpoint      = '000111';
constant bits(6) DebugHalt_EDBGRQ          = '010011';
constant bits(6) DebugHalt_Step_Normal     = '011011';
constant bits(6) DebugHalt_Step_Exclusive  = '011111';
constant bits(6) DebugHalt_OSUnlockCatch   = '100011';
constant bits(6) DebugHalt_ResetCatch      = '100111';
constant bits(6) DebugHalt_Watchpoint      = '101011';
constant bits(6) DebugHalt_HaltInstruction = '101111';
constant bits(6) DebugHalt_SoftwareAccess  = '110011';
constant bits(6) DebugHalt_ExceptionCatch  = '110111';
constant bits(6) DebugHalt_Step_NoSyndrome = '111011';
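
For reference, the same encodings expressed as a C enumeration; the values are the architected 6-bit EDSCR.STATUS codes shown above, converted to hexadecimal (the identifier names are illustrative):

    // C mirror of the DebugHalt_* constants above.
    enum debug_halt_reason {
        DEBUG_HALT_BREAKPOINT       = 0x07,  // '000111'
        DEBUG_HALT_EDBGRQ           = 0x13,  // '010011'
        DEBUG_HALT_STEP_NORMAL      = 0x1B,  // '011011'
        DEBUG_HALT_STEP_EXCLUSIVE   = 0x1F,  // '011111'
        DEBUG_HALT_OS_UNLOCK_CATCH  = 0x23,  // '100011'
        DEBUG_HALT_RESET_CATCH      = 0x27,  // '100111'
        DEBUG_HALT_WATCHPOINT       = 0x2B,  // '101011'
        DEBUG_HALT_HALT_INSTRUCTION = 0x2F,  // '101111'
        DEBUG_HALT_SOFTWARE_ACCESS  = 0x33,  // '110011'
        DEBUG_HALT_EXCEPTION_CATCH  = 0x37,  // '110111'
        DEBUG_HALT_STEP_NO_SYNDROME = 0x3B,  // '111011'
    };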

Library pseudocode for shared/debug/halting/DisableITRAndResumeInstructionPrefetch

DisableITRAndResumeInstructionPrefetch();

Library pseudocode for shared/debug/halting/ExecuteA64

// Execute an A64 instruction in Debug state.
ExecuteA64(bits(32) instr);

Library pseudocode for shared/debug/halting/ExecuteT32

// Execute a T32 instruction in Debug state.
ExecuteT32(bits(16) hw1, bits(16) hw2);

Library pseudocode for shared/debug/halting/ExitDebugState

// ExitDebugState()
// ================

ExitDebugState()
    assert Halted();
    SynchronizeContext();

    // Although EDSCR.STATUS signals that the PE is restarting, debuggers must use EDPRSR.SDR to
    // detect that the PE has restarted.
    EDSCR.STATUS = '000001';                      // Signal restarting
    EDESR<2:0> = '000';                           // Clear any pending Halting debug events

    bits(64) new_pc;
    bits(64) spsr;

    if UsingAArch32() then
        new_pc = ZeroExtend(DLR);
        spsr = ZeroExtend(DSPSR);
    else
        new_pc = DLR_EL0;
        spsr = DSPSR_EL0;

    // If this is an illegal return, SetPSTATEFromPSR() will set PSTATE.IL.
    if UsingAArch32() then
        SetPSTATEFromPSR(spsr<31:0>);             // Can update privileged bits, even at EL0
    else
        SetPSTATEFromPSR(spsr);                   // Can update privileged bits, even at EL0

    if UsingAArch32() then
        if ConstrainUnpredictableBool(Unpredictable_RESTARTALIGNPC) then new_pc<0> = '0';
        BranchTo(new_pc<31:0>, BranchType_DBGEXIT);  // AArch32 branch
    else
        // If targeting AArch32 then possibly zero the 32 most significant bits of the target PC
        if spsr<4> == '1' && ConstrainUnpredictableBool(Unpredictable_RESTARTZEROUPPERPC) then
            new_pc<63:32> = Zeros();
        BranchTo(new_pc, BranchType_DBGEXIT);     // A type of branch that is never predicted

    (EDSCR.STATUS,EDPRSR.SDR) = ('000010','1');   // Atomically signal restarted
    UpdateEDSCRFields();                          // Stop signalling PE state
    DisableITRAndResumeInstructionPrefetch();

    return;

Library pseudocode for shared/debug/halting/Halt

// Halt()
// ======

Halt(bits(6) reason)

    CTI_SignalEvent(CrossTriggerIn_CrossHalt);    // Trigger other cores to halt

    bits(64) preferred_restart_address = ThisInstrAddr();
    bits(32) spsr_32;
    bits(64) spsr_64;

    if UsingAArch32() then
        spsr_32 = GetPSRFromPSTATE(DebugState);
    else
        spsr_64 = GetPSRFromPSTATE(DebugState);

    if (HaveBTIExt() &&
          !(reason IN {DebugHalt_Step_Normal, DebugHalt_Step_Exclusive,
                       DebugHalt_Step_NoSyndrome, DebugHalt_Breakpoint,
                       DebugHalt_HaltInstruction}) &&
          ConstrainUnpredictableBool(Unpredictable_ZEROBTYPE)) then
        if UsingAArch32() then
            spsr_32<11:10> = '00';
        else
            spsr_64<11:10> = '00';

    if UsingAArch32() then
        DLR = preferred_restart_address<31:0>;
        DSPSR = spsr_32;
    else
        DLR_EL0 = preferred_restart_address;
        DSPSR_EL0 = spsr_64;

    EDSCR.ITE = '1';  EDSCR.ITO = '0';
    if IsSecure() then
        EDSCR.SDD = '0';                          // If entered in Secure state, allow debug
    elsif HaveEL(EL3) then
        EDSCR.SDD = if ExternalSecureInvasiveDebugEnabled() then '0' else '1';
    else
        assert EDSCR.SDD == '1';                  // Otherwise EDSCR.SDD is RES1
    EDSCR.MA = '0';

    // In Debug state:
    // * PSTATE.{SS,SSBS,D,A,I,F} are not observable and ignored so behave-as-if UNKNOWN.
    // * PSTATE.{N,Z,C,V,Q,GE,E,M,nRW,EL,SP,DIT} are also not observable, but since these
    //   are not changed on exception entry, this function also leaves them unchanged.
    // * PSTATE.{IT,T} are ignored.
    // * PSTATE.IL is ignored and behave-as-if 0.
    // * PSTATE.{UAO,PAN} are observable and not changed on entry into Debug state.
    if UsingAArch32() then
        PSTATE.<IT,SS,SSBS,A,I,F,T> = bits(14) UNKNOWN;
    else
        PSTATE.<SS,SSBS,D,A,I,F> = bits(6) UNKNOWN;
        PSTATE.BTYPE = '00';
    PSTATE.IL = '0';

    StopInstructionPrefetchAndEnableITR();
    EDSCR.STATUS = reason;                        // Signal entered Debug state
    UpdateEDSCRFields();                          // Update EDSCR PE state flags.

    return;

Library pseudocode for shared/debug/halting/HaltOnBreakpointOrWatchpoint

// HaltOnBreakpointOrWatchpoint()
// ==============================
// Returns TRUE if the Breakpoint and Watchpoint debug events should be considered for Debug
// state entry, FALSE if they should be considered for a debug exception.

boolean HaltOnBreakpointOrWatchpoint()
    return HaltingAllowed() && EDSCR.HDE == '1' && OSLSR_EL1.OSLK == '0';

Library pseudocode for shared/debug/halting/Halted

// Halted()
// ========

boolean Halted()
    return !(EDSCR.STATUS IN {'000001', '000010'});      // Halted

Library pseudocode for shared/debug/halting/HaltingAllowed

// HaltingAllowed()
// ================
// Returns TRUE if halting is currently allowed, FALSE if halting is prohibited.

boolean HaltingAllowed()
    if Halted() || DoubleLockStatus() then
        return FALSE;
    elsif IsSecure() then
        return ExternalSecureInvasiveDebugEnabled();
    else
        return ExternalInvasiveDebugEnabled();

Library pseudocode for shared/debug/halting/Restarting

// Restarting()
// ============

boolean Restarting()
    return EDSCR.STATUS == '000001';                     // Restarting

Library pseudocode for shared/debug/halting/StopInstructionPrefetchAndEnableITR

StopInstructionPrefetchAndEnableITR();

Library pseudocode for shared/debug/halting/UpdateEDSCRFields

// UpdateEDSCRFields()
// ===================
// Update EDSCR PE state fields

UpdateEDSCRFields()

    if !Halted() then
        EDSCR.EL = '00';
        EDSCR.NS = bit UNKNOWN;
        EDSCR.RW = '1111';
    else
        EDSCR.EL = PSTATE.EL;
        EDSCR.NS = if IsSecure() then '0' else '1';

        bits(4) RW;
        RW<1> = if ELUsingAArch32(EL1) then '0' else '1';
        if PSTATE.EL != EL0 then
            RW<0> = RW<1>;
        else
            RW<0> = if UsingAArch32() then '0' else '1';
        if !HaveEL(EL2) || (HaveEL(EL3) && SCR_GEN[].NS == '0' && !IsSecureEL2Enabled()) then
            RW<2> = RW<1>;
        else
            RW<2> = if ELUsingAArch32(EL2) then '0' else '1';
        if !HaveEL(EL3) then
            RW<3> = RW<2>;
        else
            RW<3> = if ELUsingAArch32(EL3) then '0' else '1';

        // The least-significant bits of EDSCR.RW are UNKNOWN if any higher EL is using AArch32.
        if RW<3> == '0' then RW<2:0> = bits(3) UNKNOWN;
        elsif RW<2> == '0' then RW<1:0> = bits(2) UNKNOWN;
        elsif RW<1> == '0' then RW<0> = bit UNKNOWN;
        EDSCR.RW = RW;
    return;

Library pseudocode for shared/debug/haltingevents/CheckExceptionCatch

// CheckExceptionCatch()
// =====================
// Check whether an Exception Catch debug event is set on the current Exception level

CheckExceptionCatch(boolean exception_entry)
    // Called after an exception entry or exit, that is, such that IsSecure() and PSTATE.EL are correct
    // for the exception target.
    base = if IsSecure() then 0 else 4;
    if HaltingAllowed() then
        if HaveExtendedECDebugEvents() then
            exception_exit = !exception_entry;
            ctrl = EDECCR<UInt(PSTATE.EL) + base + 8>:EDECCR<UInt(PSTATE.EL) + base>;
            case ctrl of
                when '00' halt = FALSE;
                when '01' halt = TRUE;
                when '10' halt = (exception_exit == TRUE);
                when '11' halt = (exception_entry == TRUE);
        else
            halt = (EDECCR<UInt(PSTATE.EL) + base> == '1');
        if halt then Halt(DebugHalt_ExceptionCatch);
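
A hedged C sketch of the extended EDECCR decode above: for a given Exception level and Security state it selects a two-bit control field (upper bit from EDECCR<n+8>, lower from EDECCR<n>) and decides whether to halt. The helper name and flag arguments are illustrative assumptions:

    #include <stdbool.h>
    #include <stdint.h>

    static bool exception_catch_halts(uint32_t edeccr, unsigned el,
                                      bool secure, bool exception_entry)
    {
        unsigned base = secure ? 0 : 4;
        unsigned lo   = (edeccr >> (el + base)) & 1;
        unsigned hi   = (edeccr >> (el + base + 8)) & 1;
        switch ((hi << 1) | lo) {
            case 0: return false;             // '00': never halt
            case 1: return true;              // '01': halt on entry and exit
            case 2: return !exception_entry;  // '10': halt on exception exit
            case 3: return exception_entry;   // '11': halt on exception entry
        }
        return false;
    }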

Library pseudocode for shared/debug/haltingevents/CheckHaltingStep

// CheckHaltingStep()
// ==================
// Check whether EDESR.SS has been set by Halting Step

CheckHaltingStep()
    if HaltingAllowed() && EDESR.SS == '1' then
        // The STATUS code depends on how we arrived at the state where EDESR.SS == 1.
        if HaltingStep_DidNotStep() then
            Halt(DebugHalt_Step_NoSyndrome);
        elsif HaltingStep_SteppedEX() then
            Halt(DebugHalt_Step_Exclusive);
        else
            Halt(DebugHalt_Step_Normal);

Library pseudocode for shared/debug/haltingevents/CheckOSUnlockCatch

// CheckOSUnlockCatch()
// ====================
// Called on unlocking the OS Lock to pend an OS Unlock Catch debug event

CheckOSUnlockCatch()
    if (HaveDoPD() && CTIDEVCTL.OSUCE == '1') || (!HaveDoPD() && EDECR.OSUCE == '1') then
        if !Halted() then EDESR.OSUC = '1';

Library pseudocode for shared/debug/haltingevents/CheckPendingOSUnlockCatch

// CheckPendingOSUnlockCatch()
// ===========================
// Check whether EDESR.OSUC has been set by an OS Unlock Catch debug event

CheckPendingOSUnlockCatch()
    if HaltingAllowed() && EDESR.OSUC == '1' then
        Halt(DebugHalt_OSUnlockCatch);

Library pseudocode for shared/debug/haltingevents/CheckPendingResetCatch

// CheckPendingResetCatch()
// ========================
// Check whether EDESR.RC has been set by a Reset Catch debug event

CheckPendingResetCatch()
    if HaltingAllowed() && EDESR.RC == '1' then
        Halt(DebugHalt_ResetCatch);

Library pseudocode for shared/debug/haltingevents/CheckResetCatch

// CheckResetCatch()
// =================
// Called after reset

CheckResetCatch()
    if (HaveDoPD() && CTIDEVCTL.RCE == '1') || (!HaveDoPD() && EDECR.RCE == '1') then
        EDESR.RC = '1';
        // If halting is allowed then halt immediately
        if HaltingAllowed() then Halt(DebugHalt_ResetCatch);

Library pseudocode for shared/debug/haltingevents/CheckSoftwareAccessToDebugRegisters

// CheckSoftwareAccessToDebugRegisters()
// =====================================
// Check for access to Breakpoint and Watchpoint registers.

CheckSoftwareAccessToDebugRegisters()
    os_lock = (if ELUsingAArch32(EL1) then DBGOSLSR.OSLK else OSLSR_EL1.OSLK);
    if HaltingAllowed() && EDSCR.TDA == '1' && os_lock == '0' then
        Halt(DebugHalt_SoftwareAccess);

Library pseudocode for shared/debug/haltingevents/ExternalDebugRequest

// ExternalDebugRequest()
// ======================

ExternalDebugRequest()
    if HaltingAllowed() then
        Halt(DebugHalt_EDBGRQ);
    // Otherwise the CTI continues to assert the debug request until it is taken.

Library pseudocode for shared/debug/haltingevents/HaltingStep_DidNotStep

// Returns TRUE if the previously executed instruction was executed in the inactive state, that is,
// if it was not itself stepped.
boolean HaltingStep_DidNotStep();

Library pseudocode for shared/debug/haltingevents/HaltingStep_SteppedEX

// Returns TRUE if the previously executed instruction was a Load-Exclusive class instruction
// executed in the active-not-pending state.
boolean HaltingStep_SteppedEX();

Library pseudocode for shared/debug/haltingevents/RunHaltingStep

// RunHaltingStep()
// ================

RunHaltingStep(boolean exception_generated, bits(2) exception_target, boolean syscall,
               boolean reset)
    // "exception_generated" is TRUE if the previous instruction generated a synchronous exception
    // or was cancelled by an asynchronous exception.
    //
    // if "exception_generated" is TRUE then "exception_target" is the target of the exception, and
    // "syscall" is TRUE if the exception is a synchronous exception where the preferred return
    // address is the instruction following that which generated the exception.
    //
    // "reset" is TRUE if exiting reset state into the highest EL.

    if reset then assert !Halted();           // Cannot come out of reset halted
    active = EDECR.SS == '1' && !Halted();
    if active && reset then                   // Coming out of reset with EDECR.SS set
        EDESR.SS = '1';
    elsif active && HaltingAllowed() then
        if exception_generated && exception_target == EL3 then
            advance = syscall || ExternalSecureInvasiveDebugEnabled();
        else
            advance = TRUE;
        if advance then EDESR.SS = '1';

    return;

Library pseudocode for shared/debug/interrupts/ExternalDebugInterruptsDisabled

// ExternalDebugInterruptsDisabled()
// =================================
// Determine whether EDSCR disables interrupts routed to 'target'

boolean ExternalDebugInterruptsDisabled(bits(2) target)
    if Havev8p4Debug() then
        if target == EL3 || IsSecure() then
            int_dis = (EDSCR.INTdis[0] == '1' && ExternalSecureInvasiveDebugEnabled());
        else
            int_dis = (EDSCR.INTdis[0] == '1');
    else
        case target of
            when EL3
                int_dis = (EDSCR.INTdis == '11' && ExternalSecureInvasiveDebugEnabled());
            when EL2
                int_dis = (EDSCR.INTdis == '1x' && ExternalInvasiveDebugEnabled());
            when EL1
                if IsSecure() then
                    int_dis = (EDSCR.INTdis == '1x' && ExternalSecureInvasiveDebugEnabled());
                else
                    int_dis = (EDSCR.INTdis != '00' && ExternalInvasiveDebugEnabled());
    return int_dis;

Library pseudocode for shared/debug/interrupts/InterruptID

enumeration InterruptID {InterruptID_PMUIRQ, InterruptID_COMMIRQ, InterruptID_CTIIRQ,
                         InterruptID_COMMRX, InterruptID_COMMTX};

Library pseudocode for shared/debug/interrupts/SetInterruptRequestLevel

// Set a level-sensitive interrupt to the specified level.
SetInterruptRequestLevel(InterruptID id, signal level);

Library pseudocode for shared/debug/samplebasedprofiling/CreatePCSample

// CreatePCSample()
// ================

CreatePCSample()
    // In a simple sequential execution of the program, CreatePCSample is executed each time the PE
    // executes an instruction that can be sampled. An implementation is not constrained such that
    // reads of EDPCSRlo return the current values of PC, etc.

    pc_sample.valid = ExternalNoninvasiveDebugAllowed() && !Halted();
    pc_sample.pc = ThisInstrAddr();
    pc_sample.el = PSTATE.EL;
    pc_sample.rw = if UsingAArch32() then '0' else '1';
    pc_sample.ns = if IsSecure() then '0' else '1';
    pc_sample.contextidr = if ELUsingAArch32(EL1) then CONTEXTIDR else CONTEXTIDR_EL1<31:0>;
    pc_sample.has_el2 = EL2Enabled();

    if EL2Enabled() then
        if ELUsingAArch32(EL2) then
            pc_sample.vmid = ZeroExtend(VTTBR.VMID, 16);
        elsif !Have16bitVMID() || VTCR_EL2.VS == '0' then
            pc_sample.vmid = ZeroExtend(VTTBR_EL2.VMID<7:0>, 16);
        else
            pc_sample.vmid = VTTBR_EL2.VMID;
        if (HaveVirtHostExt() || HaveV82Debug()) && !ELUsingAArch32(EL2) then
            pc_sample.contextidr_el2 = CONTEXTIDR_EL2<31:0>;
        else
            pc_sample.contextidr_el2 = bits(32) UNKNOWN;
        pc_sample.el0h = PSTATE.EL == EL0 && IsInHost();
    return;

Library pseudocode for shared/debug/samplebasedprofiling/EDPCSRlo

// EDPCSRlo[] (read)
// =================

bits(32) EDPCSRlo[boolean memory_mapped]

    if EDPRSR<6:5,0> != '001' then    // Check DLK, OSLK and PU bits
        IMPLEMENTATION_DEFINED "generate error response";
        return bits(32) UNKNOWN;

    // The Software lock is OPTIONAL.
    update = !memory_mapped || EDLSR.SLK == '0';    // Software locked: no side-effects

    if pc_sample.valid then
        sample = pc_sample.pc<31:0>;
        if update then
            if HaveVirtHostExt() && EDSCR.SC2 == '1' then
                EDPCSRhi.PC = (if pc_sample.rw == '0' then Zeros(24) else pc_sample.pc<55:32>);
                EDPCSRhi.EL = pc_sample.el;
                EDPCSRhi.NS = pc_sample.ns;
            else
                EDPCSRhi = (if pc_sample.rw == '0' then Zeros(32) else pc_sample.pc<63:32>);
            EDCIDSR = pc_sample.contextidr;
            if (HaveVirtHostExt() || HaveV82Debug()) && EDSCR.SC2 == '1' then
                EDVIDSR = (if HaveEL(EL2) && pc_sample.ns == '1' then pc_sample.contextidr_el2
                           else bits(32) UNKNOWN);
            else
                if HaveEL(EL2) && pc_sample.ns == '1' && pc_sample.el IN {EL1,EL0} then
                    EDVIDSR.VMID = pc_sample.vmid;
                else
                    EDVIDSR.VMID = Zeros();
                EDVIDSR.NS = pc_sample.ns;
                EDVIDSR.E2 = (if pc_sample.el == EL2 then '1' else '0');
                EDVIDSR.E3 = (if pc_sample.el == EL3 then '1' else '0') AND pc_sample.rw;
                // The conditions for setting HV are not specified if PCSRhi is zero.
                // An example implementation may be "pc_sample.rw".
                EDVIDSR.HV = (if !IsZero(EDPCSRhi) then '1' else bit IMPLEMENTATION_DEFINED "0 or 1");
    else
        sample = Ones(32);
        if update then
            EDPCSRhi = bits(32) UNKNOWN;
            EDCIDSR = bits(32) UNKNOWN;
            EDVIDSR = bits(32) UNKNOWN;

    return sample;

Library pseudocode for shared/debug/samplebasedprofiling/PCSample

type PCSample is (
    boolean  valid,
    bits(64) pc,
    bits(2)  el,
    bit      rw,
    bit      ns,
    boolean  has_el2,
    bits(32) contextidr,
    bits(32) contextidr_el2,
    boolean  el0h,
    bits(16) vmid
)

PCSample pc_sample;
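
A C mirror of this record can be useful when modelling the sampling logic outside the pseudocode; the struct below is an illustrative assumption, not an architected layout:

    #include <stdbool.h>
    #include <stdint.h>

    // Field widths follow the pseudocode declarations above.
    struct pc_sample {
        bool     valid;
        uint64_t pc;
        uint8_t  el;              // bits(2)
        uint8_t  rw;              // bit
        uint8_t  ns;              // bit
        bool     has_el2;
        uint32_t contextidr;
        uint32_t contextidr_el2;
        bool     el0h;
        uint16_t vmid;
    };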

Library pseudocode for shared/debug/samplebasedprofiling/PMPCSR

// PMPCSR[] (read)
// ===============

bits(32) PMPCSR[boolean memory_mapped]
    if EDPRSR<6:5,0> != '001' then    // Check DLK, OSLK and PU bits
        IMPLEMENTATION_DEFINED "generate error response";
        return bits(32) UNKNOWN;

    // The Software lock is OPTIONAL.
    update = !memory_mapped || PMLSR.SLK == '0';    // Software locked: no side-effects

    if pc_sample.valid then
        sample = pc_sample.pc<31:0>;
        if update then
            PMPCSR<55:32> = (if pc_sample.rw == '0' then Zeros(24) else pc_sample.pc<55:32>);
            PMPCSR.EL = pc_sample.el;
            PMPCSR.NS = pc_sample.ns;

            PMCID1SR = pc_sample.contextidr;
            PMCID2SR = if pc_sample.has_el2 then pc_sample.contextidr_el2 else bits(32) UNKNOWN;

            PMVIDSR.VMID = (if pc_sample.has_el2 && pc_sample.el IN {EL1,EL0} && !pc_sample.el0h
                            then pc_sample.vmid else bits(16) UNKNOWN);
    else
        sample = Ones(32);
        if update then
            PMPCSR<55:32> = bits(24) UNKNOWN;
            PMPCSR.EL = bits(2) UNKNOWN;
            PMPCSR.NS = bit UNKNOWN;

            PMCID1SR = bits(32) UNKNOWN;
            PMCID2SR = bits(32) UNKNOWN;

            PMVIDSR.VMID = bits(16) UNKNOWN;

    return sample;

Library pseudocode for shared/debug/softwarestep/CheckSoftwareStep

// CheckSoftwareStep()
// ===================
// Take a Software Step exception if in the active-pending state

CheckSoftwareStep()
    // Other self-hosted debug functions will call AArch32.GenerateDebugExceptions() if called from
    // AArch32 state. However, because Software Step is only active when the debug target Exception
    // level is using AArch64, CheckSoftwareStep only calls AArch64.GenerateDebugExceptions().
    step_enabled = (!ELUsingAArch32(DebugTarget()) && AArch64.GenerateDebugExceptions() &&
                    MDSCR_EL1.SS == '1');
    if step_enabled && PSTATE.SS == '0' then
        AArch64.SoftwareStepException();

Library pseudocode for shared/debug/softwarestep/DebugExceptionReturnSS

// DebugExceptionReturnSS()
// ========================
// Returns value to write to PSTATE.SS on an exception return or Debug state exit.

bit DebugExceptionReturnSS(bits(N) spsr)
    if UsingAArch32() then
        assert N == 32;
    else
        assert N == 64;
    assert Halted() || Restarting() || PSTATE.EL != EL0;

    if Restarting() then
        enabled_at_source = FALSE;
    elsif UsingAArch32() then
        enabled_at_source = AArch32.GenerateDebugExceptions();
    else
        enabled_at_source = AArch64.GenerateDebugExceptions();

    if IllegalExceptionReturn(spsr) then
        dest = PSTATE.EL;
    else
        (valid, dest) = ELFromSPSR(spsr);  assert valid;

    dest_is_secure = IsSecureBelowEL3() || dest == EL3;
    dest_using_32 = (if dest == EL0 then spsr<4> == '1' else ELUsingAArch32(dest));
    if dest_using_32 then
        enabled_at_dest = AArch32.GenerateDebugExceptionsFrom(dest, dest_is_secure);
    else
        mask = spsr<9>;
        enabled_at_dest = AArch64.GenerateDebugExceptionsFrom(dest, dest_is_secure, mask);

    ELd = DebugTargetFrom(dest_is_secure);
    if !ELUsingAArch32(ELd) && MDSCR_EL1.SS == '1' && !enabled_at_source && enabled_at_dest then
        SS_bit = spsr<21>;
    else
        SS_bit = '0';
    return SS_bit;

Library pseudocode for shared/debug/softwarestep/SSAdvance

// SSAdvance()
// ===========
// Advance the Software Step state machine.

SSAdvance()

    // A simpler implementation of this function just clears PSTATE.SS to zero regardless of the
    // current Software Step state machine. However, this check is made to illustrate that the
    // processor only needs to consider advancing the state machine from the active-not-pending
    // state.
    target = DebugTarget();
    step_enabled = !ELUsingAArch32(target) && MDSCR_EL1.SS == '1';
    active_not_pending = step_enabled && PSTATE.SS == '1';

    if active_not_pending then PSTATE.SS = '0';

    return;
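
The state machine above has three states: inactive (stepping disabled), active-not-pending (PSTATE.SS == '1') and active-pending (PSTATE.SS == '0' with stepping enabled). A small C sketch of the classification and of the advance performed by SSAdvance() (names are illustrative, not architected):

    #include <stdbool.h>

    enum ss_state { SS_INACTIVE, SS_ACTIVE_NOT_PENDING, SS_ACTIVE_PENDING };

    static enum ss_state classify_ss_state(bool step_enabled, bool pstate_ss)
    {
        if (!step_enabled) return SS_INACTIVE;
        return pstate_ss ? SS_ACTIVE_NOT_PENDING : SS_ACTIVE_PENDING;
    }

    // SSAdvance(): only the active-not-pending state advances, by clearing
    // PSTATE.SS so the next instruction boundary is active-pending.
    static void ss_advance(bool step_enabled, bool *pstate_ss)
    {
        if (step_enabled && *pstate_ss)
            *pstate_ss = false;
    }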

Library pseudocode for shared/debug/softwarestep/SoftwareStep_DidNotStep

// Returns TRUE if the previously executed instruction was executed in the inactive state, that is,
// if it was not itself stepped.
// Might return TRUE or FALSE if the previously executed instruction was an ISB or ERET executed
// in the active-not-pending state, or if another exception was taken before the Software Step exception.
// Returns FALSE otherwise, indicating that the previously executed instruction was executed in the
// active-not-pending state, that is, the instruction was stepped.
boolean SoftwareStep_DidNotStep();

Library pseudocode for shared/debug/softwarestep/SoftwareStep_SteppedEX

// Returns a value that describes the previously executed instruction. The result is valid only if
// SoftwareStep_DidNotStep() returns FALSE.
// Might return TRUE or FALSE if the instruction was an AArch32 LDREX or LDAEX that failed its
// condition code test.
// Otherwise returns TRUE if the instruction was a Load-Exclusive class instruction, and FALSE if the
// instruction was not a Load-Exclusive class instruction.
boolean SoftwareStep_SteppedEX();

Library pseudocode for shared/exceptions/exceptions/ConditionSyndrome

// ConditionSyndrome()
// ===================
// Return CV and COND fields of instruction syndrome

bits(5) ConditionSyndrome()

    bits(5) syndrome;

    if UsingAArch32() then
        cond = AArch32.CurrentCond();
        if PSTATE.T == '0' then             // A32
            syndrome<4> = '1';
            // A conditional A32 instruction that is known to pass its condition code check
            // can be presented either with COND set to 0xE, the value for unconditional, or
            // the COND value held in the instruction.
            if ConditionHolds(cond) && ConstrainUnpredictableBool(Unpredictable_ESRCONDPASS) then
                syndrome<3:0> = '1110';
            else
                syndrome<3:0> = cond;
        else                                // T32
            // When a T32 instruction is trapped, it is IMPLEMENTATION DEFINED whether:
            //  * CV set to 0 and COND is set to an UNKNOWN value
            //  * CV set to 1 and COND is set to the condition code for the condition that
            //    applied to the instruction.
            if boolean IMPLEMENTATION_DEFINED "Condition valid for trapped T32" then
                syndrome<4> = '1';
                syndrome<3:0> = cond;
            else
                syndrome<4> = '0';
                syndrome<3:0> = bits(4) UNKNOWN;
    else
        syndrome<4> = '1';
        syndrome<3:0> = '1110';

    return syndrome;
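
The returned value packs CV into bit 4 and COND into bits 3:0. A one-line C helper showing the packing (illustrative only):

    #include <stdbool.h>
    #include <stdint.h>

    static uint8_t condition_syndrome(bool cv, uint8_t cond)
    {
        return (uint8_t)(((cv ? 1 : 0) << 4) | (cond & 0xF));
    }
    // For an unconditional (or A64) instruction: condition_syndrome(true, 0xE).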

Library pseudocode for shared/exceptions/exceptions/Exception

enumeration Exception {Exception_Uncategorized,       // Uncategorized or unknown reason
                       Exception_WFxTrap,             // Trapped WFI or WFE instruction
                       Exception_CP15RTTrap,          // Trapped AArch32 MCR or MRC access to CP15
                       Exception_CP15RRTTrap,         // Trapped AArch32 MCRR or MRRC access to CP15
                       Exception_CP14RTTrap,          // Trapped AArch32 MCR or MRC access to CP14
                       Exception_CP14DTTrap,          // Trapped AArch32 LDC or STC access to CP14
                       Exception_AdvSIMDFPAccessTrap, // HCPTR-trapped access to SIMD or FP
                       Exception_FPIDTrap,            // Trapped access to SIMD or FP ID register
                       Exception_LDST64BTrap,         // Trapped access to ST64BV, ST64BV0, ST64B and LD64B
                       // Trapped BXJ instruction not supported in Armv8
                       Exception_PACTrap,             // Trapped invalid PAC use
                       Exception_CP14RRTTrap,         // Trapped MRRC access to CP14 from AArch32
                       Exception_IllegalState,        // Illegal Execution state
                       Exception_SupervisorCall,      // Supervisor Call
                       Exception_HypervisorCall,      // Hypervisor Call
                       Exception_MonitorCall,         // Monitor Call or Trapped SMC instruction
                       Exception_SystemRegisterTrap,  // Trapped MRS or MSR system register access
                       Exception_ERetTrap,            // Trapped invalid ERET use
                       Exception_InstructionAbort,    // Instruction Abort or Prefetch Abort
                       Exception_PCAlignment,         // PC alignment fault
                       Exception_DataAbort,           // Data Abort
                       Exception_NV2DataAbort,        // Data abort at EL1 reported as being from EL2
                       Exception_PACFail,             // PAC Authentication failure
                       Exception_SPAlignment,         // SP alignment fault
                       Exception_FPTrappedException,  // IEEE trapped FP exception
                       Exception_SError,              // SError interrupt
                       Exception_Breakpoint,          // (Hardware) Breakpoint
                       Exception_SoftwareStep,        // Software Step
                       Exception_Watchpoint,          // Watchpoint
                       Exception_NV2Watchpoint,       // Watchpoint at EL1 reported as being from EL2
                       Exception_SoftwareBreakpoint,  // Software Breakpoint Instruction
                       Exception_VectorCatch,         // AArch32 Vector Catch
                       Exception_IRQ,                 // IRQ interrupt
                       Exception_SVEAccessTrap,       // HCPTR trapped access to SVE
                       Exception_BranchTarget,        // Branch Target Identification
                       Exception_FIQ};                // FIQ interrupt

Library pseudocode for shared/functions/common/LowestSetBit

// LowestSetBit()
// ==============

integer LowestSetBit(bits(N) x)
    for i = 0 to N-1
        if x<i> == '1' then return i;
    return N;
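
A direct Python analogue (illustrative only) makes the boundary case explicit: when no bit is set, the function returns N rather than an error value:

def lowest_set_bit(x: int, n: int) -> int:
    """Index of the least significant '1' of an N-bit value, or N if x is zero."""
    for i in range(n):
        if (x >> i) & 1:
            return i
    return n

assert lowest_set_bit(0b0100, 8) == 2
assert lowest_set_bit(0, 8) == 8            # no set bit: returns N, as in the ASL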

Library pseudocode for shared/functions/common/Max

// Max()
// =====

integer Max(integer a, integer b)
    return if a >= b then a else b;

// Max()
// =====

real Max(real a, real b)
    return if a >= b then a else b;

Library pseudocode for shared/functions/common/Min

// Min()
// =====

integer Min(integer a, integer b)
    return if a <= b then a else b;

// Min()
// =====

real Min(real a, real b)
    return if a <= b then a else b;

Library pseudocode for shared/functions/common/Ones

// Ones()
// ======

bits(N) Ones(integer N)
    return Replicate('1',N);

// Ones()
// ======

bits(N) Ones()
    return Ones(N);

Library pseudocode for shared/functions/common/ROR

// ROR()
// =====

bits(N) ROR(bits(N) x, integer shift)
    assert shift >= 0;
    if shift == 0 then
        result = x;
    else
        (result, -) = ROR_C(x, shift);
    return result;

Library pseudocode for shared/functions/common/ROR_C

// ROR_C()
// =======

(bits(N), bit) ROR_C(bits(N) x, integer shift)
    assert shift != 0;
    m = shift MOD N;
    result = LSR(x,m) OR LSL(x,N-m);
    carry_out = result<N-1>;
    return (result, carry_out);
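
As a companion to the shift sketch above, here is an illustrative Python model of ROR_C (hypothetical names; not the specification). Note that the carry out is defined as a copy of the new top bit, and that ROL in the crypto helpers later in this section is expressed through ROR:

def ror_c(x: int, n: int, shift: int):
    """Rotate right an N-bit value, returning (result, carry_out).

    As in the ASL, the rotate amount is reduced modulo N and the carry
    is a copy of the new top bit. Requires shift != 0.
    """
    assert n > 0 and shift != 0
    m = shift % n
    x &= (1 << n) - 1
    result = ((x >> m) | (x << (n - m))) & ((1 << n) - 1)
    carry_out = (result >> (n - 1)) & 1
    return result, carry_out

assert ror_c(0b0011, 4, 1) == (0b1001, 1)
# ROL is defined through ROR: ROL(x, s) == ROR(x, N - s).
assert ror_c(0b1001, 4, 4 - 1)[0] == 0b0011   # rotating left by 1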

Library pseudocode for shared/functions/common/Replicate

// Replicate()
// ===========

bits(N) Replicate(bits(M) x)
    assert N MOD M == 0;
    return Replicate(x, N DIV M);

bits(M*N) Replicate(bits(M) x, integer N);

Library pseudocode for shared/functions/common/RoundDown

integer RoundDown(real x);

Library pseudocode for shared/functions/common/RoundTowardsZero

// RoundTowardsZero()
// ==================

integer RoundTowardsZero(real x)
    return if x == 0.0 then 0 else if x >= 0.0 then RoundDown(x) else RoundUp(x);

Library pseudocode for shared/functions/common/RoundUp

integer RoundUp(real x);
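
RoundTowardsZero is the one of these three that matches C-style integer truncation; it differs from RoundDown only for negative inputs. A minimal Python check (illustrative; math.floor and math.ceil stand in for RoundDown and RoundUp):

import math

def round_towards_zero(x: float) -> int:
    """Mirrors the ASL: zero maps to zero, positives round down, negatives round up."""
    if x == 0.0:
        return 0
    return math.floor(x) if x >= 0.0 else math.ceil(x)

assert round_towards_zero(1.7) == 1
assert round_towards_zero(-1.7) == -1          # RoundDown(-1.7) would give -2
assert round_towards_zero(-1.7) == int(-1.7)   # Python's int() also truncates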

Library pseudocode for shared/functions/common/SInt

// SInt()
// ======

integer SInt(bits(N) x)
    result = 0;
    for i = 0 to N-1
        if x<i> == '1' then result = result + 2^i;
    if x<N-1> == '1' then result = result - 2^N;
    return result;

Library pseudocode for shared/functions/common/SignExtend

// SignExtend()
// ============

bits(N) SignExtend(bits(M) x, integer N)
    assert N >= M;
    return Replicate(x<M-1>, N-M) : x;

// SignExtend()
// ============

bits(N) SignExtend(bits(M) x)
    return SignExtend(x, N);

Library pseudocode for shared/functions/common/UInt

// UInt()
// ======

integer UInt(bits(N) x)
    result = 0;
    for i = 0 to N-1
        if x<i> == '1' then result = result + 2^i;
    return result;

Library pseudocode for shared/functions/common/ZeroExtend

// ZeroExtend()
// ============

bits(N) ZeroExtend(bits(M) x, integer N)
    assert N >= M;
    return Zeros(N-M) : x;

// ZeroExtend()
// ============

bits(N) ZeroExtend(bits(M) x)
    return ZeroExtend(x, N);
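
The four conversions above are easy to confuse. A small Python sketch (hypothetical names; N-bit patterns held in integers, illustrative only) shows how SInt reinterprets the same bits that UInt reads, and why SignExtend preserves the two's-complement value where ZeroExtend does not:

def uint(x: int, n: int) -> int:
    """Unsigned value of an N-bit pattern."""
    return x & ((1 << n) - 1)

def sint(x: int, n: int) -> int:
    """Two's-complement value: subtract 2^N when the sign bit is set."""
    x = uint(x, n)
    return x - (1 << n) if (x >> (n - 1)) & 1 else x

def sign_extend(x: int, m: int, n: int) -> int:
    """Widen an M-bit pattern to N bits by replicating its top bit."""
    assert n >= m
    return uint(sint(x, m), n)

assert sint(0b1110, 4) == -2 and uint(0b1110, 4) == 14
assert sign_extend(0b1110, 4, 8) == 0b11111110   # ZeroExtend would give 0b00001110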

Library pseudocode for shared/functions/common/Zeros

// Zeros()
// =======

bits(N) Zeros(integer N)
    return Replicate('0',N);

// Zeros()
// =======

bits(N) Zeros()
    return Zeros(N);
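
In the ASL, bit-vectors behave like strings of '0'/'1' characters, so Replicate, Zeros and Ones can be modelled one-for-one with Python string operations (illustrative only):

def replicate(x: str, count: int) -> str:
    """Concatenate `count` copies of the bit-string x, as Replicate(x, N) does."""
    return x * count

def zeros(n: int) -> str:
    return replicate('0', n)

def ones(n: int) -> str:
    return replicate('1', n)

assert replicate('10', 3) == '101010'     # a bits(6) value
assert zeros(2) == '00' and ones(4) == '1111'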

Library pseudocode for shared/functions/crc/BitReverse

// BitReverse()
// ============

bits(N) BitReverse(bits(N) data)
    bits(N) result;
    for i = 0 to N-1
        result<N-i-1> = data<i>;
    return result;

Library pseudocode for shared/functions/crc/HaveCRCExt

// HaveCRCExt()
// ============

boolean HaveCRCExt()
    return HasArchVersion(ARMv8p1) || boolean IMPLEMENTATION_DEFINED "Have CRC extension";

Library pseudocode for shared/functions/crc/Poly32Mod2

// Poly32Mod2()
// ============
// Poly32Mod2 on a bitstring does a polynomial Modulus over {0,1} operation

bits(32) Poly32Mod2(bits(N) data, bits(32) poly)
    assert N > 32;
    for i = N-1 downto 32
        if data<i> == '1' then
            data<i-1:0> = data<i-1:0> EOR (poly:Zeros(i-32));
    return data<31:0>;
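
Poly32Mod2 is the polynomial remainder step underlying the CRC32 instructions: each set bit at position 32 or above is cancelled by XORing the divisor in underneath it. A Python sketch (hypothetical names; the check value assumes the standard CRC-32 divisor 0x04C11DB7, which this function itself does not fix):

def poly32_mod2(data: int, n: int, poly: int) -> int:
    """Remainder of an N-bit value modulo a degree-32 polynomial over GF(2).

    `poly` holds the low 32 coefficients of the divisor (the x^32 term is
    implicit). As in the ASL, each set bit at position >= 32 is cancelled
    by XORing the divisor in underneath it; only the low 32 bits survive.
    """
    assert n > 32
    for i in range(n - 1, 31, -1):
        if (data >> i) & 1:
            data ^= poly << (i - 32)
    return data & 0xFFFFFFFF

CRC32_POLY = 0x04C11DB7                 # assumption: the usual CRC-32 divisor
assert poly32_mod2(1 << 32, 33, CRC32_POLY) == CRC32_POLY   # x^32 mod P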

Library pseudocode for shared/functions/crypto/AESInvMixColumns

// AESInvMixColumns()
// ==================
// Transformation in the Inverse Cipher that is the inverse of AESMixColumns.

bits(128) AESInvMixColumns(bits (128) op)
    bits(4*8) in0 = op< 96+:8> : op< 64+:8> : op< 32+:8> : op<  0+:8>;
    bits(4*8) in1 = op<104+:8> : op< 72+:8> : op< 40+:8> : op<  8+:8>;
    bits(4*8) in2 = op<112+:8> : op< 80+:8> : op< 48+:8> : op< 16+:8>;
    bits(4*8) in3 = op<120+:8> : op< 88+:8> : op< 56+:8> : op< 24+:8>;
    bits(4*8) out0;
    bits(4*8) out1;
    bits(4*8) out2;
    bits(4*8) out3;
    for c = 0 to 3
        out0<c*8+:8> = FFmul0E(in0<c*8+:8>) EOR FFmul0B(in1<c*8+:8>) EOR FFmul0D(in2<c*8+:8>) EOR FFmul09(in3<c*8+:8>);
        out1<c*8+:8> = FFmul09(in0<c*8+:8>) EOR FFmul0E(in1<c*8+:8>) EOR FFmul0B(in2<c*8+:8>) EOR FFmul0D(in3<c*8+:8>);
        out2<c*8+:8> = FFmul0D(in0<c*8+:8>) EOR FFmul09(in1<c*8+:8>) EOR FFmul0E(in2<c*8+:8>) EOR FFmul0B(in3<c*8+:8>);
        out3<c*8+:8> = FFmul0B(in0<c*8+:8>) EOR FFmul0D(in1<c*8+:8>) EOR FFmul09(in2<c*8+:8>) EOR FFmul0E(in3<c*8+:8>);
    return ( out3<3*8+:8> : out2<3*8+:8> : out1<3*8+:8> : out0<3*8+:8> :
             out3<2*8+:8> : out2<2*8+:8> : out1<2*8+:8> : out0<2*8+:8> :
             out3<1*8+:8> : out2<1*8+:8> : out1<1*8+:8> : out0<1*8+:8> :
             out3<0*8+:8> : out2<0*8+:8> : out1<0*8+:8> : out0<0*8+:8> );

Library pseudocode for shared/functions/crypto/AESInvShiftRows

// AESInvShiftRows()
// =================
// Transformation in the Inverse Cipher that is the inverse of AESShiftRows.

bits(128) AESInvShiftRows(bits(128) op)
    return ( op< 24+:8> : op< 48+:8> : op< 72+:8> : op< 96+:8> :
             op<120+:8> : op< 16+:8> : op< 40+:8> : op< 64+:8> :
             op< 88+:8> : op<112+:8> : op<  8+:8> : op< 32+:8> :
             op< 56+:8> : op< 80+:8> : op<104+:8> : op<  0+:8> );

Library pseudocode for shared/functions/crypto/AESInvSubBytes

// AESInvSubBytes()
// ================
// Transformation in the Inverse Cipher that is the inverse of AESSubBytes.

bits(128) AESInvSubBytes(bits(128) op)
    // Inverse S-box values
    bits(16*16*8) GF2_inv = (
        /*       F E D C B A 9 8 7 6 5 4 3 2 1 0 */
        /*F*/ 0x7d0c2155631469e126d677ba7e042b17<127:0> :
        /*E*/ 0x619953833cbbebc8b0f52aae4d3be0a0<127:0> :
        /*D*/ 0xef9cc9939f7ae52d0d4ab519a97f5160<127:0> :
        /*C*/ 0x5fec8027591012b131c7078833a8dd1f<127:0> :
        /*B*/ 0xf45acd78fec0db9a2079d2c64b3e56fc<127:0> :
        /*A*/ 0x1bbe18aa0e62b76f89c5291d711af147<127:0> :
        /*9*/ 0x6edf751ce837f9e28535ade72274ac96<127:0> :
        /*8*/ 0x73e6b4f0cecff297eadc674f4111913a<127:0> :
        /*7*/ 0x6b8a130103bdafc1020f3fca8f1e2cd0<127:0> :
        /*6*/ 0x0645b3b80558e4f70ad3bc8c00abd890<127:0> :
        /*5*/ 0x849d8da75746155edab9edfd5048706c<127:0> :
        /*4*/ 0x92b6655dcc5ca4d41698688664f6f872<127:0> :
        /*3*/ 0x25d18b6d49a25b76b224d92866a12e08<127:0> :
        /*2*/ 0x4ec3fa420b954cee3d23c2a632947b54<127:0> :
        /*1*/ 0xcbe9dec444438e3487ff2f9b8239e37c<127:0> :
        /*0*/ 0xfbd7f3819ea340bf38a53630d56a0952<127:0> );
    bits(128) out;
    for i = 0 to 15
        out<i*8+:8> = GF2_inv<UInt(op<i*8+:8>)*8+:8>;
    return out;

Library pseudocode for shared/functions/crypto/AESMixColumns

// AESMixColumns()
// ===============
// Transformation in the Cipher that takes all of the columns of the
// State and mixes their data (independently of one another) to
// produce new columns.

bits(128) AESMixColumns(bits (128) op)
    bits(4*8) in0 = op< 96+:8> : op< 64+:8> : op< 32+:8> : op<  0+:8>;
    bits(4*8) in1 = op<104+:8> : op< 72+:8> : op< 40+:8> : op<  8+:8>;
    bits(4*8) in2 = op<112+:8> : op< 80+:8> : op< 48+:8> : op< 16+:8>;
    bits(4*8) in3 = op<120+:8> : op< 88+:8> : op< 56+:8> : op< 24+:8>;
    bits(4*8) out0;
    bits(4*8) out1;
    bits(4*8) out2;
    bits(4*8) out3;
    for c = 0 to 3
        out0<c*8+:8> = FFmul02(in0<c*8+:8>) EOR FFmul03(in1<c*8+:8>) EOR in2<c*8+:8> EOR in3<c*8+:8>;
        out1<c*8+:8> = in0<c*8+:8> EOR FFmul02(in1<c*8+:8>) EOR FFmul03(in2<c*8+:8>) EOR in3<c*8+:8>;
        out2<c*8+:8> = in0<c*8+:8> EOR in1<c*8+:8> EOR FFmul02(in2<c*8+:8>) EOR FFmul03(in3<c*8+:8>);
        out3<c*8+:8> = FFmul03(in0<c*8+:8>) EOR in1<c*8+:8> EOR in2<c*8+:8> EOR FFmul02(in3<c*8+:8>);
    return ( out3<3*8+:8> : out2<3*8+:8> : out1<3*8+:8> : out0<3*8+:8> :
             out3<2*8+:8> : out2<2*8+:8> : out1<2*8+:8> : out0<2*8+:8> :
             out3<1*8+:8> : out2<1*8+:8> : out1<1*8+:8> : out0<1*8+:8> :
             out3<0*8+:8> : out2<0*8+:8> : out1<0*8+:8> : out0<0*8+:8> );

Library pseudocode for shared/functions/crypto/AESShiftRows

// AESShiftRows()
// ==============
// Transformation in the Cipher that processes the State by cyclically
// shifting the last three rows of the State by different offsets.

bits(128) AESShiftRows(bits(128) op)
    return ( op< 88+:8> : op< 48+:8> : op<  8+:8> : op< 96+:8> :
             op< 56+:8> : op< 16+:8> : op<104+:8> : op< 64+:8> :
             op< 24+:8> : op<112+:8> : op< 72+:8> : op< 32+:8> :
             op<120+:8> : op< 80+:8> : op< 40+:8> : op<  0+:8> );

Library pseudocode for shared/functions/crypto/AESSubBytes

// AESSubBytes()
// =============
// Transformation in the Cipher that processes the State using a nonlinear
// byte substitution table (S-box) that operates on each of the State bytes
// independently.

bits(128) AESSubBytes(bits(128) op)
    // S-box values
    bits(16*16*8) GF2 = (
        /*       F E D C B A 9 8 7 6 5 4 3 2 1 0 */
        /*F*/ 0x16bb54b00f2d99416842e6bf0d89a18c<127:0> :
        /*E*/ 0xdf2855cee9871e9b948ed9691198f8e1<127:0> :
        /*D*/ 0x9e1dc186b95735610ef6034866b53e70<127:0> :
        /*C*/ 0x8a8bbd4b1f74dde8c6b4a61c2e2578ba<127:0> :
        /*B*/ 0x08ae7a65eaf4566ca94ed58d6d37c8e7<127:0> :
        /*A*/ 0x79e4959162acd3c25c2406490a3a32e0<127:0> :
        /*9*/ 0xdb0b5ede14b8ee4688902a22dc4f8160<127:0> :
        /*8*/ 0x73195d643d7ea7c41744975fec130ccd<127:0> :
        /*7*/ 0xd2f3ff1021dab6bcf5389d928f40a351<127:0> :
        /*6*/ 0xa89f3c507f02f94585334d43fbaaefd0<127:0> :
        /*5*/ 0xcf584c4a39becb6a5bb1fc20ed00d153<127:0> :
        /*4*/ 0x842fe329b3d63b52a05a6e1b1a2c8309<127:0> :
        /*3*/ 0x75b227ebe28012079a059618c323c704<127:0> :
        /*2*/ 0x1531d871f1e5a534ccf73f362693fdb7<127:0> :
        /*1*/ 0xc072a49cafa2d4adf04759fa7dc982ca<127:0> :
        /*0*/ 0x76abd7fe2b670130c56f6bf27b777c63<127:0> );
    bits(128) out;
    for i = 0 to 15
        out<i*8+:8> = GF2<UInt(op<i*8+:8>)*8+:8>;
    return out;
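
The packed bits(2048) constants above are 256-entry byte tables read MSB-first, so the lookup GF2<UInt(b)*8+:8> is an ordinary table[b] access. An illustrative Python sketch of the byte-wise substitution (the real AES S-box table is omitted for brevity; an identity table stands in for the demo):

def sub_bytes(state: bytes, sbox: list) -> bytes:
    """Substitute each state byte independently through a 256-entry table.

    Mirrors the ASL lookup GF2<UInt(op<i*8+:8>)*8+:8>, which is simply
    table[byte] once the packed constant is unpicked.
    """
    assert len(sbox) == 256
    return bytes(sbox[b] for b in state)

# With the real AES table, sub_bytes(b"\x00", AES_SBOX) == b"\x63".
identity = list(range(256))
assert sub_bytes(b"\x01\x02", identity) == b"\x01\x02"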

Library pseudocode for shared/functions/crypto/FFmul02

// FFmul02()
// =========

bits(8) FFmul02(bits(8) b)
    bits(256*8) FFmul_02 = (
        /*       F E D C B A 9 8 7 6 5 4 3 2 1 0 */
        /*F*/ 0xE5E7E1E3EDEFE9EBF5F7F1F3FDFFF9FB<127:0> :
        /*E*/ 0xC5C7C1C3CDCFC9CBD5D7D1D3DDDFD9DB<127:0> :
        /*D*/ 0xA5A7A1A3ADAFA9ABB5B7B1B3BDBFB9BB<127:0> :
        /*C*/ 0x858781838D8F898B959791939D9F999B<127:0> :
        /*B*/ 0x656761636D6F696B757771737D7F797B<127:0> :
        /*A*/ 0x454741434D4F494B555751535D5F595B<127:0> :
        /*9*/ 0x252721232D2F292B353731333D3F393B<127:0> :
        /*8*/ 0x050701030D0F090B151711131D1F191B<127:0> :
        /*7*/ 0xFEFCFAF8F6F4F2F0EEECEAE8E6E4E2E0<127:0> :
        /*6*/ 0xDEDCDAD8D6D4D2D0CECCCAC8C6C4C2C0<127:0> :
        /*5*/ 0xBEBCBAB8B6B4B2B0AEACAAA8A6A4A2A0<127:0> :
        /*4*/ 0x9E9C9A98969492908E8C8A8886848280<127:0> :
        /*3*/ 0x7E7C7A78767472706E6C6A6866646260<127:0> :
        /*2*/ 0x5E5C5A58565452504E4C4A4846444240<127:0> :
        /*1*/ 0x3E3C3A38363432302E2C2A2826242220<127:0> :
        /*0*/ 0x1E1C1A18161412100E0C0A0806040200<127:0> );
    return FFmul_02<UInt(b)*8+:8>;

Library pseudocode for shared/functions/crypto/FFmul03

// FFmul03()
// =========

bits(8) FFmul03(bits(8) b)
    bits(256*8) FFmul_03 = (
        /*       F E D C B A 9 8 7 6 5 4 3 2 1 0 */
        /*F*/ 0x1A191C1F16151013020104070E0D080B<127:0> :
        /*E*/ 0x2A292C2F26252023323134373E3D383B<127:0> :
        /*D*/ 0x7A797C7F76757073626164676E6D686B<127:0> :
        /*C*/ 0x4A494C4F46454043525154575E5D585B<127:0> :
        /*B*/ 0xDAD9DCDFD6D5D0D3C2C1C4C7CECDC8CB<127:0> :
        /*A*/ 0xEAE9ECEFE6E5E0E3F2F1F4F7FEFDF8FB<127:0> :
        /*9*/ 0xBAB9BCBFB6B5B0B3A2A1A4A7AEADA8AB<127:0> :
        /*8*/ 0x8A898C8F86858083929194979E9D989B<127:0> :
        /*7*/ 0x818287848D8E8B88999A9F9C95969390<127:0> :
        /*6*/ 0xB1B2B7B4BDBEBBB8A9AAAFACA5A6A3A0<127:0> :
        /*5*/ 0xE1E2E7E4EDEEEBE8F9FAFFFCF5F6F3F0<127:0> :
        /*4*/ 0xD1D2D7D4DDDEDBD8C9CACFCCC5C6C3C0<127:0> :
        /*3*/ 0x414247444D4E4B48595A5F5C55565350<127:0> :
        /*2*/ 0x717277747D7E7B78696A6F6C65666360<127:0> :
        /*1*/ 0x212227242D2E2B28393A3F3C35363330<127:0> :
        /*0*/ 0x111217141D1E1B18090A0F0C05060300<127:0> );
    return FFmul_03<UInt(b)*8+:8>;

Library pseudocode for shared/functions/crypto/FFmul09

// FFmul09()
// =========

bits(8) FFmul09(bits(8) b)
    bits(256*8) FFmul_09 = (
        /*       F E D C B A 9 8 7 6 5 4 3 2 1 0 */
        /*F*/ 0x464F545D626B70790E071C152A233831<127:0> :
        /*E*/ 0xD6DFC4CDF2FBE0E99E978C85BAB3A8A1<127:0> :
        /*D*/ 0x7D746F6659504B42353C272E1118030A<127:0> :
        /*C*/ 0xEDE4FFF6C9C0DBD2A5ACB7BE8188939A<127:0> :
        /*B*/ 0x3039222B141D060F78716A635C554E47<127:0> :
        /*A*/ 0xA0A9B2BB848D969FE8E1FAF3CCC5DED7<127:0> :
        /*9*/ 0x0B0219102F263D34434A5158676E757C<127:0> :
        /*8*/ 0x9B928980BFB6ADA4D3DAC1C8F7FEE5EC<127:0> :
        /*7*/ 0xAAA3B8B18E879C95E2EBF0F9C6CFD4DD<127:0> :
        /*6*/ 0x3A3328211E170C05727B6069565F444D<127:0> :
        /*5*/ 0x9198838AB5BCA7AED9D0CBC2FDF4EFE6<127:0> :
        /*4*/ 0x0108131A252C373E49405B526D647F76<127:0> :
        /*3*/ 0xDCD5CEC7F8F1EAE3949D868FB0B9A2AB<127:0> :
        /*2*/ 0x4C455E5768617A73040D161F2029323B<127:0> :
        /*1*/ 0xE7EEF5FCC3CAD1D8AFA6BDB48B829990<127:0> :
        /*0*/ 0x777E656C535A41483F362D241B120900<127:0> );
    return FFmul_09<UInt(b)*8+:8>;

Library pseudocode for shared/functions/crypto/FFmul0B

// FFmul0B()
// =========

bits(8) FFmul0B(bits(8) b)
    bits(256*8) FFmul_0B = (
        /*       F E D C B A 9 8 7 6 5 4 3 2 1 0 */
        /*F*/ 0xA3A8B5BE8F849992FBF0EDE6D7DCC1CA<127:0> :
        /*E*/ 0x1318050E3F3429224B405D56676C717A<127:0> :
        /*D*/ 0xD8D3CEC5F4FFE2E9808B969DACA7BAB1<127:0> :
        /*C*/ 0x68637E75444F5259303B262D1C170A01<127:0> :
        /*B*/ 0x555E434879726F640D061B10212A373C<127:0> :
        /*A*/ 0xE5EEF3F8C9C2DFD4BDB6ABA0919A878C<127:0> :
        /*9*/ 0x2E2538330209141F767D606B5A514C47<127:0> :
        /*8*/ 0x9E958883B2B9A4AFC6CDD0DBEAE1FCF7<127:0> :
        /*7*/ 0x545F424978736E650C071A11202B363D<127:0> :
        /*6*/ 0xE4EFF2F9C8C3DED5BCB7AAA1909B868D<127:0> :
        /*5*/ 0x2F2439320308151E777C616A5B504D46<127:0> :
        /*4*/ 0x9F948982B3B8A5AEC7CCD1DAEBE0FDF6<127:0> :
        /*3*/ 0xA2A9B4BF8E859893FAF1ECE7D6DDC0CB<127:0> :
        /*2*/ 0x1219040F3E3528234A415C57666D707B<127:0> :
        /*1*/ 0xD9D2CFC4F5FEE3E8818A979CADA6BBB0<127:0> :
        /*0*/ 0x69627F74454E5358313A272C1D160B00<127:0> );
    return FFmul_0B<UInt(b)*8+:8>;

Library pseudocode for shared/functions/crypto/FFmul0D

// FFmul0D()
// =========

bits(8) FFmul0D(bits(8) b)
    bits(256*8) FFmul_0D = (
        /*       F E D C B A 9 8 7 6 5 4 3 2 1 0 */
        /*F*/ 0x979A8D80A3AEB9B4FFF2E5E8CBC6D1DC<127:0> :
        /*E*/ 0x474A5D50737E69642F2235381B16010C<127:0> :
        /*D*/ 0x2C21363B1815020F44495E53707D6A67<127:0> :
        /*C*/ 0xFCF1E6EBC8C5D2DF94998E83A0ADBAB7<127:0> :
        /*B*/ 0xFAF7E0EDCEC3D4D9929F8885A6ABBCB1<127:0> :
        /*A*/ 0x2A27303D1E130409424F5855767B6C61<127:0> :
        /*9*/ 0x414C5B5675786F622924333E1D10070A<127:0> :
        /*8*/ 0x919C8B86A5A8BFB2F9F4E3EECDC0D7DA<127:0> :
        /*7*/ 0x4D40575A7974636E25283F32111C0B06<127:0> :
        /*6*/ 0x9D90878AA9A4B3BEF5F8EFE2C1CCDBD6<127:0> :
        /*5*/ 0xF6FBECE1C2CFD8D59E938489AAA7B0BD<127:0> :
        /*4*/ 0x262B3C31121F08054E4354597A77606D<127:0> :
        /*3*/ 0x202D3A3714190E034845525F7C71666B<127:0> :
        /*2*/ 0xF0FDEAE7C4C9DED39895828FACA1B6BB<127:0> :
        /*1*/ 0x9B96818CAFA2B5B8F3FEE9E4C7CADDD0<127:0> :
        /*0*/ 0x4B46515C7F726568232E3934171A0D00<127:0> );
    return FFmul_0D<UInt(b)*8+:8>;

Library pseudocode for shared/functions/crypto/FFmul0E

// FFmul0E()
// =========

bits(8) FFmul0E(bits(8) b)
    bits(256*8) FFmul_0E = (
        /*       F E D C B A 9 8 7 6 5 4 3 2 1 0 */
        /*F*/ 0x8D83919FB5BBA9A7FDF3E1EFC5CBD9D7<127:0> :
        /*E*/ 0x6D63717F555B49471D13010F252B3937<127:0> :
        /*D*/ 0x56584A446E60727C26283A341E10020C<127:0> :
        /*C*/ 0xB6B8AAA48E80929CC6C8DAD4FEF0E2EC<127:0> :
        /*B*/ 0x202E3C321816040A505E4C426866747A<127:0> :
        /*A*/ 0xC0CEDCD2F8F6E4EAB0BEACA28886949A<127:0> :
        /*9*/ 0xFBF5E7E9C3CDDFD18B859799B3BDAFA1<127:0> :
        /*8*/ 0x1B150709232D3F316B657779535D4F41<127:0> :
        /*7*/ 0xCCC2D0DEF4FAE8E6BCB2A0AE848A9896<127:0> :
        /*6*/ 0x2C22303E141A08065C52404E646A7876<127:0> :
        /*5*/ 0x17190B052F21333D67697B755F51434D<127:0> :
        /*4*/ 0xF7F9EBE5CFC1D3DD87899B95BFB1A3AD<127:0> :
        /*3*/ 0x616F7D735957454B111F0D032927353B<127:0> :
        /*2*/ 0x818F9D93B9B7A5ABF1FFEDE3C9C7D5DB<127:0> :
        /*1*/ 0xBAB4A6A8828C9E90CAC4D6D8F2FCEEE0<127:0> :
        /*0*/ 0x5A544648626C7E702A243638121C0E00<127:0> );
    return FFmul_0E<UInt(b)*8+:8>;
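
The five FFmul tables are precomputed multiplications by fixed constants in GF(2^8) with the AES reduction polynomial. They can be regenerated from first principles with a shift-and-add multiply; the Python sketch below (illustrative, hypothetical names) spot-checks a few entries against the packed tables above:

def xtime(b: int) -> int:
    """Multiply by x (0x02) in GF(2^8) modulo the AES polynomial x^8+x^4+x^3+x+1."""
    b <<= 1
    return (b ^ 0x1B) & 0xFF if b & 0x100 else b

def ffmul(a: int, b: int) -> int:
    """GF(2^8) product by shift-and-add; regenerates any FFmul table entry."""
    result = 0
    for _ in range(8):
        if b & 1:
            result ^= a
        a = xtime(a)
        b >>= 1
    return result

# Spot-check entries against the packed tables above:
assert ffmul(0x02, 0x80) == 0x1B   # FFmul_02, last byte of the /*8*/ row
assert ffmul(0x03, 0xFF) == 0x1A   # FFmul_03, first byte of the /*F*/ row
assert ffmul(0x0E, 0x01) == 0x0E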

Library pseudocode for shared/functions/crypto/HaveAESExt

// HaveAESExt()
// ============
// TRUE if AES cryptographic instructions support is implemented,
// FALSE otherwise.

boolean HaveAESExt()
    return boolean IMPLEMENTATION_DEFINED "Has AES Crypto instructions";

Library pseudocode for shared/functions/crypto/HaveBit128PMULLExt

// HaveBit128PMULLExt()
// ====================
// TRUE if 128 bit form of PMULL instructions support is implemented,
// FALSE otherwise.

boolean HaveBit128PMULLExt()
    return boolean IMPLEMENTATION_DEFINED "Has 128-bit form of PMULL instructions";

Library pseudocode for shared/functions/crypto/HaveSHA1Ext

// HaveSHA1Ext()
// =============
// TRUE if SHA1 cryptographic instructions support is implemented,
// FALSE otherwise.

boolean HaveSHA1Ext()
    return boolean IMPLEMENTATION_DEFINED "Has SHA1 Crypto instructions";

Library pseudocode for shared/functions/crypto/HaveSHA256Ext

// HaveSHA256Ext()
// ===============
// TRUE if SHA256 cryptographic instructions support is implemented,
// FALSE otherwise.

boolean HaveSHA256Ext()
    return boolean IMPLEMENTATION_DEFINED "Has SHA256 Crypto instructions";

Library pseudocode for shared/functions/crypto/HaveSHA3Ext

// HaveSHA3Ext()
// =============
// TRUE if SHA3 cryptographic instructions support is implemented,
// and when SHA1 and SHA2 basic cryptographic instructions support is implemented,
// FALSE otherwise.

boolean HaveSHA3Ext()
    if !HasArchVersion(ARMv8p2) || !(HaveSHA1Ext() && HaveSHA256Ext()) then
        return FALSE;
    return boolean IMPLEMENTATION_DEFINED "Has SHA3 Crypto instructions";

Library pseudocode for shared/functions/crypto/HaveSHA512Ext

// HaveSHA512Ext()
// ===============
// TRUE if SHA512 cryptographic instructions support is implemented,
// and when SHA1 and SHA2 basic cryptographic instructions support is implemented,
// FALSE otherwise.

boolean HaveSHA512Ext()
    if !HasArchVersion(ARMv8p2) || !(HaveSHA1Ext() && HaveSHA256Ext()) then
        return FALSE;
    return boolean IMPLEMENTATION_DEFINED "Has SHA512 Crypto instructions";

Library pseudocode for shared/functions/crypto/HaveSM3Ext

// HaveSM3Ext()
// ============
// TRUE if SM3 cryptographic instructions support is implemented,
// FALSE otherwise.

boolean HaveSM3Ext()
    if !HasArchVersion(ARMv8p2) then
        return FALSE;
    return boolean IMPLEMENTATION_DEFINED "Has SM3 Crypto instructions";

Library pseudocode for shared/functions/crypto/HaveSM4Ext

// HaveSM4Ext()
// ============
// TRUE if SM4 cryptographic instructions support is implemented,
// FALSE otherwise.

boolean HaveSM4Ext()
    if !HasArchVersion(ARMv8p2) then
        return FALSE;
    return boolean IMPLEMENTATION_DEFINED "Has SM4 Crypto instructions";

Library pseudocode for shared/functions/crypto/ROL

// ROL()
// =====

bits(N) ROL(bits(N) x, integer shift)
    assert shift >= 0 && shift <= N;
    if (shift == 0) then return x;
    return ROR(x, N-shift);

Library pseudocode for shared/functions/crypto/SHA256hash

// SHA256hash()
// ============

bits(128) SHA256hash(bits (128) X, bits(128) Y, bits(128) W, boolean part1)
    bits(32) chs, maj, t;
    for e = 0 to 3
        chs = SHAchoose(Y<31:0>, Y<63:32>, Y<95:64>);
        maj = SHAmajority(X<31:0>, X<63:32>, X<95:64>);
        t = Y<127:96> + SHAhashSIGMA1(Y<31:0>) + chs + Elem[W, e, 32];
        X<127:96> = t + X<127:96>;
        Y<127:96> = t + SHAhashSIGMA0(X<31:0>) + maj;
        <Y, X> = ROL(Y : X, 32);
    return (if part1 then X else Y);

Library pseudocode for shared/functions/crypto/SHAchoose

// SHAchoose()
// ===========

bits(32) SHAchoose(bits(32) x, bits(32) y, bits(32) z)
    return (((y EOR z) AND x) EOR z);

Library pseudocode for shared/functions/crypto/SHAhashSIGMA0

// SHAhashSIGMA0()
// ===============

bits(32) SHAhashSIGMA0(bits(32) x)
    return ROR(x, 2) EOR ROR(x, 13) EOR ROR(x, 22);

Library pseudocode for shared/functions/crypto/SHAhashSIGMA1

// SHAhashSIGMA1()
// ===============

bits(32) SHAhashSIGMA1(bits(32) x)
    return ROR(x, 6) EOR ROR(x, 11) EOR ROR(x, 25);

Library pseudocode for shared/functions/crypto/SHAmajority

// SHAmajority()
// =============

bits(32) SHAmajority(bits(32) x, bits(32) y, bits(32) z)
    return ((x AND y) OR ((x OR y) AND z));

Library pseudocode for shared/functions/crypto/SHAparity

// SHAparity()
// ===========

bits(32) SHAparity(bits(32) x, bits(32) y, bits(32) z)
    return (x EOR y EOR z);
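
These helpers are the Ch, Maj, Σ0 and Σ1 functions of FIPS 180-4. An illustrative Python transcription (hypothetical names) makes the bitwise if-then-else reading of SHAchoose and the 2-of-3 vote of SHAmajority explicit:

def ror32(x: int, s: int) -> int:
    return ((x >> s) | (x << (32 - s))) & 0xFFFFFFFF

def sha_choose(x: int, y: int, z: int) -> int:
    """SHAchoose / Ch: each bit of x selects the matching bit of y (1) or z (0)."""
    return ((y ^ z) & x) ^ z

def sha_majority(x: int, y: int, z: int) -> int:
    """SHAmajority / Maj: bitwise 2-of-3 vote."""
    return (x & y) | ((x | y) & z)

def sha_sigma0(x: int) -> int:   # SHAhashSIGMA0
    return ror32(x, 2) ^ ror32(x, 13) ^ ror32(x, 22)

def sha_sigma1(x: int) -> int:   # SHAhashSIGMA1
    return ror32(x, 6) ^ ror32(x, 11) ^ ror32(x, 25)

assert sha_choose(0b1100, 0b1010, 0b0110) == 0b1010   # x-high picks y, x-low picks z
assert sha_majority(0b110, 0b101, 0b011) == 0b111     # every bit has two '1' votes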

Library pseudocode for shared/functions/crypto/Sbox

// Sbox()
// ======
// Used in SM4E crypto instruction

bits(8) Sbox(bits(8) sboxin)
    bits(8) sboxout;
    bits(2048) sboxstring = 0xd690e9fecce13db716b614c228fb2c052b679a762abe04c3aa441326498606999c4250f491ef987a33540b43edcfac62e4b31ca9c908e89580df94fa758f3fa64707a7fcf37317ba83593c19e6854fa8686b81b27164da8bf8eb0f4b70569d351e240e5e6358d1a225227c3b01217887d40046579fd327524c3602e7a0c4c89eeabf8ad240c738b5a3f7f2cef96115a1e0ae5da49b341a55ad933230f58cb1e31df6e22e8266ca60c02923ab0d534e6fd5db3745defd8e2f03ff6a726d6c5b518d1baf92bbddbc7f11d95c411f105ad80ac13188a5cd7bbd2d74d012b8e5b4b08969974a0c96777e65b9f109c56ec68418f07dec3adc4d2079ee5f3ed7cb3948<2047:0>;
    sboxout = sboxstring<(255-UInt(sboxin))*8+7:(255-UInt(sboxin))*8>;
    return sboxout;

Library pseudocode for shared/functions/exclusive/ClearExclusiveByAddress

// Clear the global Exclusives monitors for all PEs EXCEPT processorid if they
// record any part of the physical address region of size bytes starting at paddress.
// It is IMPLEMENTATION DEFINED whether the global Exclusives monitor for processorid
// is also cleared if it records any part of the address region.
ClearExclusiveByAddress(FullAddress paddress, integer processorid, integer size);

Library pseudocode for shared/functions/exclusive/ClearExclusiveLocal

// Clear the local Exclusives monitor for the specified processorid.
ClearExclusiveLocal(integer processorid);

Library pseudocode for shared/functions/exclusive/ClearExclusiveMonitors

// ClearExclusiveMonitors()
// ========================
// Clear the local Exclusives monitor for the executing PE.

ClearExclusiveMonitors()
    ClearExclusiveLocal(ProcessorID());

Library pseudocode for shared/functions/exclusive/ExclusiveMonitorsStatus

// Returns '0' to indicate success if the last memory write by this PE was to
// the same physical address region endorsed by ExclusiveMonitorsPass().
// Returns '1' to indicate failure if address translation resulted in a different
// physical address.
bit ExclusiveMonitorsStatus();

Library pseudocode for shared/functions/exclusive/IsExclusiveGlobal

// Return TRUE if the global Exclusives monitor for processorid includes all of
// the physical address region of size bytes starting at paddress.
boolean IsExclusiveGlobal(FullAddress paddress, integer processorid, integer size);

Library pseudocode for shared/functions/exclusive/IsExclusiveLocal

// Return TRUE if the local Exclusives monitor for processorid includes all of
// the physical address region of size bytes starting at paddress.
boolean IsExclusiveLocal(FullAddress paddress, integer processorid, integer size);

Library pseudocode for shared/functions/exclusive/MarkExclusiveGlobal

// Record the physical address region of size bytes starting at paddress in
// the global Exclusives monitor for processorid.
MarkExclusiveGlobal(FullAddress paddress, integer processorid, integer size);

Library pseudocode for shared/functions/exclusive/MarkExclusiveLocal

// Record the physical address region of size bytes starting at paddress in
// the local Exclusives monitor for processorid.
MarkExclusiveLocal(FullAddress paddress, integer processorid, integer size);

Library pseudocode for shared/functions/exclusive/ProcessorID

// Return the ID of the currently executing PE.
integer ProcessorID();
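
The exclusive-monitor primitives above are deliberately abstract: monitor granularity and the global monitor's behaviour are IMPLEMENTATION DEFINED. A toy single-PE model in Python (illustrative only; a real monitor tracks a region of IMPLEMENTATION DEFINED granularity rather than an exact address/size pair, and clears on more events than shown) sketches how a load-exclusive/store-exclusive sequence composes them:

class LocalMonitor:
    """Toy model of one PE's local Exclusives monitor (not the specification)."""
    def __init__(self):
        self.region = None                        # open-access state

    def mark_exclusive(self, paddress: int, size: int):        # MarkExclusiveLocal
        self.region = (paddress, size)

    def clear_exclusive(self):                                 # ClearExclusiveLocal
        self.region = None

    def is_exclusive(self, paddress: int, size: int) -> bool:  # IsExclusiveLocal
        return self.region == (paddress, size)

m = LocalMonitor()
m.mark_exclusive(0x8000, 8)           # load-exclusive tags the region
assert m.is_exclusive(0x8000, 8)      # a store-exclusive to it may succeed
m.clear_exclusive()                   # CLREX, exception entry/return, etc.
assert not m.is_exclusive(0x8000, 8)  # now the store-exclusive must fail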

Library pseudocode for shared/functions/extension/AArch32.HaveHPDExt

// AArch32.HaveHPDExt()
// ====================

boolean AArch32.HaveHPDExt()
    return HasArchVersion(ARMv8p2);

Library pseudocode for shared/functions/extension/AArch64.HaveHPDExt

// AArch64.HaveHPDExt()
// ====================

boolean AArch64.HaveHPDExt()
    return HasArchVersion(ARMv8p1);

Library pseudocode for shared/functions/extension/Have52BitPAExt

// Have52BitPAExt()
// ================
// Returns TRUE if Large Physical Address extension
// support is implemented and FALSE otherwise.

boolean Have52BitPAExt()
    return HasArchVersion(ARMv8p2) && boolean IMPLEMENTATION_DEFINED "Has large 52-bit PA/IPA support";

Library pseudocode for shared/functions/extension/Have52BitVAExt

// Have52BitVAExt()
// ================
// Returns TRUE if Large Virtual Address extension
// support is implemented and FALSE otherwise.

boolean Have52BitVAExt()
    return HasArchVersion(ARMv8p2) && boolean IMPLEMENTATION_DEFINED "Has large 52-bit VA support";

Library pseudocode for shared/functions/extension/HaveAArch32BF16Ext

// HaveAArch32BF16Ext()
// ====================
// Returns TRUE if AArch32 BFloat16 instruction support is implemented, and FALSE otherwise.

boolean HaveAArch32BF16Ext()
    return HasArchVersion(ARMv8p2) && boolean IMPLEMENTATION_DEFINED "Has AArch32 BFloat16 extension";

Library pseudocode for shared/functions/extension/HaveAArch32Int8MatMulExt

// HaveAArch32Int8MatMulExt()
// ==========================
// Returns TRUE if AArch32 8-bit integer matrix multiply instruction support
// is implemented, and FALSE otherwise.

boolean HaveAArch32Int8MatMulExt()
    return HasArchVersion(ARMv8p2) && boolean IMPLEMENTATION_DEFINED "Has AArch32 Int8 Mat Mul extension";

Library pseudocode for shared/functions/extension/HaveAtomicExt

// HaveAtomicExt()
// ===============

boolean HaveAtomicExt()
    return HasArchVersion(ARMv8p1);

Library pseudocode for shared/functions/extension/HaveBF16Ext

// HaveBF16Ext()
// =============
// Returns TRUE if AArch64 BFloat16 instruction support is implemented, and FALSE otherwise.

boolean HaveBF16Ext()
    return HasArchVersion(ARMv8p6) ||
        (HasArchVersion(ARMv8p2) && boolean IMPLEMENTATION_DEFINED "Has AArch64 BFloat16 extension");
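
Most of the Have*Ext predicates in this section follow one shape: mandatory from some architecture version, optional and IMPLEMENTATION DEFINED from an earlier one. A Python sketch of that pattern, using HaveBF16Ext as the worked case (hypothetical names and version list; illustrative only):

ARCH_VERSIONS = ["ARMv8p0", "ARMv8p1", "ARMv8p2", "ARMv8p3",
                 "ARMv8p4", "ARMv8p5", "ARMv8p6"]

def has_arch_version(impl: str, required: str) -> bool:
    """True if the implemented version is `required` or later."""
    return ARCH_VERSIONS.index(impl) >= ARCH_VERSIONS.index(required)

def have_bf16_ext(impl: str, impdef_bf16: bool) -> bool:
    """Mandatory from Armv8.6; optional (IMPLEMENTATION DEFINED) from Armv8.2."""
    return (has_arch_version(impl, "ARMv8p6")
            or (has_arch_version(impl, "ARMv8p2") and impdef_bf16))

assert have_bf16_ext("ARMv8p6", False)       # mandatory at v8.6
assert have_bf16_ext("ARMv8p2", True)        # opt-in below v8.6
assert not have_bf16_ext("ARMv8p1", True)    # too early either way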

Library pseudocode for shared/functions/extension/HaveBTIExt

// HaveBTIExt()
// ============
// Returns TRUE if support for Branch Target Identification is implemented.

boolean HaveBTIExt()
    return HasArchVersion(ARMv8p5);

Library pseudocode for shared/functions/extension/HaveBlockBBM

// HaveBlockBBM()
// ==============
// Returns TRUE if support for changing block size without requiring break-before-make is implemented.

boolean HaveBlockBBM()
    return HasArchVersion(ARMv8p4);

Library pseudocode for shared/functions/extension/HaveCommonNotPrivateTransExt

// HaveCommonNotPrivateTransExt()
// ==============================

boolean HaveCommonNotPrivateTransExt()
    return HasArchVersion(ARMv8p2);

Library pseudocode for shared/functions/extension/HaveDGHExt

// HaveDGHExt()
// ============
// Returns TRUE if Data Gathering Hint instruction support is implemented, and FALSE otherwise.

boolean HaveDGHExt()
    return boolean IMPLEMENTATION_DEFINED "Has AArch64 DGH extension";

Library pseudocode for shared/functions/extension/HaveDITExt

// HaveDITExt()
// ============

boolean HaveDITExt()
    return HasArchVersion(ARMv8p4);

Library pseudocode for shared/functions/extension/HaveDOTPExt

// HaveDOTPExt()
// =============
// Returns TRUE if Dot Product feature support is implemented, and FALSE otherwise.

boolean HaveDOTPExt()
    return HasArchVersion(ARMv8p4) ||
        (HasArchVersion(ARMv8p2) && boolean IMPLEMENTATION_DEFINED "Has Dot Product extension");

Library pseudocode for shared/functions/extension/HaveDoPD

// HaveDoPD()
// ==========
// Returns TRUE if Debug Over Power Down extension
// support is implemented and FALSE otherwise.

boolean HaveDoPD()
    return HasArchVersion(ARMv8p2) && boolean IMPLEMENTATION_DEFINED "Has DoPD extension";

Library pseudocode for shared/functions/extension/HaveDoubleFaultExt

// HaveDoubleFaultExt()
// ====================

boolean HaveDoubleFaultExt()
    return (HasArchVersion(ARMv8p4) && HaveEL(EL3) && !ELUsingAArch32(EL3) && HaveIESB());

Library pseudocode for shared/functions/extension/HaveDoubleLock

// HaveDoubleLock()
// ================
// Returns TRUE if support for the OS Double Lock is implemented.

boolean HaveDoubleLock()
    return !HasArchVersion(ARMv8p4) || boolean IMPLEMENTATION_DEFINED "OS Double Lock is implemented";

Library pseudocode for shared/functions/extension/HaveE0PDExt

// HaveE0PDExt()
// =============
// Returns TRUE if support for constant fault times for unprivileged accesses
// to the memory map is implemented.

boolean HaveE0PDExt()
    return HasArchVersion(ARMv8p5);

Library pseudocode for shared/functions/extension/HaveECVExt

// HaveECVExt()
// ============
// Returns TRUE if Enhanced Counter Virtualization extension
// support is implemented, and FALSE otherwise.

boolean HaveECVExt()
    return HasArchVersion(ARMv8p6);

Library pseudocode for shared/functions/extension/HaveEMPAMExt

// HaveEMPAMExt()
// ==============
// Returns TRUE if Enhanced MPAM is implemented, and FALSE otherwise.

boolean HaveEMPAMExt()
    return (HasArchVersion(ARMv8p6) && HaveMPAMExt() &&
        boolean IMPLEMENTATION_DEFINED "Has enhanced MPAM extension");

Library pseudocode for shared/functions/extension/HaveExtendedCacheSets

// HaveExtendedCacheSets()
// =======================

boolean HaveExtendedCacheSets()
    return HasArchVersion(ARMv8p3);

Library pseudocode for shared/functions/extension/HaveExtendedECDebugEvents

// HaveExtendedECDebugEvents()
// ===========================

boolean HaveExtendedECDebugEvents()
    return HasArchVersion(ARMv8p2);

Library pseudocode for shared/functions/extension/HaveExtendedExecuteNeverExt

// HaveExtendedExecuteNeverExt()
// =============================

boolean HaveExtendedExecuteNeverExt()
    return HasArchVersion(ARMv8p2);

Library pseudocode for shared/functions/exclusiveextension/ClearExclusiveByAddressHaveFCADDExt

// Clear the global Exclusives monitors for all PEs EXCEPT processorid if they // record any part of the physical address region of size bytes starting at paddress. // It is IMPLEMENTATION DEFINED whether the global Exclusives monitor for processorid // is also cleared if it records any part of the address region.// HaveFCADDExt() // ============== boolean ClearExclusiveByAddress(HaveFCADDExt() return(ARMv8p3FullAddressHasArchVersion paddress, integer processorid, integer size););

Library pseudocode for shared/functions/exclusiveextension/ClearExclusiveLocalHaveFGTExt

// Clear the local Exclusives monitor for the specified processorid.// HaveFGTExt() // ============ // Returns TRUE if Fine Grained Trap is implemented, and FALSE otherwise. boolean ClearExclusiveLocal(integer processorid);HaveFGTExt() returnHasArchVersion(ARMv8p6) && !ELUsingAArch32(EL2);

Library pseudocode for shared/functions/exclusiveextension/ClearExclusiveMonitorsHaveFJCVTZSExt

// ClearExclusiveMonitors() // ======================== // HaveFJCVTZSExt() // ================ // Clear the local Exclusives monitor for the executing PE.boolean ClearExclusiveMonitors()HaveFJCVTZSExt() return ClearExclusiveLocalHasArchVersion(ProcessorIDARMv8p3()););

Library pseudocode for shared/functions/extension/HaveFP16MulNoRoundingToFP32Ext

// HaveFP16MulNoRoundingToFP32Ext()
// ================================
// Returns TRUE if the FP16 multiply with no intermediate rounding accumulate
// to FP32 instructions are implemented, and FALSE otherwise.

boolean HaveFP16MulNoRoundingToFP32Ext()
    if !HaveFP16Ext() then return FALSE;
    if HasArchVersion(ARMv8p4) then return TRUE;
    return (HasArchVersion(ARMv8p2) &&
            boolean IMPLEMENTATION_DEFINED "Has accumulate FP16 product into FP32 extension");

Library pseudocode for shared/functions/extension/HaveFlagFormatExt

// HaveFlagFormatExt()
// ===================
// Returns TRUE if flag format conversion instructions are implemented.

boolean HaveFlagFormatExt()
    return HasArchVersion(ARMv8p5);

Library pseudocode for shared/functions/extension/HaveFlagManipulateExt

// HaveFlagManipulateExt()
// =======================
// Returns TRUE if flag manipulate instructions are implemented.

boolean HaveFlagManipulateExt()
    return HasArchVersion(ARMv8p4);

Library pseudocode for shared/functions/extension/HaveFrintExt

// HaveFrintExt()
// ==============
// Returns TRUE if FRINT instructions are implemented.

boolean HaveFrintExt()
    return HasArchVersion(ARMv8p5);

Library pseudocode for shared/functions/extension/HaveHPMDExt

// HaveHPMDExt()
// =============

boolean HaveHPMDExt()
    return HasArchVersion(ARMv8p1);

Library pseudocode for shared/functions/extension/HaveIDSExt

// HaveIDSExt()
// ============
// Returns TRUE if ID register handling feature is implemented.

boolean HaveIDSExt()
    return HasArchVersion(ARMv8p4);

Library pseudocode for shared/functions/extension/HaveIESB

// HaveIESB()
// ==========

boolean HaveIESB()
    return (HaveRASExt() &&
            boolean IMPLEMENTATION_DEFINED "Has Implicit Error Synchronization Barrier");

Library pseudocode for shared/functions/extension/HaveInt8MatMulExt

// HaveInt8MatMulExt()
// ===================
// Returns TRUE if AArch64 8-bit integer matrix multiply instruction support
// is implemented, and FALSE otherwise.

boolean HaveInt8MatMulExt()
    return (HasArchVersion(ARMv8p6) ||
            (HasArchVersion(ARMv8p2) && boolean IMPLEMENTATION_DEFINED "Has AArch64 Int8 Mat Mul extension"));

Library pseudocode for shared/functions/extension/HaveLSE2Ext

// HaveLSE2Ext()
// =============
// Returns TRUE if LSE2 is implemented, and FALSE otherwise.

boolean HaveLSE2Ext()
    return HasArchVersion(ARMv8p4);

Library pseudocode for shared/functions/extension/HaveMPAMExt

// HaveMPAMExt()
// =============
// Returns TRUE if MPAM is implemented, and FALSE otherwise.

boolean HaveMPAMExt()
    return (HasArchVersion(ARMv8p2) &&
            boolean IMPLEMENTATION_DEFINED "Has MPAM extension");

Library pseudocode for shared/functions/extension/HaveMTEExt

// HaveMTEExt()
// ============
// Returns TRUE if MTE is implemented, and FALSE otherwise.

boolean HaveMTEExt()
    if !HasArchVersion(ARMv8p5) then
        return FALSE;
    return boolean IMPLEMENTATION_DEFINED "Has MTE extension";

Library pseudocode for shared/functions/extension/HaveNV2Ext

// HaveNV2Ext()
// ============
// Returns TRUE if Enhanced Nested Virtualization is implemented.

boolean HaveNV2Ext()
    return (HasArchVersion(ARMv8p4) && HaveNVExt() &&
            boolean IMPLEMENTATION_DEFINED "Has support for Enhanced Nested Virtualization");

Library pseudocode for shared/functions/extension/HaveNVExt

// HaveNVExt()
// ===========
// Returns TRUE if Nested Virtualization is implemented.

boolean HaveNVExt()
    return HasArchVersion(ARMv8p3) && boolean IMPLEMENTATION_DEFINED "Has Nested Virtualization";

Library pseudocode for shared/functions/extension/HaveNoSecurePMUDisableOverride

// HaveNoSecurePMUDisableOverride()
// ================================

boolean HaveNoSecurePMUDisableOverride()
    return HasArchVersion(ARMv8p2);

Library pseudocode for shared/functions/extension/HaveNoninvasiveDebugAuth

// HaveNoninvasiveDebugAuth()
// ==========================
// Returns TRUE if the Non-invasive debug controls are implemented.

boolean HaveNoninvasiveDebugAuth()
    return !HasArchVersion(ARMv8p4);

Library pseudocode for shared/functions/extension/HavePANExt

// HavePANExt()
// ============

boolean HavePANExt()
    return HasArchVersion(ARMv8p1);

Library pseudocode for shared/functions/extension/HavePageBasedHardwareAttributes

// HavePageBasedHardwareAttributes()
// =================================

boolean HavePageBasedHardwareAttributes()
    return HasArchVersion(ARMv8p2);

Library pseudocode for shared/functions/extension/HavePrivATExt

// HavePrivATExt()
// ===============

boolean HavePrivATExt()
    return HasArchVersion(ARMv8p2);

Library pseudocode for shared/functions/extension/HaveQRDMLAHExt

// HaveQRDMLAHExt()
// ================

boolean HaveQRDMLAHExt()
    return HasArchVersion(ARMv8p1);

boolean HaveAccessFlagUpdateExt()
    return HasArchVersion(ARMv8p1);

boolean HaveDirtyBitModifierExt()
    return HasArchVersion(ARMv8p1);

Library pseudocode for shared/functions/extension/HaveRASExt

// HaveRASExt()
// ============

boolean HaveRASExt()
    return (HasArchVersion(ARMv8p2) ||
            boolean IMPLEMENTATION_DEFINED "Has RAS extension");

Library pseudocode for shared/functions/extension/HaveRNG

// HaveRNG()
// =========
// Returns TRUE if Random Number Generator extension
// support is implemented and FALSE otherwise.

boolean HaveRNG()
    return HasArchVersion(ARMv8p5) && boolean IMPLEMENTATION_DEFINED "Has RNG extension";

Library pseudocode for shared/functions/extension/HaveSBExt

// HaveSBExt()
// ===========
// Returns TRUE if support for SB is implemented, and FALSE otherwise.

boolean HaveSBExt()
    return HasArchVersion(ARMv8p5) || boolean IMPLEMENTATION_DEFINED "Has SB extension";

Library pseudocode for shared/functions/extension/HaveSSBSExt

// HaveSSBSExt()
// =============
// Returns TRUE if support for SSBS is implemented, and FALSE otherwise.

boolean HaveSSBSExt()
    return HasArchVersion(ARMv8p5) || boolean IMPLEMENTATION_DEFINED "Has SSBS extension";

Library pseudocode for shared/functions/extension/HaveSecureEL2Ext

// HaveSecureEL2Ext()
// ==================
// Returns TRUE if Secure EL2 is implemented.

boolean HaveSecureEL2Ext()
    return HasArchVersion(ARMv8p4);

Library pseudocode for shared/functions/extension/HaveSecureExtDebugView

// HaveSecureExtDebugView()
// ========================
// Returns TRUE if support for Secure and Non-secure views of debug peripherals is implemented.

boolean HaveSecureExtDebugView()
    return HasArchVersion(ARMv8p4);

Library pseudocode for shared/functions/extension/HaveSelfHostedTrace

// HaveSelfHostedTrace()
// =====================

boolean HaveSelfHostedTrace()
    return HasArchVersion(ARMv8p4);

Library pseudocode for shared/functions/extension/HaveSmallPageTblExt

// HaveSmallPageTblExt()
// =====================
// Returns TRUE if Small Page Table Support is implemented.

boolean HaveSmallPageTblExt()
    return HasArchVersion(ARMv8p4) && boolean IMPLEMENTATION_DEFINED "Has Small Page Table extension";

Library pseudocode for shared/functions/extension/HaveStage2MemAttrControl

// HaveStage2MemAttrControl()
// ==========================
// Returns TRUE if support for Stage2 control of memory types and cacheability attributes is implemented.

boolean HaveStage2MemAttrControl()
    return HasArchVersion(ARMv8p4);

Library pseudocode for shared/functions/extension/HaveStatisticalProfiling

// HaveStatisticalProfiling()
// ==========================

boolean HaveStatisticalProfiling()
    return HasArchVersion(ARMv8p2);

Library pseudocode for shared/functions/extension/HaveTWEDExt

// HaveTWEDExt()
// =============
// Returns TRUE if Delayed Trapping of WFE instruction support is implemented, and FALSE otherwise.

boolean HaveTWEDExt()
    return boolean IMPLEMENTATION_DEFINED "Has TWED extension";

Library pseudocode for shared/functions/extension/HaveTraceExt

// HaveTraceExt()
// ==============
// Returns TRUE if Trace functionality as described by the Trace Architecture
// is implemented.

boolean HaveTraceExt()
    return boolean IMPLEMENTATION_DEFINED "Has Trace Architecture functionality";

Library pseudocode for shared/functions/extension/HaveTrapLoadStoreMultipleDeviceExt

// HaveTrapLoadStoreMultipleDeviceExt()
// ====================================

boolean HaveTrapLoadStoreMultipleDeviceExt()
    return HasArchVersion(ARMv8p2);

Library pseudocode for shared/functions/extension/HaveUAOExt

// HaveUAOExt()
// ============

boolean HaveUAOExt()
    return HasArchVersion(ARMv8p2);

Library pseudocode for shared/functions/extension/HaveV82Debug

// HaveV82Debug()
// ==============

boolean HaveV82Debug()
    return HasArchVersion(ARMv8p2);

Library pseudocode for shared/functions/extension/HaveVirtHostExt

// HaveVirtHostExt()
// =================

boolean HaveVirtHostExt()
    return HasArchVersion(ARMv8p1);

Library pseudocode for shared/functions/extension/Havev85PMU

// Havev85PMU()
// ============
// Returns TRUE if v8.5-Performance Monitor Unit extension
// support is implemented, and FALSE otherwise.

boolean Havev85PMU()
    return HasArchVersion(ARMv8p5) && boolean IMPLEMENTATION_DEFINED "Has PMUv3p5 extension";

Library pseudocode for shared/functions/extension/InsertIESBBeforeException

// If SCTLR_ELx.IESB is 1 when an exception is generated to ELx, any pending Unrecoverable
// SError interrupt must be taken before executing any instructions in the exception handler.
// However, this can be before the branch to the exception handler is made.
boolean InsertIESBBeforeException(bits(2) el);

Library pseudocode for shared/functions/float/bfloat/BFAdd

// BFAdd()
// =======
// Single-precision add following BFloat16 computation behaviors.

bits(32) BFAdd(bits(32) op1, bits(32) op2)
    bits(32) result;
    (type1,sign1,value1) = BFUnpack(op1);
    (type2,sign2,value2) = BFUnpack(op2);
    if type1 == FPType_QNaN || type2 == FPType_QNaN then
        result = FPDefaultNaN();
    else
        inf1 = (type1 == FPType_Infinity); inf2 = (type2 == FPType_Infinity);
        zero1 = (type1 == FPType_Zero);    zero2 = (type2 == FPType_Zero);
        if inf1 && inf2 && sign1 == NOT(sign2) then
            result = FPDefaultNaN();
        elsif (inf1 && sign1 == '0') || (inf2 && sign2 == '0') then
            result = FPInfinity('0');
        elsif (inf1 && sign1 == '1') || (inf2 && sign2 == '1') then
            result = FPInfinity('1');
        elsif zero1 && zero2 && sign1 == sign2 then
            result = FPZero(sign1);
        else
            result_value = value1 + value2;
            if result_value == 0.0 then
                result = FPZero('0');   // Positive sign when Round to Odd
            else
                result = BFRound(result_value);
    return result;

Library pseudocode for shared/functions/float/bfloat/BFMatMulAdd

// BFMatMulAdd()
// =============
// BFloat16 matrix multiply and add to single-precision matrix
// result[2, 2] = addend[2, 2] + (op1[2, 4] * op2[4, 2])

bits(N) BFMatMulAdd(bits(N) addend, bits(N) op1, bits(N) op2)
    assert N == 128;
    bits(N) result;
    bits(32) sum, prod0, prod1;
    for i = 0 to 1
        for j = 0 to 1
            sum = Elem[addend, 2*i + j, 32];
            for k = 0 to 1
                prod0 = BFMul(Elem[op1, 4*i + 2*k + 0, 16], Elem[op2, 4*j + 2*k + 0, 16]);
                prod1 = BFMul(Elem[op1, 4*i + 2*k + 1, 16], Elem[op2, 4*j + 2*k + 1, 16]);
                sum   = BFAdd(sum, BFAdd(prod0, prod1));
            Elem[result, 2*i + j, 32] = sum;
    return result;
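Note the flat element indexing: row i of op1 occupies elements 4*i .. 4*i+3, while the four multiplicands forming column j of op2 occupy the contiguous run 4*j .. 4*j+3. A Python-only sketch of just this loop structure, using ordinary floats in place of the BF16/FP32 element accessors (so no round-to-odd or flush-to-zero; bf_mat_mul_add is an illustrative name, not a library function):

    def bf_mat_mul_add(addend, op1, op2):
        result = [0.0] * 4
        for i in range(2):
            for j in range(2):
                s = addend[2 * i + j]
                for k in range(2):
                    prod0 = op1[4 * i + 2 * k + 0] * op2[4 * j + 2 * k + 0]
                    prod1 = op1[4 * i + 2 * k + 1] * op2[4 * j + 2 * k + 1]
                    s += prod0 + prod1
                result[2 * i + j] = s
        return result

    # 2x2 identity as addend; op2 holds the 4x2 matrix with each column contiguous.
    print(bf_mat_mul_add([1, 0, 0, 1],
                         [1, 2, 3, 4, 5, 6, 7, 8],
                         [1, 0, 0, 0, 0, 1, 0, 0]))   # [2.0, 2.0, 5.0, 7.0]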

Library pseudocode for shared/functions/float/bfloat/BFMul

// BFMul()
// =======
// BFloat16 widening multiply to single-precision following BFloat16
// computation behaviors.

bits(32) BFMul(bits(16) op1, bits(16) op2)
    bits(32) result;
    (type1,sign1,value1) = BFUnpack(op1);
    (type2,sign2,value2) = BFUnpack(op2);
    if type1 == FPType_QNaN || type2 == FPType_QNaN then
        result = FPDefaultNaN();
    else
        inf1 = (type1 == FPType_Infinity); inf2 = (type2 == FPType_Infinity);
        zero1 = (type1 == FPType_Zero);    zero2 = (type2 == FPType_Zero);
        if (inf1 && zero2) || (zero1 && inf2) then
            result = FPDefaultNaN();
        elsif inf1 || inf2 then
            result = FPInfinity(sign1 EOR sign2);
        elsif zero1 || zero2 then
            result = FPZero(sign1 EOR sign2);
        else
            result = BFRound(value1*value2);
    return result;

Library pseudocode for shared/functions/float/bfloat/BFRound

// BFRound()
// =========
// Converts a real number OP into a single-precision value using the
// Round to Odd rounding mode and following BFloat16 computation behaviors.

bits(32) BFRound(real op)
    assert op != 0.0;
    bits(32) result;

    // Format parameters - minimum exponent, numbers of exponent and fraction bits.
    minimum_exp = -126;  E = 8;  F = 23;

    // Split value into sign, unrounded mantissa and exponent.
    if op < 0.0 then
        sign = '1';  mantissa = -op;
    else
        sign = '0';  mantissa = op;
    exponent = 0;
    while mantissa < 1.0 do
        mantissa = mantissa * 2.0;  exponent = exponent - 1;
    while mantissa >= 2.0 do
        mantissa = mantissa / 2.0;  exponent = exponent + 1;

    // Fixed Flush-to-zero.
    if exponent < minimum_exp then
        return FPZero(sign);

    // Start creating the exponent value for the result. Start by biasing the actual exponent
    // so that the minimum exponent becomes 1, lower values 0 (indicating possible underflow).
    biased_exp = Max(exponent - minimum_exp + 1, 0);
    if biased_exp == 0 then mantissa = mantissa / 2.0^(minimum_exp - exponent);

    // Get the unrounded mantissa as an integer, and the "units in last place" rounding error.
    int_mant = RoundDown(mantissa * 2.0^F);  // < 2.0^F if biased_exp == 0, >= 2.0^F if not
    error = mantissa * 2.0^F - Real(int_mant);

    // Round to Odd
    if error != 0.0 then
        int_mant<0> = '1';

    // Deal with overflow and generate result.
    if biased_exp >= 2^E - 1 then
        result = FPInfinity(sign);  // Overflows generate appropriately-signed Infinity
    else
        result = sign : biased_exp<30-F:0> : int_mant<F-1:0>;

    return result;
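The distinctive step here is Round to Odd: if any discarded bits are non-zero, the least significant kept bit is forced to 1, which avoids double-rounding error when the result is later rounded again. A runnable Python sketch of just that step, for a 23-bit fraction as above (round_to_odd is an illustrative helper, not a library function):

    import math

    def round_to_odd(mantissa, frac_bits=23):
        """mantissa is a real in [1.0, 2.0); returns the integer fraction field."""
        scaled = mantissa * 2.0 ** frac_bits
        int_mant = math.floor(scaled)      # RoundDown()
        if scaled != int_mant:             # non-zero rounding error
            int_mant |= 1                  # force the low bit odd
        return int_mant & ((1 << frac_bits) - 1)

    # 1 + 2^-30 is inexact in a 23-bit fraction, so the low fraction bit becomes 1.
    print(hex(round_to_odd(1.0 + 2.0 ** -30)))   # 0x1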

Library pseudocode for shared/functions/float/bfloat/BFUnpack

// BFUnpack()
// ==========
// Unpacks a BFloat16 or single-precision value into its type,
// sign bit and real number that it represents.
// The real number result has the correct sign for numbers and infinities,
// is very large in magnitude for infinities, and is 0.0 for NaNs.
// (These values are chosen to simplify the description of
// comparisons and conversions.)

(FPType, bit, real) BFUnpack(bits(N) fpval)
    assert N IN {16,32};
    if N == 16 then
        sign = fpval<15>;  exp = fpval<14:7>;  frac = fpval<6:0> : Zeros(16);
    else  // N == 32
        sign = fpval<31>;  exp = fpval<30:23>;  frac = fpval<22:0>;
    if IsZero(exp) then
        fptype = FPType_Zero;  value = 0.0;  // Fixed Flush to Zero
    elsif IsOnes(exp) then
        if IsZero(frac) then
            fptype = FPType_Infinity;  value = 2.0^1000000;
        else  // no SNaN for BF16 arithmetic
            fptype = FPType_QNaN;  value = 0.0;
    else
        fptype = FPType_Nonzero;
        value = 2.0^(UInt(exp)-127) * (1.0 + Real(UInt(frac)) * 2.0^-23);
    if sign == '1' then value = -value;
    return (fptype, sign, value);
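A hypothetical Python mirror of the N == 16 case (bf16_unpack is an illustrative name; a finite double stands in for the spec's "very large" 2.0^1000000, which does not fit in a machine float):

    def bf16_unpack(bits16):
        sign = (bits16 >> 15) & 1
        exp  = (bits16 >> 7) & 0xFF
        frac = bits16 & 0x7F
        if exp == 0:
            fptype, value = "Zero", 0.0                  # subnormals flush to zero
        elif exp == 0xFF:
            if frac == 0:
                fptype, value = "Infinity", 2.0 ** 1000  # "very large" stand-in
            else:
                fptype, value = "QNaN", 0.0              # no SNaN in BF16 arithmetic
        else:
            fptype = "Nonzero"
            value = 2.0 ** (exp - 127) * (1.0 + frac / 2.0 ** 7)
        return (fptype, sign, -value if sign else value)

    print(bf16_unpack(0x3FC0))   # ('Nonzero', 0, 1.5)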

Library pseudocode for shared/functions/float/bfloat/FPConvertBF

// FPConvertBF()
// =============
// Converts a single-precision OP to BFloat16 value with rounding controlled by FPCR/FPSCR.

bits(16) FPConvertBF(bits(32) op, FPCRType fpcr, FPRounding rounding)
    bits(32) result;  // BF16 value in top 16 bits

    // Unpack floating-point operand optionally with flush-to-zero.
    (fptype,sign,value) = FPUnpack(op, fpcr);

    if fptype == FPType_SNaN || fptype == FPType_QNaN then
        if fpcr.DN == '1' then
            result = FPDefaultNaN();
        else
            result = FPConvertNaN(op);
        if fptype == FPType_SNaN then
            FPProcessException(FPExc_InvalidOp, fpcr);
    elsif fptype == FPType_Infinity then
        result = FPInfinity(sign);
    elsif fptype == FPType_Zero then
        result = FPZero(sign);
    else
        result = FPRoundCVBF(value, fpcr, rounding);

    // Returns correctly rounded BF16 value from top 16 bits
    return result<31:16>;

// FPConvertBF()
// =============
// Converts a single-precision operand to BFloat16 value.

bits(16) FPConvertBF(bits(32) op, FPCRType fpcr)
    return FPConvertBF(op, fpcr, FPRoundingMode(fpcr));

Library pseudocode for shared/functions/float/bfloat/FPRoundCVBF

// FPRoundCVBF()
// =============
// Converts a real number OP into a BFloat16 value using the supplied rounding mode RMODE.

bits(32) FPRoundCVBF(real op, FPCRType fpcr, FPRounding rounding)
    boolean isbfloat16 = TRUE;
    return FPRoundBase(op, fpcr, rounding, isbfloat16);

Library pseudocode for shared/functions/float/fixedtofp/FixedToFP

// FixedToFP()
// ===========
// Convert M-bit fixed point OP with FBITS fractional bits to
// N-bit precision floating point, controlled by UNSIGNED and ROUNDING.

bits(N) FixedToFP(bits(M) op, integer fbits, boolean unsigned, FPCRType fpcr, FPRounding rounding)
    assert N IN {16,32,64};
    assert M IN {16,32,64};
    bits(N) result;
    assert fbits >= 0;
    assert rounding != FPRounding_ODD;

    // Correct signed-ness
    int_operand = Int(op, unsigned);

    // Scale by fractional bits and generate a real value
    real_operand = Real(int_operand) / 2.0^fbits;

    if real_operand == 0.0 then
        result = FPZero('0');
    else
        result = FPRound(real_operand, fpcr, rounding);

    return result;
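The scaling step is just an integer reinterpretation followed by a division by 2^fbits. A minimal Python sketch (fixed_to_fp is an illustrative name; Python's float division, which rounds to nearest-even, stands in for FPRound with the selected rounding mode):

    def fixed_to_fp(op, m, fbits, unsigned):
        # Sign-extend the M-bit pattern when the operand is signed.
        int_operand = op if unsigned else op - (1 << m) * (op >> (m - 1))
        return int_operand / 2.0 ** fbits

    # 0xFF00 as a signed 16-bit value is -256; with 8 fractional bits that is -1.0.
    print(fixed_to_fp(0xFF00, 16, 8, unsigned=False))   # -1.0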

Library pseudocode for shared/functions/float/fpabs/FPAbs

// FPAbs()
// =======

bits(N) FPAbs(bits(N) op)
    assert N IN {16,32,64};
    return '0' : op<N-2:0>;

Library pseudocode for shared/functions/float/fpadd/FPAdd

// FPAdd()
// =======

bits(N) FPAdd(bits(N) op1, bits(N) op2, FPCRType fpcr)
    assert N IN {16,32,64};
    rounding = FPRoundingMode(fpcr);

    (type1,sign1,value1) = FPUnpack(op1, fpcr);
    (type2,sign2,value2) = FPUnpack(op2, fpcr);

    (done,result) = FPProcessNaNs(type1, type2, op1, op2, fpcr);
    if !done then
        inf1 = (type1 == FPType_Infinity); inf2 = (type2 == FPType_Infinity);
        zero1 = (type1 == FPType_Zero);    zero2 = (type2 == FPType_Zero);
        if inf1 && inf2 && sign1 == NOT(sign2) then
            result = FPDefaultNaN();
            FPProcessException(FPExc_InvalidOp, fpcr);
        elsif (inf1 && sign1 == '0') || (inf2 && sign2 == '0') then
            result = FPInfinity('0');
        elsif (inf1 && sign1 == '1') || (inf2 && sign2 == '1') then
            result = FPInfinity('1');
        elsif zero1 && zero2 && sign1 == sign2 then
            result = FPZero(sign1);
        else
            result_value = value1 + value2;
            if result_value == 0.0 then  // Sign of exact zero result depends on rounding mode
                result_sign = if rounding == FPRounding_NEGINF then '1' else '0';
                result = FPZero(result_sign);
            else
                result = FPRound(result_value, fpcr, rounding);
    return result;

Library pseudocode for shared/functions/float/fpcommon/IsDenormalizedValue

// IsDenormalizedValue()
// =====================
// Checks whether a single-precision or double-precision floating-point
// value is denormalized.

boolean IsDenormalizedValue(bits(N) fpval)
    assert N IN {32,64};
    case N of
        when 32
            exp32  = fpval<30:23>;
            frac32 = fpval<22:0>;
            isDenormal = IsZero(exp32) && !IsZero(frac32);
        when 64
            exp64  = fpval<62:52>;
            frac64 = fpval<51:0>;
            isDenormal = IsZero(exp64) && !IsZero(frac64);
    return isDenormal;
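The test is simply "exponent field all zero, fraction field non-zero". A quick Python cross-check for N == 64, using struct to obtain the raw bit pattern of a double (is_denormalized_f64 is an illustrative helper):

    import struct

    def is_denormalized_f64(x):
        bits = struct.unpack("<Q", struct.pack("<d", x))[0]
        exp  = (bits >> 52) & 0x7FF
        frac = bits & ((1 << 52) - 1)
        return exp == 0 and frac != 0

    print(is_denormalized_f64(5e-324))   # True (smallest subnormal double)
    print(is_denormalized_f64(1.0))      # False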

Library pseudocode for shared/functions/float/fpcompare/FPCompare

// FPCompare()
// ===========

bits(4) FPCompare(bits(N) op1, bits(N) op2, boolean signal_nans, FPCRType fpcr)
    assert N IN {16,32,64};
    (type1,sign1,value1) = FPUnpack(op1, fpcr);
    (type2,sign2,value2) = FPUnpack(op2, fpcr);
    op1_nan = type1 IN {FPType_SNaN, FPType_QNaN};
    op2_nan = type2 IN {FPType_SNaN, FPType_QNaN};
    if op1_nan || op2_nan then
        result = '0011';
        if type1 == FPType_SNaN || type2 == FPType_SNaN || signal_nans then
            FPProcessException(FPExc_InvalidOp, fpcr);
    else
        // All non-NaN cases can be evaluated on the values produced by FPUnpack()
        if value1 == value2 then
            result = '0110';
        elsif value1 < value2 then
            result = '1000';
        else  // value1 > value2
            result = '0010';
    return result;
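The bits(4) result is the NZCV flag nibble. A small Python sketch of the non-trapping mapping (fp_compare_nzcv is an illustrative helper; NaN operands yield '0011', i.e. C and V set for "unordered"):

    def fp_compare_nzcv(a, b):
        if a != a or b != b:                     # NaN check without math.isnan
            return 0b0011                        # unordered
        if a == b:
            return 0b0110                        # equal: Z and C
        return 0b1000 if a < b else 0b0010       # less: N; greater: C

    print(format(fp_compare_nzcv(1.0, float("nan")), "04b"))   # 0011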

Library pseudocode for shared/functions/float/fpcompareeq/FPCompareEQ

// FPCompareEQ()
// =============

boolean FPCompareEQ(bits(N) op1, bits(N) op2, FPCRType fpcr)
    assert N IN {16,32,64};
    (type1,sign1,value1) = FPUnpack(op1, fpcr);
    (type2,sign2,value2) = FPUnpack(op2, fpcr);
    op1_nan = type1 IN {FPType_SNaN, FPType_QNaN};
    op2_nan = type2 IN {FPType_SNaN, FPType_QNaN};
    if op1_nan || op2_nan then
        result = FALSE;
        if type1 == FPType_SNaN || type2 == FPType_SNaN then
            FPProcessException(FPExc_InvalidOp, fpcr);
    else
        // All non-NaN cases can be evaluated on the values produced by FPUnpack()
        result = (value1 == value2);
    return result;

Library pseudocode for shared/functions/float/fpcomparege/FPCompareGE

// FPCompareGE()
// =============

boolean FPCompareGE(bits(N) op1, bits(N) op2, FPCRType fpcr)
    assert N IN {16,32,64};
    (type1,sign1,value1) = FPUnpack(op1, fpcr);
    (type2,sign2,value2) = FPUnpack(op2, fpcr);
    op1_nan = type1 IN {FPType_SNaN, FPType_QNaN};
    op2_nan = type2 IN {FPType_SNaN, FPType_QNaN};
    if op1_nan || op2_nan then
        result = FALSE;
        FPProcessException(FPExc_InvalidOp, fpcr);
    else
        // All non-NaN cases can be evaluated on the values produced by FPUnpack()
        result = (value1 >= value2);
    return result;

Library pseudocode for shared/functions/float/fpcomparegt/FPCompareGT

// FPCompareGT()
// =============

boolean FPCompareGT(bits(N) op1, bits(N) op2, FPCRType fpcr)
    assert N IN {16,32,64};
    (type1,sign1,value1) = FPUnpack(op1, fpcr);
    (type2,sign2,value2) = FPUnpack(op2, fpcr);
    op1_nan = type1 IN {FPType_SNaN, FPType_QNaN};
    op2_nan = type2 IN {FPType_SNaN, FPType_QNaN};
    if op1_nan || op2_nan then
        result = FALSE;
        FPProcessException(FPExc_InvalidOp, fpcr);
    else
        // All non-NaN cases can be evaluated on the values produced by FPUnpack()
        result = (value1 > value2);
    return result;

Library pseudocode for shared/functions/float/fpconvert/FPConvert

// FPConvert()
// ===========
// Convert floating point OP with N-bit precision to M-bit precision,
// with rounding controlled by ROUNDING.
// This is used by the FP-to-FP conversion instructions and so for
// half-precision data ignores FZ16, but observes AHP.

bits(M) FPConvert(bits(N) op, FPCRType fpcr, FPRounding rounding)
    assert M IN {16,32,64};
    assert N IN {16,32,64};
    bits(M) result;

    // Unpack floating-point operand optionally with flush-to-zero.
    (fptype,sign,value) = FPUnpackCV(op, fpcr);

    alt_hp = (M == 16) && (fpcr.AHP == '1');

    if fptype == FPType_SNaN || fptype == FPType_QNaN then
        if alt_hp then
            result = FPZero(sign);
        elsif fpcr.DN == '1' then
            result = FPDefaultNaN();
        else
            result = FPConvertNaN(op);
        if fptype == FPType_SNaN || alt_hp then
            FPProcessException(FPExc_InvalidOp,fpcr);
    elsif fptype == FPType_Infinity then
        if alt_hp then
            result = sign:Ones(M-1);
            FPProcessException(FPExc_InvalidOp, fpcr);
        else
            result = FPInfinity(sign);
    elsif fptype == FPType_Zero then
        result = FPZero(sign);
    else
        result = FPRoundCV(value, fpcr, rounding);
    return result;

// FPConvert()
// ===========

bits(M) FPConvert(bits(N) op, FPCRType fpcr)
    return FPConvert(op, fpcr, FPRoundingMode(fpcr));

Library pseudocode for shared/functions/float/fpconvertnan/FPConvertNaN

// FPConvertNaN()
// ==============
// Converts a NaN of one floating-point type to another

bits(M) FPConvertNaN(bits(N) op)
    assert N IN {16,32,64};
    assert M IN {16,32,64};
    bits(M) result;
    bits(51) frac;
    sign = op<N-1>;

    // Unpack payload from input NaN
    case N of
        when 64 frac = op<50:0>;
        when 32 frac = op<21:0>:Zeros(29);
        when 16 frac = op<8:0>:Zeros(42);

    // Repack payload into output NaN, while
    // converting an SNaN to a QNaN.
    case M of
        when 64 result = sign:Ones(M-52):frac;
        when 32 result = sign:Ones(M-23):frac<50:29>;
        when 16 result = sign:Ones(M-10):frac<50:42>;

    return result;
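In effect the payload bits below the quiet bit slide to the top of the destination fraction, and the Ones(...) run forces the exponent and quiet bit to all-ones, so an SNaN always comes out quiet. A Python sketch of the half-to-single case only (convert_nan_16_to_32 is an illustrative helper):

    def convert_nan_16_to_32(op16):
        sign  = (op16 >> 15) & 1
        frac9 = op16 & 0x1FF       # op<8:0>: the payload below the quiet bit
        # sign : Ones(9) : frac<50:29>  ==  sign, all-ones exponent, quiet bit,
        # then the 9 payload bits followed by zeros.
        return (sign << 31) | (0xFF << 23) | (1 << 22) | (frac9 << 13)

    print(hex(convert_nan_16_to_32(0x7D01)))   # 0x7fe02000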

Library pseudocode for shared/functions/float/fpcrtype/FPCRType

type FPCRType;

Library pseudocode for shared/functions/float/fpdecoderm/FPDecodeRM

// FPDecodeRM()
// ============
// Decode most common AArch32 floating-point rounding encoding.

FPRounding FPDecodeRM(bits(2) rm)
    case rm of
        when '00' result = FPRounding_TIEAWAY; // A
        when '01' result = FPRounding_TIEEVEN; // N
        when '10' result = FPRounding_POSINF;  // P
        when '11' result = FPRounding_NEGINF;  // M
    return result;

Library pseudocode for shared/functions/float/fpdecoderounding/FPDecodeRounding

// FPDecodeRounding()
// ==================
// Decode floating-point rounding mode and common AArch64 encoding.

FPRounding FPDecodeRounding(bits(2) rmode)
    case rmode of
        when '00' return FPRounding_TIEEVEN; // N
        when '01' return FPRounding_POSINF;  // P
        when '10' return FPRounding_NEGINF;  // M
        when '11' return FPRounding_ZERO;    // Z

Library pseudocode for shared/functions/float/fpdefaultnan/FPDefaultNaN

// FPDefaultNaN()
// ==============

bits(N) FPDefaultNaN()
    assert N IN {16,32,64};
    constant integer E = (if N == 16 then 5 elsif N == 32 then 8 else 11);
    constant integer F = N - (E + 1);
    sign = '0';
    bits(E) exp  = Ones(E);
    bits(F) frac = '1':Zeros(F-1);
    return sign : exp : frac;
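The default NaN is sign 0, all-ones exponent, and only the top fraction bit set. A quick Python check of the resulting patterns (fp_default_nan is an illustrative helper):

    def fp_default_nan(n):
        e = {16: 5, 32: 8, 64: 11}[n]
        f = n - (e + 1)
        return (((1 << e) - 1) << f) | (1 << (f - 1))

    for n in (16, 32, 64):
        print(n, hex(fp_default_nan(n)))
    # 16 0x7e00
    # 32 0x7fc00000
    # 64 0x7ff8000000000000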

Library pseudocode for shared/functions/float/fpdiv/FPDiv

// FPDiv()
// =======

bits(N) FPDiv(bits(N) op1, bits(N) op2, FPCRType fpcr)
    assert N IN {16,32,64};
    (type1,sign1,value1) = FPUnpack(op1, fpcr);
    (type2,sign2,value2) = FPUnpack(op2, fpcr);
    (done,result) = FPProcessNaNs(type1, type2, op1, op2, fpcr);
    if !done then
        inf1 = (type1 == FPType_Infinity); inf2 = (type2 == FPType_Infinity);
        zero1 = (type1 == FPType_Zero);    zero2 = (type2 == FPType_Zero);
        invalidop = (inf1 && inf2) || (zero1 && zero2);
        if invalidop then
            result = FPDefaultNaN();
            FPProcessException(FPExc_InvalidOp, fpcr);
        elsif inf1 || zero2 then
            result = FPInfinity(sign1 EOR sign2);
            if !inf1 then FPProcessException(FPExc_DivideByZero, fpcr);
        elsif zero1 || inf2 then
            result = FPZero(sign1 EOR sign2);
        else
            result = FPRound(value1/value2, fpcr);
    return result;

Library pseudocode for shared/functions/float/fpexc/FPExc

enumeration FPExc {FPExc_InvalidOp, FPExc_DivideByZero, FPExc_Overflow,
                   FPExc_Underflow, FPExc_Inexact, FPExc_InputDenorm};

Library pseudocode for shared/functions/float/fpinfinity/FPInfinity

// FPInfinity()
// ============

bits(N) FPInfinity(bit sign)
    assert N IN {16,32,64};
    constant integer E = (if N == 16 then 5 elsif N == 32 then 8 else 11);
    constant integer F = N - (E + 1);
    bits(E) exp  = Ones(E);
    bits(F) frac = Zeros(F);
    return sign : exp : frac;

Library pseudocode for shared/functions/float/fpmatmul/FPMatMulAdd

// FPMatMulAdd()
// =============
//
// Floating point matrix multiply and add to same precision matrix
// result[2, 2] = addend[2, 2] + (op1[2, 2] * op2[2, 2])

bits(N) FPMatMulAdd(bits(N) addend, bits(N) op1, bits(N) op2, integer esize, FPCRType fpcr)
    assert N == esize * 2 * 2;
    bits(N) result;
    bits(esize) prod0, prod1, sum;
    for i = 0 to 1
        for j = 0 to 1
            sum   = Elem[addend, 2*i + j, esize];
            prod0 = FPMul(Elem[op1, 2*i + 0, esize], Elem[op2, 2*j + 0, esize], fpcr);
            prod1 = FPMul(Elem[op1, 2*i + 1, esize], Elem[op2, 2*j + 1, esize], fpcr);
            sum   = FPAdd(sum, FPAdd(prod0, prod1, fpcr), fpcr);
            Elem[result, 2*i + j, esize] = sum;
    return result;

Library pseudocode for shared/functions/float/fpmax/FPMax

// FPMax()
// =======

bits(N) FPMax(bits(N) op1, bits(N) op2, FPCRType fpcr)
    assert N IN {16,32,64};
    (type1,sign1,value1) = FPUnpack(op1, fpcr);
    (type2,sign2,value2) = FPUnpack(op2, fpcr);
    (done,result) = FPProcessNaNs(type1, type2, op1, op2, fpcr);
    if !done then
        if value1 > value2 then
            (fptype,sign,value) = (type1,sign1,value1);
        else
            (fptype,sign,value) = (type2,sign2,value2);
        if fptype == FPType_Infinity then
            result = FPInfinity(sign);
        elsif fptype == FPType_Zero then
            sign = sign1 AND sign2;  // Use most positive sign
            result = FPZero(sign);
        else
            // The use of FPRound() covers the case where there is a trapped underflow exception
            // for a denormalized number even though the result is exact.
            result = FPRound(value, fpcr);
    return result;

Library pseudocode for shared/functions/float/fpmaxnormal/FPMaxNormal

// FPMaxNormal()
// =============

bits(N) FPMaxNormal(bit sign)
    assert N IN {16,32,64};
    constant integer E = (if N == 16 then 5 elsif N == 32 then 8 else 11);
    constant integer F = N - (E + 1);
    exp  = Ones(E-1):'0';
    frac = Ones(F);
    return sign : exp : frac;

Library pseudocode for shared/functions/float/fpmaxnum/FPMaxNum

// FPMaxNum()
// ==========

bits(N) FPMaxNum(bits(N) op1, bits(N) op2, FPCRType fpcr)
    assert N IN {16,32,64};
    (type1,-,-) = FPUnpack(op1, fpcr);
    (type2,-,-) = FPUnpack(op2, fpcr);

    // treat a single quiet-NaN as -Infinity
    if type1 == FPType_QNaN && type2 != FPType_QNaN then
        op1 = FPInfinity('1');
    elsif type1 != FPType_QNaN && type2 == FPType_QNaN then
        op2 = FPInfinity('1');

    result = FPMax(op1, op2, fpcr);
    return result;

Library pseudocode for shared/functions/float/fpmin/FPMin

// FPMin()
// =======

bits(N) FPMin(bits(N) op1, bits(N) op2, FPCRType fpcr)
    assert N IN {16,32,64};
    (type1,sign1,value1) = FPUnpack(op1, fpcr);
    (type2,sign2,value2) = FPUnpack(op2, fpcr);
    (done,result) = FPProcessNaNs(type1, type2, op1, op2, fpcr);
    if !done then
        if value1 < value2 then
            (fptype,sign,value) = (type1,sign1,value1);
        else
            (fptype,sign,value) = (type2,sign2,value2);
        if fptype == FPType_Infinity then
            result = FPInfinity(sign);
        elsif fptype == FPType_Zero then
            sign = sign1 OR sign2;  // Use most negative sign
            result = FPZero(sign);
        else
            // The use of FPRound() covers the case where there is a trapped underflow exception
            // for a denormalized number even though the result is exact.
            result = FPRound(value, fpcr);
    return result;

Library pseudocode for shared/functions/float/fpminnum/FPMinNum

// FPMinNum()
// ==========

bits(N) FPMinNum(bits(N) op1, bits(N) op2, FPCRType fpcr)
    assert N IN {16,32,64};
    (type1,-,-) = FPUnpack(op1, fpcr);
    (type2,-,-) = FPUnpack(op2, fpcr);

    // Treat a single quiet-NaN as +Infinity
    if type1 == FPType_QNaN && type2 != FPType_QNaN then
        op1 = FPInfinity('0');
    elsif type1 != FPType_QNaN && type2 == FPType_QNaN then
        op2 = FPInfinity('0');

    result = FPMin(op1, op2, fpcr);
    return result;

Library pseudocode for shared/functions/float/fpmul/FPMul

// FPMul()
// =======

bits(N) FPMul(bits(N) op1, bits(N) op2, FPCRType fpcr)
    assert N IN {16,32,64};
    (type1,sign1,value1) = FPUnpack(op1, fpcr);
    (type2,sign2,value2) = FPUnpack(op2, fpcr);
    (done,result) = FPProcessNaNs(type1, type2, op1, op2, fpcr);
    if !done then
        inf1 = (type1 == FPType_Infinity); inf2 = (type2 == FPType_Infinity);
        zero1 = (type1 == FPType_Zero);    zero2 = (type2 == FPType_Zero);
        invalidop = (inf1 && zero2) || (zero1 && inf2);
        if invalidop then
            result = FPDefaultNaN();
            FPProcessException(FPExc_InvalidOp, fpcr);
        elsif inf1 || inf2 then
            result = FPInfinity(sign1 EOR sign2);
        elsif zero1 || zero2 then
            result = FPZero(sign1 EOR sign2);
        else
            result = FPRound(value1*value2, fpcr);
    return result;

Library pseudocode for shared/functions/float/fpmuladd/FPMulAdd

// FPMulAdd()
// ==========
//
// Calculates addend + op1*op2 with a single rounding. The 'fpcr' argument
// supplies the FPCR control bits.

bits(N) FPMulAdd(bits(N) addend, bits(N) op1, bits(N) op2, FPCRType fpcr)
    assert N IN {16,32,64};
    rounding = FPRoundingMode(fpcr);
    (typeA,signA,valueA) = FPUnpack(addend, fpcr);
    (type1,sign1,value1) = FPUnpack(op1, fpcr);
    (type2,sign2,value2) = FPUnpack(op2, fpcr);
    inf1 = (type1 == FPType_Infinity); zero1 = (type1 == FPType_Zero);
    inf2 = (type2 == FPType_Infinity); zero2 = (type2 == FPType_Zero);

    (done,result) = FPProcessNaNs3(typeA, type1, type2, addend, op1, op2, fpcr);

    if typeA == FPType_QNaN && ((inf1 && zero2) || (zero1 && inf2)) then
        result = FPDefaultNaN();
        FPProcessException(FPExc_InvalidOp, fpcr);

    if !done then
        infA = (typeA == FPType_Infinity); zeroA = (typeA == FPType_Zero);

        // Determine sign and type product will have if it does not cause an
        // Invalid Operation.
        signP = sign1 EOR sign2;
        infP  = inf1 || inf2;
        zeroP = zero1 || zero2;

        // Non SNaN-generated Invalid Operation cases are multiplies of zero
        // by infinity and additions of opposite-signed infinities.
        invalidop = (inf1 && zero2) || (zero1 && inf2) || (infA && infP && signA != signP);
        if invalidop then
            result = FPDefaultNaN();
            FPProcessException(FPExc_InvalidOp, fpcr);

        // Other cases involving infinities produce an infinity of the same sign.
        elsif (infA && signA == '0') || (infP && signP == '0') then
            result = FPInfinity('0');
        elsif (infA && signA == '1') || (infP && signP == '1') then
            result = FPInfinity('1');

        // Cases where the result is exactly zero and its sign is not determined by the
        // rounding mode are additions of same-signed zeros.
        elsif zeroA && zeroP && signA == signP then
            result = FPZero(signA);

        // Otherwise calculate numerical result and round it.
        else
            result_value = valueA + (value1 * value2);
            if result_value == 0.0 then  // Sign of exact zero result depends on rounding mode
                result_sign = if rounding == FPRounding_NEGINF then '1' else '0';
                result = FPZero(result_sign);
            else
                result = FPRound(result_value, fpcr);

    return result;
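"With a single rounding" is the defining property: the exact real value addend + op1*op2 is formed first and rounded once. A Python illustration of why that differs from a separate multiply-then-add, using fractions.Fraction to stand in for the exact intermediate (this is a demonstration of the numerics only, not of the pseudocode's special-case handling):

    from fractions import Fraction

    a = float(2 ** 27 + 1)      # exactly representable in a double
    b = float(2 ** 27 + 1)
    c = float(-(2 ** 54))

    fused    = float(Fraction(a) * Fraction(b) + Fraction(c))   # one final rounding
    two_step = a * b + c                                        # product rounded first

    print(fused)      # 268435457.0  (exact: 2^28 + 1)
    print(two_step)   # 268435456.0  (the +1 was lost when the product was rounded)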

Library pseudocode for shared/functions/float/fpmuladdh/FPMulAddH

// FPMulAddH()
// ===========
// Calculates addend + op1*op2.

bits(N) FPMulAddH(bits(N) addend, bits(N DIV 2) op1, bits(N DIV 2) op2, FPCRType fpcr)
    assert N IN {32,64};
    rounding = FPRoundingMode(fpcr);
    (typeA,signA,valueA) = FPUnpack(addend, fpcr);
    (type1,sign1,value1) = FPUnpack(op1, fpcr);
    (type2,sign2,value2) = FPUnpack(op2, fpcr);
    inf1 = (type1 == FPType_Infinity); zero1 = (type1 == FPType_Zero);
    inf2 = (type2 == FPType_Infinity); zero2 = (type2 == FPType_Zero);
    (done,result) = FPProcessNaNs3H(typeA, type1, type2, addend, op1, op2, fpcr);
    if typeA == FPType_QNaN && ((inf1 && zero2) || (zero1 && inf2)) then
        result = FPDefaultNaN();
        FPProcessException(FPExc_InvalidOp, fpcr);
    if !done then
        infA = (typeA == FPType_Infinity); zeroA = (typeA == FPType_Zero);
        // Determine sign and type product will have if it does not cause an
        // Invalid Operation.
        signP = sign1 EOR sign2;
        infP  = inf1 || inf2;
        zeroP = zero1 || zero2;
        // Non SNaN-generated Invalid Operation cases are multiplies of zero by infinity and
        // additions of opposite-signed infinities.
        invalidop = (inf1 && zero2) || (zero1 && inf2) || (infA && infP && signA != signP);
        if invalidop then
            result = FPDefaultNaN();
            FPProcessException(FPExc_InvalidOp, fpcr);
        // Other cases involving infinities produce an infinity of the same sign.
        elsif (infA && signA == '0') || (infP && signP == '0') then
            result = FPInfinity('0');
        elsif (infA && signA == '1') || (infP && signP == '1') then
            result = FPInfinity('1');
        // Cases where the result is exactly zero and its sign is not determined by the
        // rounding mode are additions of same-signed zeros.
        elsif zeroA && zeroP && signA == signP then
            result = FPZero(signA);
        // Otherwise calculate numerical result and round it.
        else
            result_value = valueA + (value1 * value2);
            if result_value == 0.0 then  // Sign of exact zero result depends on rounding mode
                result_sign = if rounding == FPRounding_NEGINF then '1' else '0';
                result = FPZero(result_sign);
            else
                result = FPRound(result_value, fpcr);
    return result;

Library pseudocode for shared/functions/float/fpmuladdh/FPProcessNaNs3H

// FPProcessNaNs3H()
// =================

(boolean, bits(N)) FPProcessNaNs3H(FPType type1, FPType type2, FPType type3,
                                   bits(N) op1, bits(N DIV 2) op2, bits(N DIV 2) op3,
                                   FPCRType fpcr)
    assert N IN {32,64};
    bits(N) result;
    if type1 == FPType_SNaN then
        done = TRUE;  result = FPProcessNaN(type1, op1, fpcr);
    elsif type2 == FPType_SNaN then
        done = TRUE;  result = FPConvertNaN(FPProcessNaN(type2, op2, fpcr));
    elsif type3 == FPType_SNaN then
        done = TRUE;  result = FPConvertNaN(FPProcessNaN(type3, op3, fpcr));
    elsif type1 == FPType_QNaN then
        done = TRUE;  result = FPProcessNaN(type1, op1, fpcr);
    elsif type2 == FPType_QNaN then
        done = TRUE;  result = FPConvertNaN(FPProcessNaN(type2, op2, fpcr));
    elsif type3 == FPType_QNaN then
        done = TRUE;  result = FPConvertNaN(FPProcessNaN(type3, op3, fpcr));
    else
        done = FALSE;  result = Zeros();  // 'Don't care' result
    return (done, result);

Library pseudocode for shared/functions/float/fpmulx/FPMulX

// FPMulX()
// ========

bits(N) FPMulX(bits(N) op1, bits(N) op2, FPCRType fpcr)
    assert N IN {16,32,64};
    bits(N) result;
    (type1,sign1,value1) = FPUnpack(op1, fpcr);
    (type2,sign2,value2) = FPUnpack(op2, fpcr);
    (done,result) = FPProcessNaNs(type1, type2, op1, op2, fpcr);
    if !done then
        inf1 = (type1 == FPType_Infinity); inf2 = (type2 == FPType_Infinity);
        zero1 = (type1 == FPType_Zero);    zero2 = (type2 == FPType_Zero);
        if (inf1 && zero2) || (zero1 && inf2) then
            result = FPTwo(sign1 EOR sign2);
        elsif inf1 || inf2 then
            result = FPInfinity(sign1 EOR sign2);
        elsif zero1 || zero2 then
            result = FPZero(sign1 EOR sign2);
        else
            result = FPRound(value1*value2, fpcr);
    return result;

Library pseudocode for shared/functions/float/fpneg/FPNeg

// FPNeg()
// =======

bits(N) FPNeg(bits(N) op)
    assert N IN {16,32,64};
    return NOT(op<N-1>) : op<N-2:0>;

Library pseudocode for shared/functions/float/fponepointfive/FPOnePointFive

// FPOnePointFive()
// ================

bits(N) FPOnePointFive(bit sign)
    assert N IN {16,32,64};
    constant integer E = (if N == 16 then 5 elsif N == 32 then 8 else 11);
    constant integer F = N - (E + 1);
    exp  = '0':Ones(E-1);
    frac = '1':Zeros(F-1);
    result = sign : exp : frac;
    return result;

Library pseudocode for shared/functions/float/fpprocessexception/FPProcessException

// FPProcessException()
// ====================
//
// The 'fpcr' argument supplies FPCR control bits. Status information is
// updated directly in the FPSR where appropriate.

FPProcessException(FPExc exception, FPCRType fpcr)
    // Determine the cumulative exception bit number
    case exception of
        when FPExc_InvalidOp     cumul = 0;
        when FPExc_DivideByZero  cumul = 1;
        when FPExc_Overflow      cumul = 2;
        when FPExc_Underflow     cumul = 3;
        when FPExc_Inexact       cumul = 4;
        when FPExc_InputDenorm   cumul = 7;
    enable = cumul + 8;
    if fpcr<enable> == '1' then
        // Trapping of the exception enabled.
        // It is IMPLEMENTATION DEFINED whether the enable bit may be set at all, and
        // if so then how exceptions may be accumulated before calling FPTrappedException()
        IMPLEMENTATION_DEFINED "floating-point trap handling";
    elsif UsingAArch32() then
        // Set the cumulative exception bit
        FPSCR<cumul> = '1';
    else
        // Set the cumulative exception bit
        FPSR<cumul> = '1';
    return;
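The cumulative status bits therefore sit at fixed FPSR/FPSCR positions, with each trap-enable bit 8 places higher. A trivial Python table check of that relationship:

    CUMUL = {"InvalidOp": 0, "DivideByZero": 1, "Overflow": 2,
             "Underflow": 3, "Inexact": 4, "InputDenorm": 7}

    for name, cumul in CUMUL.items():
        enable = cumul + 8
        print(f"{name:13} status bit {cumul}  trap-enable bit {enable}")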

Library pseudocode for shared/functions/float/fpprocessnan/FPProcessNaN

// FPProcessNaN()
// ==============

bits(N) FPProcessNaN(FPType fptype, bits(N) op, FPCRType fpcr)
    assert N IN {16,32,64};
    assert fptype IN {FPType_QNaN, FPType_SNaN};

    case N of
        when 16 topfrac =  9;
        when 32 topfrac = 22;
        when 64 topfrac = 51;

    result = op;
    if fptype == FPType_SNaN then
        result<topfrac> = '1';
        FPProcessException(FPExc_InvalidOp, fpcr);
    if fpcr.DN == '1' then  // DefaultNaN requested
        result = FPDefaultNaN();
    return result;
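So an SNaN is quieted by setting the top fraction bit while the payload is preserved, unless FPCR.DN substitutes the default NaN. An illustrative Python sketch for N == 32 (fp_process_nan_32 is a hypothetical helper; the InvalidOp exception side effect is noted in the comment only):

    def fp_process_nan_32(bits32, is_snan, dn):
        if is_snan:
            bits32 |= 1 << 22      # set topfrac; the real function also raises InvalidOp
        if dn:
            bits32 = 0x7FC00000    # FPDefaultNaN() for N == 32
        return bits32

    print(hex(fp_process_nan_32(0x7F800001, is_snan=True, dn=False)))   # 0x7fc00001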

Library pseudocode for shared/functions/float/fpprocessnans/FPProcessNaNs

// FPProcessNaNs()
// ===============
//
// The boolean part of the return value says whether a NaN has been found and
// processed. The bits(N) part is only relevant if it has and supplies the
// result of the operation.
//
// The 'fpcr' argument supplies FPCR control bits. Status information is
// updated directly in the FPSR where appropriate.

(boolean, bits(N)) FPProcessNaNs(FPType type1, FPType type2, bits(N) op1, bits(N) op2,
                                 FPCRType fpcr)
    assert N IN {16,32,64};
    if type1 == FPType_SNaN then
        done = TRUE;  result = FPProcessNaN(type1, op1, fpcr);
    elsif type2 == FPType_SNaN then
        done = TRUE;  result = FPProcessNaN(type2, op2, fpcr);
    elsif type1 == FPType_QNaN then
        done = TRUE;  result = FPProcessNaN(type1, op1, fpcr);
    elsif type2 == FPType_QNaN then
        done = TRUE;  result = FPProcessNaN(type2, op2, fpcr);
    else
        done = FALSE;  result = Zeros();  // 'Don't care' result
    return (done, result);

Library pseudocode for shared/functions/float/fpprocessnans3/FPProcessNaNs3

// FPProcessNaNs3()
// ================
//
// The boolean part of the return value says whether a NaN has been found and
// processed. The bits(N) part is only relevant if it has and supplies the
// result of the operation.
//
// The 'fpcr' argument supplies FPCR control bits. Status information is
// updated directly in the FPSR where appropriate.

(boolean, bits(N)) FPProcessNaNs3(FPType type1, FPType type2, FPType type3,
                                  bits(N) op1, bits(N) op2, bits(N) op3,
                                  FPCRType fpcr)
    assert N IN {16,32,64};
    if type1 == FPType_SNaN then
        done = TRUE;  result = FPProcessNaN(type1, op1, fpcr);
    elsif type2 == FPType_SNaN then
        done = TRUE;  result = FPProcessNaN(type2, op2, fpcr);
    elsif type3 == FPType_SNaN then
        done = TRUE;  result = FPProcessNaN(type3, op3, fpcr);
    elsif type1 == FPType_QNaN then
        done = TRUE;  result = FPProcessNaN(type1, op1, fpcr);
    elsif type2 == FPType_QNaN then
        done = TRUE;  result = FPProcessNaN(type2, op2, fpcr);
    elsif type3 == FPType_QNaN then
        done = TRUE;  result = FPProcessNaN(type3, op3, fpcr);
    else
        done = FALSE;  result = Zeros();  // 'Don't care' result
    return (done, result);

Library pseudocode for shared/functions/float/fprecipestimate/FPRecipEstimate

// FPRecipEstimate()
// =================

bits(N) FPRecipEstimate(bits(N) operand, FPCRType fpcr)
    assert N IN {16,32,64};
    (fptype,sign,value) = FPUnpack(operand, fpcr);
    if fptype == FPType_SNaN || fptype == FPType_QNaN then
        result = FPProcessNaN(fptype, operand, fpcr);
    elsif fptype == FPType_Infinity then
        result = FPZero(sign);
    elsif fptype == FPType_Zero then
        result = FPInfinity(sign);
        FPProcessException(FPExc_DivideByZero, fpcr);
    elsif (
            (N == 16 && Abs(value) < 2.0^-16)  ||
            (N == 32 && Abs(value) < 2.0^-128) ||
            (N == 64 && Abs(value) < 2.0^-1024)
          ) then
        case FPRoundingMode(fpcr) of
            when FPRounding_TIEEVEN
                overflow_to_inf = TRUE;
            when FPRounding_POSINF
                overflow_to_inf = (sign == '0');
            when FPRounding_NEGINF
                overflow_to_inf = (sign == '1');
            when FPRounding_ZERO
                overflow_to_inf = FALSE;
        result = if overflow_to_inf then FPInfinity(sign) else FPMaxNormal(sign);
        FPProcessException(FPExc_Overflow, fpcr);
        FPProcessException(FPExc_Inexact, fpcr);
    elsif ((fpcr.FZ == '1' && N != 16) || (fpcr.FZ16 == '1' && N == 16)) && (
            (N == 16 && Abs(value) >= 2.0^14)  ||
            (N == 32 && Abs(value) >= 2.0^126) ||
            (N == 64 && Abs(value) >= 2.0^1022)
          ) then
        // Result flushed to zero of correct sign
        result = FPZero(sign);
        // Flush-to-zero never generates a trapped exception.
        if UsingAArch32() then
            FPSCR.UFC = '1';
        else
            FPSR.UFC = '1';
    else
        // Scale to a fixed point value in the range 0.5 <= x < 1.0 in steps of 1/512, and
        // calculate result exponent. Scaled value has copied sign bit,
        // exponent = 1022 = double-precision biased version of -1,
        // fraction = original fraction
        case N of
            when 16 fraction = operand<9:0>  : Zeros(42); exp = UInt(operand<14:10>);
            when 32 fraction = operand<22:0> : Zeros(29); exp = UInt(operand<30:23>);
            when 64 fraction = operand<51:0>;             exp = UInt(operand<62:52>);
        if exp == 0 then
            if fraction<51> == '0' then
                exp = -1;
                fraction = fraction<49:0>:'00';
            else
                fraction = fraction<50:0>:'0';
        integer scaled = UInt('1':fraction<51:44>);
        case N of
            when 16 result_exp =   29 - exp; // In range 29-30 = -1 to 29+1 = 30
            when 32 result_exp =  253 - exp; // In range 253-254 = -1 to 253+1 = 254
            when 64 result_exp = 2045 - exp; // In range 2045-2046 = -1 to 2045+1 = 2046
        // scaled is in range 256..511 representing a fixed-point number in range [0.5..1.0)
        estimate = RecipEstimate(scaled);
        // estimate is in the range 256..511 representing a fixed point result in the range [1.0..2.0)
        // Convert to scaled floating point result with copied sign bit,
        // high-order bits from estimate, and exponent calculated above.
        fraction = estimate<7:0> : Zeros(44);
        if result_exp == 0 then
            fraction = '1' : fraction<51:1>;
        elsif result_exp == -1 then
            fraction = '01' : fraction<51:2>;
            result_exp = 0;
        case N of
            when 16 result = sign : result_exp<N-12:0> : fraction<51:42>;
            when 32 result = sign : result_exp<N-25:0> : fraction<51:29>;
            when 64 result = sign : result_exp<N-54:0> : fraction<51:0>;
    return result;

Library pseudocode for shared/functions/float/fprecipestimate/RecipEstimate

// RecipEstimate()
// ===============
// Compute estimate of reciprocal of 9-bit fixed-point number.
//
// a is in range 256 .. 511 representing a number in the range 0.5 <= x < 1.0.
// result is in the range 256 .. 511 representing a number in the range 1.0 to 511/256.

integer RecipEstimate(integer a)
    assert 256 <= a && a < 512;
    a = a*2+1;                 // round to nearest
    integer b = (2 ^ 19) DIV a;
    r = (b+1) DIV 2;           // round to nearest
    assert 256 <= r && r < 512;
    return r;
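The same fixed-point computation, transcribed into Python as an illustration (the function name recip_estimate is this note's, not the architecture's):

    # RecipEstimate() in Python; integer arithmetic only.
    def recip_estimate(a: int) -> int:
        assert 256 <= a < 512          # a/512 represents x in [0.5, 1.0)
        a = a * 2 + 1                  # round to the nearest 1/1024 midpoint
        b = (1 << 19) // a             # scaled reciprocal
        r = (b + 1) // 2               # round to nearest
        assert 256 <= r < 512          # r/256 represents the estimate in [1.0, 2.0)
        return r

    assert recip_estimate(256) == 511  # reciprocal of ~0.5 is ~2.0, one step below 512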

Library pseudocode for shared/functions/float/fprecpx/FPRecpX

// FPRecpX()
// =========

bits(N) FPRecpX(bits(N) op, FPCRType fpcr)
    assert N IN {16,32,64};
    case N of
        when 16 esize =  5;
        when 32 esize =  8;
        when 64 esize = 11;
    bits(N) result;
    bits(esize) exp;
    bits(esize) max_exp;
    bits(N-(esize+1)) frac = Zeros();
    case N of
        when 16 exp = op<10+esize-1:10>;
        when 32 exp = op<23+esize-1:23>;
        when 64 exp = op<52+esize-1:52>;
    max_exp = Ones(esize) - 1;
    (fptype,sign,value) = FPUnpack(op, fpcr);
    if fptype == FPType_SNaN || fptype == FPType_QNaN then
        result = FPProcessNaN(fptype, op, fpcr);
    else
        if IsZero(exp) then // Zero and denormals
            result = sign:max_exp:frac;
        else                // Infinities and normals
            result = sign:NOT(exp):frac;
    return result;

Library pseudocode for shared/functions/float/fpround/FPRound

// FPRound()
// =========
// Used by data processing and int/fixed <-> FP conversion instructions.
// For half-precision data it ignores AHP, and observes FZ16.

bits(N) FPRound(real op, FPCRType fpcr, FPRounding rounding)
    fpcr.AHP = '0';
    boolean isbfloat16 = FALSE;
    return FPRoundBase(op, fpcr, rounding, isbfloat16);

// FPRound()
// =========

bits(N) FPRound(real op, FPCRType fpcr)
    return FPRound(op, fpcr, FPRoundingMode(fpcr));

Library pseudocode for shared/functions/float/fpround/FPRoundBase

// FPRoundBase()
// =============
// Convert a real number OP into an N-bit floating-point value using the
// supplied rounding mode RMODE.

bits(N) FPRoundBase(real op, FPCRType fpcr, FPRounding rounding, boolean isbfloat16)
    assert N IN {16,32,64};
    assert op != 0.0;
    assert rounding != FPRounding_TIEAWAY;
    bits(N) result;

    // Obtain format parameters - minimum exponent, numbers of exponent and fraction bits.
    if N == 16 then
        minimum_exp = -14;   E = 5;  F = 10;
    elsif N == 32 && isbfloat16 then
        minimum_exp = -126;  E = 8;  F = 7;
    elsif N == 32 then
        minimum_exp = -126;  E = 8;  F = 23;
    else  // N == 64
        minimum_exp = -1022; E = 11; F = 52;

    // Split value into sign, unrounded mantissa and exponent.
    if op < 0.0 then
        sign = '1'; mantissa = -op;
    else
        sign = '0'; mantissa = op;
    exponent = 0;
    while mantissa < 1.0 do
        mantissa = mantissa * 2.0; exponent = exponent - 1;
    while mantissa >= 2.0 do
        mantissa = mantissa / 2.0; exponent = exponent + 1;

    if (((fpcr.FZ == '1' && N != 16) || (fpcr.FZ16 == '1' && N == 16)) && exponent < minimum_exp) then
        // Flush-to-zero never generates a trapped exception.
        if UsingAArch32() then
            FPSCR.UFC = '1';
        else
            FPSR.UFC = '1';
        return FPZero(sign);

    // Start creating the exponent value for the result. Start by biasing the actual exponent
    // so that the minimum exponent becomes 1, lower values 0 (indicating possible underflow).
    biased_exp = Max(exponent - minimum_exp + 1, 0);
    if biased_exp == 0 then mantissa = mantissa / 2.0^(minimum_exp - exponent);

    // Get the unrounded mantissa as an integer, and the "units in last place" rounding error.
    int_mant = RoundDown(mantissa * 2.0^F);  // < 2.0^F if biased_exp == 0, >= 2.0^F if not
    error = mantissa * 2.0^F - Real(int_mant);

    // Underflow occurs if exponent is too small before rounding, and result is inexact or
    // the Underflow exception is trapped.
    if biased_exp == 0 && (error != 0.0 || fpcr.UFE == '1') then
        FPProcessException(FPExc_Underflow, fpcr);

    // Round result according to rounding mode.
    case rounding of
        when FPRounding_TIEEVEN
            round_up = (error > 0.5 || (error == 0.5 && int_mant<0> == '1'));
            overflow_to_inf = TRUE;
        when FPRounding_POSINF
            round_up = (error != 0.0 && sign == '0');
            overflow_to_inf = (sign == '0');
        when FPRounding_NEGINF
            round_up = (error != 0.0 && sign == '1');
            overflow_to_inf = (sign == '1');
        when FPRounding_ZERO, FPRounding_ODD
            round_up = FALSE;
            overflow_to_inf = FALSE;
    if round_up then
        int_mant = int_mant + 1;
        if int_mant == 2^F then      // Rounded up from denormalized to normalized
            biased_exp = 1;
        if int_mant == 2^(F+1) then  // Rounded up to next exponent
            biased_exp = biased_exp + 1; int_mant = int_mant DIV 2;

    // Handle rounding to odd aka Von Neumann rounding
    if error != 0.0 && rounding == FPRounding_ODD then int_mant<0> = '1';

    // Deal with overflow and generate result.
    if N != 16 || fpcr.AHP == '0' then  // Single, double or IEEE half precision
        if biased_exp >= 2^E - 1 then
            result = if overflow_to_inf then FPInfinity(sign) else FPMaxNormal(sign);
            FPProcessException(FPExc_Overflow, fpcr);
            error = 1.0;  // Ensure that an Inexact exception occurs
        else
            result = sign : biased_exp<E-1:0> : int_mant<F-1:0> : Zeros(N-(E+F+1));
    else  // Alternative half precision
        if biased_exp >= 2^E then
            result = sign : Ones(N-1);
            FPProcessException(FPExc_InvalidOp, fpcr);
            error = 0.0;  // Ensure that an Inexact exception does not occur
        else
            result = sign : biased_exp<E-1:0> : int_mant<F-1:0> : Zeros(N-(E+F+1));

    // Deal with Inexact exception.
    if error != 0.0 then
        FPProcessException(FPExc_Inexact, fpcr);
    return result;
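The normalization and round-to-nearest-even core of FPRoundBase() can be checked outside the pseudocode. The sketch below is illustrative only (the helper name is this note's); it covers the normal range, uses exact rational arithmetic for the error term as the pseudocode's real arithmetic does, and omits flush-to-zero, subnormals and overflow:

    from fractions import Fraction

    def round_mantissa_tieeven(op: Fraction, F: int):
        # Normalize op > 0 into mantissa * 2^exponent with 1 <= mantissa < 2.
        mantissa, exponent = op, 0
        while mantissa < 1:
            mantissa *= 2; exponent -= 1
        while mantissa >= 2:
            mantissa /= 2; exponent += 1
        int_mant = int(mantissa * 2**F)        # RoundDown(mantissa * 2.0^F)
        error = mantissa * 2**F - int_mant     # "units in last place" error
        if error > Fraction(1, 2) or (error == Fraction(1, 2) and int_mant & 1):
            int_mant += 1
            if int_mant == 2**(F + 1):         # rounded up to next exponent
                int_mant //= 2; exponent += 1
        return int_mant, exponent

    # 0.1 rounded to a 53-bit double significand matches IEEE 754.
    m, e = round_mantissa_tieeven(Fraction(1, 10), 52)
    assert m == 0x1999999999999A and e == -4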

Library pseudocode for shared/functions/float/fpround/FPRoundCV

// FPRoundCV()
// ===========
// Used for FP <-> FP conversion instructions.
// For half-precision data ignores FZ16 and observes AHP.

bits(N) FPRoundCV(real op, FPCRType fpcr, FPRounding rounding)
    fpcr.FZ16 = '0';
    boolean isbfloat16 = FALSE;
    return FPRoundBase(op, fpcr, rounding, isbfloat16);

Library pseudocode for shared/functions/float/fprounding/FPRounding

enumeration FPRounding  {FPRounding_TIEEVEN, FPRounding_POSINF,
                         FPRounding_NEGINF,  FPRounding_ZERO,
                         FPRounding_TIEAWAY, FPRounding_ODD};

Library pseudocode for shared/functions/float/fproundingmode/FPRoundingMode

// FPRoundingMode()
// ================
// Return the current floating-point rounding mode.

FPRounding FPRoundingMode(FPCRType fpcr)
    return FPDecodeRounding(fpcr.RMode);

Library pseudocode for shared/functions/float/fproundint/FPRoundInt

// FPRoundInt()
// ============
// Round op to nearest integral floating point value using rounding mode in FPCR/FPSCR.
// If EXACT is TRUE, set FPSR.IXC if result is not numerically equal to op.

bits(N) FPRoundInt(bits(N) op, FPCRType fpcr, FPRounding rounding, boolean exact)
    assert rounding != FPRounding_ODD;
    assert N IN {16,32,64};

    // Unpack using FPCR to determine if subnormals are flushed-to-zero.
    (fptype,sign,value) = FPUnpack(op, fpcr);

    if fptype == FPType_SNaN || fptype == FPType_QNaN then
        result = FPProcessNaN(fptype, op, fpcr);
    elsif fptype == FPType_Infinity then
        result = FPInfinity(sign);
    elsif fptype == FPType_Zero then
        result = FPZero(sign);
    else
        // Extract integer component.
        int_result = RoundDown(value);
        error = value - Real(int_result);

        // Determine whether supplied rounding mode requires an increment.
        case rounding of
            when FPRounding_TIEEVEN
                round_up = (error > 0.5 || (error == 0.5 && int_result<0> == '1'));
            when FPRounding_POSINF
                round_up = (error != 0.0);
            when FPRounding_NEGINF
                round_up = FALSE;
            when FPRounding_ZERO
                round_up = (error != 0.0 && int_result < 0);
            when FPRounding_TIEAWAY
                round_up = (error > 0.5 || (error == 0.5 && int_result >= 0));

        if round_up then int_result = int_result + 1;

        // Convert integer value into an equivalent real value.
        real_result = Real(int_result);

        // Re-encode as a floating-point value, result is always exact.
        if real_result == 0.0 then
            result = FPZero(sign);
        else
            result = FPRound(real_result, fpcr, FPRounding_ZERO);

        // Generate inexact exceptions.
        if error != 0.0 && exact then
            FPProcessException(FPExc_Inexact, fpcr);
    return result;
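The round_up decision table above (shared with FPRoundIntN() and FPToFixed()) is easy to misread because it operates on floor(value) rather than on value itself. An illustrative transcription, with names invented for this note:

    from fractions import Fraction

    # round_up over int_result = floor(value) and error = value - int_result.
    def round_up(mode: str, int_result: int, error: Fraction) -> bool:
        half = Fraction(1, 2)
        if mode == "TIEEVEN": return error > half or (error == half and int_result & 1)
        if mode == "POSINF":  return error != 0
        if mode == "NEGINF":  return False                    # floor already rounded down
        if mode == "ZERO":    return error != 0 and int_result < 0
        if mode == "TIEAWAY": return error > half or (error == half and int_result >= 0)
        raise ValueError(mode)

    # -2.5 rounds to -2 under TIEEVEN: floor is -3 (odd), error 0.5, so increment.
    assert round_up("TIEEVEN", -3, Fraction(1, 2)) is True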

Library pseudocode for shared/functions/float/fproundintn/FPRoundIntN

// FPRoundIntN()
// =============

bits(N) FPRoundIntN(bits(N) op, FPCRType fpcr, FPRounding rounding, integer intsize)
    assert rounding != FPRounding_ODD;
    assert N IN {32,64};
    assert intsize IN {32, 64};
    integer exp;
    constant integer E = (if N == 32 then 8 else 11);
    constant integer F = N - (E + 1);

    // Unpack using FPCR to determine if subnormals are flushed-to-zero.
    (fptype,sign,value) = FPUnpack(op, fpcr);

    if fptype IN {FPType_SNaN, FPType_QNaN, FPType_Infinity} then
        if N == 32 then
            exp = 126 + intsize;
            result = '1':exp<(E-1):0>:Zeros(F);
        else
            exp = 1022 + intsize;
            result = '1':exp<(E-1):0>:Zeros(F);
        FPProcessException(FPExc_InvalidOp, fpcr);
    elsif fptype == FPType_Zero then
        result = FPZero(sign);
    else
        // Extract integer component.
        int_result = RoundDown(value);
        error = value - Real(int_result);

        // Determine whether supplied rounding mode requires an increment.
        case rounding of
            when FPRounding_TIEEVEN
                round_up = error > 0.5 || (error == 0.5 && int_result<0> == '1');
            when FPRounding_POSINF
                round_up = error != 0.0;
            when FPRounding_NEGINF
                round_up = FALSE;
            when FPRounding_ZERO
                round_up = error != 0.0 && int_result < 0;
            when FPRounding_TIEAWAY
                round_up = error > 0.5 || (error == 0.5 && int_result >= 0);

        if round_up then int_result = int_result + 1;

        overflow = int_result > 2^(intsize-1)-1 || int_result < -1*2^(intsize-1);

        if overflow then
            if N == 32 then
                exp = 126 + intsize;
                result = '1':exp<(E-1):0>:Zeros(F);
            else
                exp = 1022 + intsize;
                result = '1':exp<(E-1):0>:Zeros(F);
            FPProcessException(FPExc_InvalidOp, fpcr);
            // This case shouldn't set Inexact.
            error = 0.0;
        else
            // Convert integer value into an equivalent real value.
            real_result = Real(int_result);

            // Re-encode as a floating-point value, result is always exact.
            if real_result == 0.0 then
                result = FPZero(sign);
            else
                result = FPRound(real_result, fpcr, FPRounding_ZERO);

        // Generate inexact exceptions.
        if error != 0.0 then
            FPProcessException(FPExc_Inexact, fpcr);
    return result;

Library pseudocode for shared/functions/float/fprsqrtestimate/FPRSqrtEstimate

// FPRSqrtEstimate()
// =================

bits(N) FPRSqrtEstimate(bits(N) operand, FPCRType fpcr)
    assert N IN {16,32,64};
    (fptype,sign,value) = FPUnpack(operand, fpcr);
    if fptype == FPType_SNaN || fptype == FPType_QNaN then
        result = FPProcessNaN(fptype, operand, fpcr);
    elsif fptype == FPType_Zero then
        result = FPInfinity(sign);
        FPProcessException(FPExc_DivideByZero, fpcr);
    elsif sign == '1' then
        result = FPDefaultNaN();
        FPProcessException(FPExc_InvalidOp, fpcr);
    elsif fptype == FPType_Infinity then
        result = FPZero('0');
    else
        // Scale to a fixed-point value in the range 0.25 <= x < 1.0 in steps of 1/512, with the
        // evenness or oddness of the exponent unchanged, and calculate result exponent.
        // Scaled value has copied sign bit, exponent = 1022 or 1021 = double-precision
        // biased version of -1 or -2, fraction = original fraction extended with zeros.
        case N of
            when 16 fraction = operand<9:0>  : Zeros(42); exp = UInt(operand<14:10>);
            when 32 fraction = operand<22:0> : Zeros(29); exp = UInt(operand<30:23>);
            when 64 fraction = operand<51:0>;             exp = UInt(operand<62:52>);
        if exp == 0 then
            while fraction<51> == '0' do
                fraction = fraction<50:0> : '0'; exp = exp - 1;
            fraction = fraction<50:0> : '0';
        if exp<0> == '0' then
            scaled = UInt('1':fraction<51:44>);
        else
            scaled = UInt('01':fraction<51:45>);
        case N of
            when 16 result_exp = (  44 - exp) DIV 2;
            when 32 result_exp = ( 380 - exp) DIV 2;
            when 64 result_exp = (3068 - exp) DIV 2;
        estimate = RecipSqrtEstimate(scaled);
        // estimate is in the range 256..511 representing a fixed point result in the range [1.0..2.0)
        // Convert to scaled floating point result with copied sign bit and high-order
        // fraction bits, and exponent calculated above.
        case N of
            when 16 result = '0' : result_exp<N-12:0> : estimate<7:0>:Zeros( 2);
            when 32 result = '0' : result_exp<N-25:0> : estimate<7:0>:Zeros(15);
            when 64 result = '0' : result_exp<N-54:0> : estimate<7:0>:Zeros(44);
    return result;

Library pseudocode for shared/functions/float/fprsqrtestimate/RecipSqrtEstimate

// RecipSqrtEstimate()
// ===================
// Compute estimate of reciprocal square root of 9-bit fixed-point number.
//
// a is in range 128 .. 511 representing a number in the range 0.25 <= x < 1.0.
// result is in the range 256 .. 511 representing a number in the range 1.0 to 511/256.

integer RecipSqrtEstimate(integer a)
    assert 128 <= a && a < 512;
    if a < 256 then            // 0.25 .. 0.5
        a = a*2+1;             // a in units of 1/512 rounded to nearest
    else                       // 0.5 .. 1.0
        a = (a >> 1) << 1;     // discard bottom bit
        a = (a+1)*2;           // a in units of 1/256 rounded to nearest
    integer b = 512;
    while a*(b+1)*(b+1) < 2^28 do
        b = b+1;
    // b = largest b such that b < 2^14 / sqrt(a)
    r = (b+1) DIV 2;           // round to nearest
    assert 256 <= r && r < 512;
    return r;
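An illustrative Python transcription of the same search (function name invented for this note); the loop simply finds the largest b with a*(b+1)^2 below 2^28:

    def recip_sqrt_estimate(a: int) -> int:
        assert 128 <= a < 512            # a/512 represents x in [0.25, 1.0)
        if a < 256:                      # 0.25 <= x < 0.5
            a = a * 2 + 1                # units of 1/512, rounded to nearest
        else:                            # 0.5 <= x < 1.0
            a = (a >> 1) << 1            # discard bottom bit
            a = (a + 1) * 2              # units of 1/256, rounded to nearest
        b = 512
        while a * (b + 1) * (b + 1) < 2**28:
            b += 1                       # largest b with b < 2^14 / sqrt(a)
        r = (b + 1) // 2                 # round to nearest
        assert 256 <= r < 512            # r/256 represents the estimate in [1.0, 2.0)
        return r

    assert recip_sqrt_estimate(256) == 361   # ~ 256/sqrt(0.5) = 362.04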

Library pseudocode for shared/functions/float/fpsqrt/FPSqrt

// FPSqrt()
// ========

bits(N) FPSqrt(bits(N) op, FPCRType fpcr)
    assert N IN {16,32,64};
    (fptype,sign,value) = FPUnpack(op, fpcr);

    if fptype == FPType_SNaN || fptype == FPType_QNaN then
        result = FPProcessNaN(fptype, op, fpcr);
    elsif fptype == FPType_Zero then
        result = FPZero(sign);
    elsif fptype == FPType_Infinity && sign == '0' then
        result = FPInfinity(sign);
    elsif sign == '1' then
        result = FPDefaultNaN();
        FPProcessException(FPExc_InvalidOp, fpcr);
    else
        result = FPRound(Sqrt(value), fpcr);
    return result;

Library pseudocode for shared/functions/float/fpsub/FPSub

// FPSub()
// =======

bits(N) FPSub(bits(N) op1, bits(N) op2, FPCRType fpcr)
    assert N IN {16,32,64};
    rounding = FPRoundingMode(fpcr);
    (type1,sign1,value1) = FPUnpack(op1, fpcr);
    (type2,sign2,value2) = FPUnpack(op2, fpcr);
    (done,result) = FPProcessNaNs(type1, type2, op1, op2, fpcr);
    if !done then
        inf1  = (type1 == FPType_Infinity); inf2  = (type2 == FPType_Infinity);
        zero1 = (type1 == FPType_Zero);     zero2 = (type2 == FPType_Zero);
        invalidop = inf1 && inf2 && sign1 == sign2;
        if invalidop then
            result = FPDefaultNaN();
            FPProcessException(FPExc_InvalidOp, fpcr);
        elsif (inf1 && sign1 == '0') || (inf2 && sign2 == '1') then
            result = FPInfinity('0');
        elsif (inf1 && sign1 == '1') || (inf2 && sign2 == '0') then
            result = FPInfinity('1');
        elsif zero1 && zero2 && sign1 == NOT(sign2) then
            result = FPZero(sign1);
        else
            result_value = value1 - value2;
            if result_value == 0.0 then  // Sign of exact zero result depends on rounding mode
                result_sign = if rounding == FPRounding_NEGINF then '1' else '0';
                result = FPZero(result_sign);
            else
                result = FPRound(result_value, fpcr, rounding);
    return result;

Library pseudocode for shared/functions/float/fpthree/FPThree

// FPThree()
// =========

bits(N) FPThree(bit sign)
    assert N IN {16,32,64};
    constant integer E = (if N == 16 then 5 elsif N == 32 then 8 else 11);
    constant integer F = N - (E + 1);
    exp  = '1':Zeros(E-1);
    frac = '1':Zeros(F-1);
    result = sign : exp : frac;
    return result;

Library pseudocode for shared/functions/float/fptofixed/FPToFixed

// FPToFixed()
// ===========
// Convert N-bit precision floating point OP to M-bit fixed point with
// FBITS fractional bits, controlled by UNSIGNED and ROUNDING.

bits(M) FPToFixed(bits(N) op, integer fbits, boolean unsigned, FPCRType fpcr,
                  FPRounding rounding)
    assert N IN {16,32,64};
    assert M IN {16,32,64};
    assert fbits >= 0;
    assert rounding != FPRounding_ODD;

    // Unpack using fpcr to determine if subnormals are flushed-to-zero.
    (fptype,sign,value) = FPUnpack(op, fpcr);

    // If NaN, set cumulative flag or take exception.
    if fptype == FPType_SNaN || fptype == FPType_QNaN then
        FPProcessException(FPExc_InvalidOp, fpcr);

    // Scale by fractional bits and produce integer rounded towards minus-infinity.
    value = value * 2.0^fbits;
    int_result = RoundDown(value);
    error = value - Real(int_result);

    // Determine whether supplied rounding mode requires an increment.
    case rounding of
        when FPRounding_TIEEVEN
            round_up = (error > 0.5 || (error == 0.5 && int_result<0> == '1'));
        when FPRounding_POSINF
            round_up = (error != 0.0);
        when FPRounding_NEGINF
            round_up = FALSE;
        when FPRounding_ZERO
            round_up = (error != 0.0 && int_result < 0);
        when FPRounding_TIEAWAY
            round_up = (error > 0.5 || (error == 0.5 && int_result >= 0));

    if round_up then int_result = int_result + 1;

    // Generate saturated result and exceptions.
    (result, overflow) = SatQ(int_result, M, unsigned);
    if overflow then
        FPProcessException(FPExc_InvalidOp, fpcr);
    elsif error != 0.0 then
        FPProcessException(FPExc_Inexact, fpcr);
    return result;
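A sketch of the scale / round / saturate flow for the round-to-zero case (the mode used by instructions such as FCVTZS); illustrative only, with the unpacked value modelled as a Python float and the helper name invented for this note:

    import math

    def fp_to_fixed(value: float, fbits: int, m: int, unsigned: bool):
        value *= 2.0 ** fbits                     # scale by fractional bits
        int_result = math.floor(value)            # RoundDown
        error = value - int_result
        if error != 0.0 and int_result < 0:       # FPRounding_ZERO increments negatives
            int_result += 1
        lo, hi = (0, 2**m - 1) if unsigned else (-2**(m-1), 2**(m-1) - 1)
        saturated = min(max(int_result, lo), hi)  # SatQ
        return saturated, saturated != int_result # (result, overflow)

    assert fp_to_fixed(-1.5, 0, 32, unsigned=True) == (0, True)     # saturates at 0
    assert fp_to_fixed(2.75, 2, 16, unsigned=False) == (11, False)  # 2.75 * 4 = 11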

Library pseudocode for shared/functions/float/fptofixedjs/FPToFixedJS

// FPToFixedJS()
// =============
// Converts a double precision floating point input value
// to a signed integer, with rounding to zero.

(bits(N), bit) FPToFixedJS(bits(M) op, FPCRType fpcr, boolean Is64)
    assert M == 64 && N == 32;

    // Unpack using fpcr to determine if subnormals are flushed-to-zero.
    (fptype,sign,value) = FPUnpack(op, fpcr);
    Z = '1';

    // If NaN, set cumulative flag or take exception.
    if fptype == FPType_SNaN || fptype == FPType_QNaN then
        FPProcessException(FPExc_InvalidOp, fpcr);
        Z = '0';

    int_result = RoundDown(value);
    error = value - Real(int_result);

    // Determine whether supplied rounding mode requires an increment.
    round_it_up = (error != 0.0 && int_result < 0);
    if round_it_up then int_result = int_result + 1;

    if int_result < 0 then
        result = int_result - 2^32*RoundUp(Real(int_result)/Real(2^32));
    else
        result = int_result - 2^32*RoundDown(Real(int_result)/Real(2^32));

    // Generate exceptions.
    if int_result < -(2^31) || int_result > (2^31)-1 then
        FPProcessException(FPExc_InvalidOp, fpcr);
        Z = '0';
    elsif error != 0.0 then
        FPProcessException(FPExc_Inexact, fpcr);
        Z = '0';
    elsif sign == '1' && value == 0.0 then
        Z = '0';
    elsif sign == '0' && value == 0.0 && !IsZero(op<51:0>) then
        Z = '0';
    if fptype == FPType_Infinity then result = 0;

    return (result<N-1:0>, Z);
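The distinctive step here is that out-of-range values wrap modulo 2^32 instead of saturating, matching JavaScript's ToInt32. A simplified illustration (the helper name is this note's; the flushed-denormal Z case is omitted):

    import math

    def fjcvtzs(value: float):
        int_result = math.trunc(value)        # round toward zero
        exact = (value == int_result)
        result = int_result % 2**32           # wrap rather than saturate
        in_range = -2**31 <= int_result <= 2**31 - 1
        neg_zero = (value == 0.0 and math.copysign(1.0, value) < 0)
        z = exact and in_range and not neg_zero
        return result, z

    assert fjcvtzs(2.0**32 + 5.0) == (5, False)   # out of range: wraps, Z clear
    assert fjcvtzs(-1.0) == (0xFFFFFFFF, True)    # exact and in range: Z set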

Library pseudocode for shared/functions/float/fptwo/FPTwo

// FPTwo()
// =======

bits(N) FPTwo(bit sign)
    assert N IN {16,32,64};
    constant integer E = (if N == 16 then 5 elsif N == 32 then 8 else 11);
    constant integer F = N - (E + 1);
    exp  = '1':Zeros(E-1);
    frac = Zeros(F);
    result = sign : exp : frac;
    return result;

Library pseudocode for shared/functions/float/fptype/FPType

enumeration FPType {FPType_Nonzero, FPType_Zero, FPType_Infinity,
                    FPType_QNaN, FPType_SNaN};

Library pseudocode for shared/functions/float/fpunpack/FPUnpack

// FPUnpack()
// ==========
//
// Used by data processing and int/fixed <-> FP conversion instructions.
// For half-precision data it ignores AHP, and observes FZ16.

(FPType, bit, real) FPUnpack(bits(N) fpval, FPCRType fpcr)
    fpcr.AHP = '0';
    (fp_type, sign, value) = FPUnpackBase(fpval, fpcr);
    return (fp_type, sign, value);

Library pseudocode for shared/functions/float/fpunpack/FPUnpackBase

// FPUnpackBase()
// ==============
//
// Unpack a floating-point number into its type, sign bit and the real number
// that it represents. The real number result has the correct sign for numbers
// and infinities, is very large in magnitude for infinities, and is 0.0 for
// NaNs. (These values are chosen to simplify the description of comparisons
// and conversions.)
//
// The 'fpcr' argument supplies FPCR control bits. Status information is
// updated directly in the FPSR where appropriate.

(FPType, bit, real) FPUnpackBase(bits(N) fpval, FPCRType fpcr)
    assert N IN {16,32,64};

    if N == 16 then
        sign   = fpval<15>;
        exp16  = fpval<14:10>;
        frac16 = fpval<9:0>;
        if IsZero(exp16) then
            // Produce zero if value is zero or flush-to-zero is selected
            if IsZero(frac16) || fpcr.FZ16 == '1' then
                fptype = FPType_Zero; value = 0.0;
            else
                fptype = FPType_Nonzero; value = 2.0^-14 * (Real(UInt(frac16)) * 2.0^-10);
        elsif IsOnes(exp16) && fpcr.AHP == '0' then  // Infinity or NaN in IEEE format
            if IsZero(frac16) then
                fptype = FPType_Infinity; value = 2.0^1000000;
            else
                fptype = if frac16<9> == '1' then FPType_QNaN else FPType_SNaN;
                value = 0.0;
        else
            fptype = FPType_Nonzero;
            value = 2.0^(UInt(exp16)-15) * (1.0 + Real(UInt(frac16)) * 2.0^-10);

    elsif N == 32 then
        sign   = fpval<31>;
        exp32  = fpval<30:23>;
        frac32 = fpval<22:0>;
        if IsZero(exp32) then
            // Produce zero if value is zero or flush-to-zero is selected.
            if IsZero(frac32) || fpcr.FZ == '1' then
                fptype = FPType_Zero; value = 0.0;
                if !IsZero(frac32) then  // Denormalized input flushed to zero
                    FPProcessException(FPExc_InputDenorm, fpcr);
            else
                fptype = FPType_Nonzero; value = 2.0^-126 * (Real(UInt(frac32)) * 2.0^-23);
        elsif IsOnes(exp32) then
            if IsZero(frac32) then
                fptype = FPType_Infinity; value = 2.0^1000000;
            else
                fptype = if frac32<22> == '1' then FPType_QNaN else FPType_SNaN;
                value = 0.0;
        else
            fptype = FPType_Nonzero;
            value = 2.0^(UInt(exp32)-127) * (1.0 + Real(UInt(frac32)) * 2.0^-23);

    else  // N == 64
        sign   = fpval<63>;
        exp64  = fpval<62:52>;
        frac64 = fpval<51:0>;
        if IsZero(exp64) then
            // Produce zero if value is zero or flush-to-zero is selected.
            if IsZero(frac64) || fpcr.FZ == '1' then
                fptype = FPType_Zero; value = 0.0;
                if !IsZero(frac64) then  // Denormalized input flushed to zero
                    FPProcessException(FPExc_InputDenorm, fpcr);
            else
                fptype = FPType_Nonzero; value = 2.0^-1022 * (Real(UInt(frac64)) * 2.0^-52);
        elsif IsOnes(exp64) then
            if IsZero(frac64) then
                fptype = FPType_Infinity; value = 2.0^1000000;
            else
                fptype = if frac64<51> == '1' then FPType_QNaN else FPType_SNaN;
                value = 0.0;
        else
            fptype = FPType_Nonzero;
            value = 2.0^(UInt(exp64)-1023) * (1.0 + Real(UInt(frac64)) * 2.0^-52);

    if sign == '1' then value = -value;
    return (fptype, sign, value);
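A sketch of the N == 32 case in Python, assuming FZ == 0 (no flush-to-zero) and using exact rationals for the real value; names are this note's, not architecture names:

    from fractions import Fraction

    def fp_unpack32(fpval: int):
        sign = (fpval >> 31) & 1
        exp  = (fpval >> 23) & 0xFF
        frac = fpval & 0x7FFFFF
        if exp == 0:
            if frac == 0:
                fptype, value = "Zero", Fraction(0)
            else:                                   # subnormal (FZ assumed 0)
                fptype, value = "Nonzero", Fraction(frac, 2**23) / 2**126
        elif exp == 0xFF:
            if frac == 0:
                fptype, value = "Infinity", Fraction(2)**1000   # stand-in for 2.0^1000000
            else:
                fptype, value = ("QNaN" if frac >> 22 else "SNaN"), Fraction(0)
        else:
            fptype = "Nonzero"
            value = Fraction(2)**(exp - 127) * (1 + Fraction(frac, 2**23))
        return fptype, sign, (-value if sign else value)

    assert fp_unpack32(0x3FC00000) == ("Nonzero", 0, Fraction(3, 2))   # 1.5
    assert fp_unpack32(0xFF800000)[0] == "Infinity"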

Library pseudocode for shared/functions/float/fpunpack/FPUnpackCV

// FPUnpackCV()
// ============
//
// Used for FP <-> FP conversion instructions.
// For half-precision data ignores FZ16 and observes AHP.

(FPType, bit, real) FPUnpackCV(bits(N) fpval, FPCRType fpcr)
    fpcr.FZ16 = '0';
    (fp_type, sign, value) = FPUnpackBase(fpval, fpcr);
    return (fp_type, sign, value);

Library pseudocode for shared/functions/float/fpzero/FPZero

// FPZero()
// ========

bits(N) FPZero(bit sign)
    assert N IN {16,32,64};
    constant integer E = (if N == 16 then 5 elsif N == 32 then 8 else 11);
    constant integer F = N - (E + 1);
    exp  = Zeros(E);
    frac = Zeros(F);
    result = sign : exp : frac;
    return result;

Library pseudocode for shared/functions/float/vfpexpandimm/VFPExpandImm

// VFPExpandImm()
// ==============

bits(N) VFPExpandImm(bits(8) imm8)
    assert N IN {16,32,64};
    constant integer E = (if N == 16 then 5 elsif N == 32 then 8 else 11);
    constant integer F = N - E - 1;
    sign = imm8<7>;
    exp  = NOT(imm8<6>):Replicate(imm8<6>,E-3):imm8<5:4>;
    frac = imm8<3:0>:Zeros(F-4);
    result = sign : exp : frac;
    return result;
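The exponent construction is the only subtle part: bit 6 of the immediate is inverted for the top exponent bit and replicated below it. An illustrative transcription for N == 32 (E = 8, F = 23; helper name invented for this note):

    # VFPExpandImm() for binary32 as plain bit manipulation.
    def vfp_expand_imm32(imm8: int) -> int:
        assert 0 <= imm8 <= 0xFF
        sign = (imm8 >> 7) & 1
        b6   = (imm8 >> 6) & 1
        # exp = NOT(imm8<6>) : Replicate(imm8<6>, 5) : imm8<5:4>
        exp  = ((b6 ^ 1) << 7) | ((0b11111 if b6 else 0) << 2) | ((imm8 >> 4) & 0b11)
        frac = (imm8 & 0xF) << 19                  # imm8<3:0> : Zeros(F-4)
        return (sign << 31) | (exp << 23) | frac

    assert vfp_expand_imm32(0x70) == 0x3F800000    # expands to 1.0f
    assert vfp_expand_imm32(0x00) == 0x40000000    # expands to 2.0f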

Library pseudocode for shared/functions/integer/AddWithCarry

// AddWithCarry()
// ==============
// Integer addition with carry input, returning result and NZCV flags

(bits(N), bits(4)) AddWithCarry(bits(N) x, bits(N) y, bit carry_in)
    integer unsigned_sum = UInt(x) + UInt(y) + UInt(carry_in);
    integer signed_sum   = SInt(x) + SInt(y) + UInt(carry_in);
    bits(N) result = unsigned_sum<N-1:0>; // same value as signed_sum<N-1:0>
    bit n = result<N-1>;
    bit z = if IsZero(result) then '1' else '0';
    bit c = if UInt(result) == unsigned_sum then '0' else '1';
    bit v = if SInt(result) == signed_sum then '0' else '1';
    return (result, n:z:c:v);
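An illustrative transcription for N == 32 (helper names invented for this note), showing how C and V fall out of comparing the truncated result against the unsigned and signed sums:

    def add_with_carry32(x: int, y: int, carry_in: int):
        mask = 0xFFFFFFFF
        def sint(v): return v - 2**32 if v & 0x80000000 else v
        unsigned_sum = (x & mask) + (y & mask) + carry_in
        signed_sum   = sint(x & mask) + sint(y & mask) + carry_in
        result = unsigned_sum & mask
        n = result >> 31
        z = int(result == 0)
        c = int(result != unsigned_sum)       # unsigned overflow (carry out)
        v = int(sint(result) != signed_sum)   # signed overflow
        return result, (n, z, c, v)

    # A subtract-from-self, x + NOT(x) + 1: result 0, flags N=0 Z=1 C=1 V=0.
    res, nzcv = add_with_carry32(5, ~5 & 0xFFFFFFFF, 1)
    assert res == 0 and nzcv == (0, 1, 1, 0)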

Library pseudocode for shared/functions/memory/AArch64.BranchAddr

// AArch64.BranchAddr()
// ====================
// Return the virtual address with tag bits removed for storing to the program counter.

bits(64) AArch64.BranchAddr(bits(64) vaddress)
    assert !UsingAArch32();
    msbit = AddrTop(vaddress, TRUE, PSTATE.EL);
    if msbit == 63 then
        return vaddress;
    elsif (PSTATE.EL IN {EL0, EL1} || IsInHost()) && vaddress<msbit> == '1' then
        return SignExtend(vaddress<msbit:0>);
    else
        return ZeroExtend(vaddress<msbit:0>);

Library pseudocode for shared/functions/memory/AccType

enumeration AccType {AccType_NORMAL, AccType_VEC,        // Normal loads and stores
                     AccType_STREAM, AccType_VECSTREAM,  // Streaming loads and stores
                     AccType_ATOMIC, AccType_ATOMICRW,   // Atomic loads and stores
                     AccType_ORDERED, AccType_ORDEREDRW, // Load-Acquire and Store-Release
                     AccType_ORDEREDATOMIC,              // Load-Acquire and Store-Release with atomic access
                     AccType_ORDEREDATOMICRW,
                     AccType_LIMITEDORDERED,             // Load-LOAcquire and Store-LORelease
                     AccType_UNPRIV,                     // Load and store unprivileged
                     AccType_IFETCH,                     // Instruction fetch
                     AccType_PTW,                        // Page table walk
                     AccType_NONFAULT,                   // Non-faulting loads
                     AccType_CNOTFIRST,                  // Contiguous FF load, not first element
                     AccType_NV2REGISTER,                // MRS/MSR instruction used at EL1 and which is converted
                                                         // to a memory access that uses the EL2 translation regime
                     // Other operations
                     AccType_DC,                         // Data cache maintenance
                     AccType_DC_UNPRIV,                  // Data cache maintenance instruction used at EL0
                     AccType_IC,                         // Instruction cache maintenance
                     AccType_DCZVA,                      // DC ZVA instructions
                     AccType_AT};                        // Address translation

Library pseudocode for shared/functions/memory/AccessDescriptor

type AccessDescriptor is (
    AccType  acctype,
    MPAMinfo mpam,
    boolean  page_table_walk,
    boolean  secondstage,
    boolean  s2fs1walk,
    integer  level
)

Library pseudocode for shared/functions/memory/AddrTop

// AddrTop()
// =========
// Return the MSB number of a virtual address in the stage 1 translation regime for "el".
// If EL1 is using AArch64 then addresses from EL0 using AArch32 are zero-extended to 64 bits.

integer AddrTop(bits(64) address, boolean IsInstr, bits(2) el)
    assert HaveEL(el);
    regime = S1TranslationRegime(el);
    if ELUsingAArch32(regime) then
        // AArch32 translation regime.
        return 31;
    else
        if EffectiveTBI(address, IsInstr, el) == '1' then
            return 55;
        else
            return 63;

Library pseudocode for shared/functions/memory/AddressDescriptor

type AddressDescriptor is (
    FaultRecord      fault,    // fault.statuscode indicates whether the address is valid
    MemoryAttributes memattrs,
    FullAddress      paddress,
    bits(64)         vaddress
)

Library pseudocode for shared/functions/memory/Allocation

constant bits(2) MemHint_No  = '00'; // No Read-Allocate, No Write-Allocate
constant bits(2) MemHint_WA  = '01'; // No Read-Allocate, Write-Allocate
constant bits(2) MemHint_RA  = '10'; // Read-Allocate, No Write-Allocate
constant bits(2) MemHint_RWA = '11'; // Read-Allocate, Write-Allocate

Library pseudocode for shared/functions/memory/BigEndian

// BigEndian()
// ===========

boolean BigEndian()
    boolean bigend;
    if UsingAArch32() then
        bigend = (PSTATE.E != '0');
    elsif PSTATE.EL == EL0 then
        bigend = (SCTLR[].E0E != '0');
    else
        bigend = (SCTLR[].EE != '0');
    return bigend;

Library pseudocode for shared/functions/memory/BigEndianReverse

// BigEndianReverse()
// ==================

bits(width) BigEndianReverse (bits(width) value)
    assert width IN {8, 16, 32, 64, 128};
    integer half = width DIV 2;
    if width == 8 then return value;
    return BigEndianReverse(value<half-1:0>) : BigEndianReverse(value<width-1:half>);

Library pseudocode for shared/functions/memory/Cacheability

constant bits(2) MemAttr_NC = '00'; // Non-cacheable
constant bits(2) MemAttr_WT = '10'; // Write-through
constant bits(2) MemAttr_WB = '11'; // Write-back

Library pseudocode for shared/functions/memory/CreateAccessDescriptor

// CreateAccessDescriptor()
// ========================

AccessDescriptor CreateAccessDescriptor(AccType acctype)
    AccessDescriptor accdesc;
    accdesc.acctype = acctype;
    accdesc.mpam = GenMPAMcurEL(acctype IN {AccType_IFETCH, AccType_IC});
    accdesc.page_table_walk = FALSE;
    return accdesc;

Library pseudocode for shared/functions/memory/CreateAccessDescriptorPTW

// CreateAccessDescriptorPTW()
// ===========================

AccessDescriptor CreateAccessDescriptorPTW(AccType acctype, boolean secondstage,
                                           boolean s2fs1walk, integer level)
    AccessDescriptor accdesc;
    accdesc.acctype = acctype;
    accdesc.mpam = GenMPAMcurEL(acctype IN {AccType_IFETCH, AccType_IC});
    accdesc.page_table_walk = TRUE;
    accdesc.s2fs1walk = s2fs1walk;
    accdesc.secondstage = secondstage;
    accdesc.level = level;
    return accdesc;

Library pseudocode for shared/functions/memory/DataMemoryBarrier

DataMemoryBarrier(MBReqDomain domain, MBReqTypes types);

Library pseudocode for shared/functions/memory/DataSynchronizationBarrier

DataSynchronizationBarrier(MBReqDomain domain, MBReqTypes types);

Library pseudocode for shared/functions/memory/DescriptorUpdate

type DescriptorUpdate is (
    boolean AF,                  // AF needs to be set
    boolean AP,                  // AP[2] / S2AP[2] will be modified
    AddressDescriptor descaddr   // Descriptor to be updated
)

Library pseudocode for shared/functions/float/fpmuladdh/FPProcessNaNs3H

// FPProcessNaNs3H() // ================= (boolean, bits(N)) FPProcessNaNs3H(FPType type1, FPType type2, FPType type3, bits(N) op1, bits(N DIV 2) op2, bits(N DIV 2) op3, FPCRType fpcr) assert N IN {32,64}; bits(N) result; // When TRUE, use alternative NaN propagation rules. boolean altfp = HaveAltFP() && !UsingAArch32() && fpcr.AH == '1'; boolean op1_nan = type1 IN {FPType_SNaN, FPType_QNaN}; boolean op2_nan = type2 IN {FPType_SNaN, FPType_QNaN}; boolean op3_nan = type3 IN {FPType_SNaN, FPType_QNaN}; boolean fpexc = TRUE; if altfp then if (type1 == FPType_SNaN || type2 == FPType_SNaN || type3 == FPType_SNaN) then type_nan = FPType_SNaN; else type_nan = FPType_QNaN; if altfp && op1_nan && op2_nan && op3_nan then done = TRUE; result = FPConvertNaN(FPProcessNaN(type_nan, op2, fpcr, fpexc)); // <n> register NaN selected elsif altfp && op2_nan && (op1_nan || op3_nan) then done = TRUE; result = FPConvertNaN(FPProcessNaN(type_nan, op2, fpcr, fpexc)); // <n> register NaN selected elsif altfp && op3_nan && op1_nan then done = TRUE; result = FPConvertNaN(FPProcessNaN(type_nan, op3, fpcr, fpexc)); // <m> register NaN selected elsif type1 == FPType_SNaN then done = TRUE; result = FPProcessNaN(type1, op1, fpcr, fpexc); elsif type2 == FPType_SNaN then done = TRUE; result = FPConvertNaN(FPProcessNaN(type2, op2, fpcr, fpexc)); elsif type3 == FPType_SNaN then done = TRUE; result = FPConvertNaN(FPProcessNaN(type3, op3, fpcr, fpexc)); elsif type1 == FPType_QNaN then done = TRUE; result = FPProcessNaN(type1, op1, fpcr, fpexc); elsif type2 == FPType_QNaN then done = TRUE; result = FPConvertNaN(FPProcessNaN(type2, op2, fpcr, fpexc)); elsif type3 == FPType_QNaN then done = TRUE; result = FPConvertNaN(FPProcessNaN(type3, op3, fpcr, fpexc)); else done = FALSE; result = Zeros(); // 'Don't care' result return (done, result);

Library pseudocode for shared/functions/memory/DeviceType

enumeration DeviceType {DeviceType_GRE, DeviceType_nGRE, DeviceType_nGnRE, DeviceType_nGnRnE};

Library pseudocode for shared/functions/float/fpmulx/FPMulX

// FPMulX() // ======== bits(N) FPMulX(bits(N) op1, bits(N) op2, FPCRType fpcr) assert N IN {16,32,64}; bits(N) result; (type1,sign1,value1) = FPUnpack(op1, fpcr); (type2,sign2,value2) = FPUnpack(op2, fpcr); (done,result) = FPProcessNaNs(type1, type2, op1, op2, fpcr); if !done then inf1 = (type1 == FPType_Infinity); inf2 = (type2 == FPType_Infinity); zero1 = (type1 == FPType_Zero); zero2 = (type2 == FPType_Zero); if (inf1 && zero2) || (zero1 && inf2) then result = FPTwo(sign1 EOR sign2); elsif inf1 || inf2 then result = FPInfinity(sign1 EOR sign2); elsif zero1 || zero2 then result = FPZero(sign1 EOR sign2); else result = FPRound(value1*value2, fpcr); FPProcessDenorms(type1, type2, N, fpcr); return result;

Library pseudocode for shared/functions/memory/EffectiveTBI

// EffectiveTBI() // ============== // Returns the effective TBI in the AArch64 stage 1 translation regime for "el". bit EffectiveTBI(bits(64) address, boolean IsInstr, bits(2) el) assert HaveEL(el); regime = S1TranslationRegime(el); assert(!ELUsingAArch32(regime)); case regime of when EL1 tbi = if address<55> == '1' then TCR_EL1.TBI1 else TCR_EL1.TBI0; if HavePACExt() then tbid = if address<55> == '1' then TCR_EL1.TBID1 else TCR_EL1.TBID0; when EL2 if HaveVirtHostExt() && ELIsInHost(el) then tbi = if address<55> == '1' then TCR_EL2.TBI1 else TCR_EL2.TBI0; if HavePACExt() then tbid = if address<55> == '1' then TCR_EL2.TBID1 else TCR_EL2.TBID0; else tbi = TCR_EL2.TBI; if HavePACExt() then tbid = TCR_EL2.TBID; when EL3 tbi = TCR_EL3.TBI; if HavePACExt() then tbid = TCR_EL3.TBID; return (if tbi == '1' && (!HavePACExt() || tbid == '0' || !IsInstr) then '1' else '0');

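As a non-normative sketch of what EffectiveTBI enables: when the effective TBI bit is '1', the top eight address bits are ignored for translation, so a checker can canonicalise a tagged pointer by replicating bit 55 (the bit that selects between the TTBR0 and TTBR1 ranges) over bits 63:56. The helper below is a hypothetical simplification, not the architected address-forming logic:

#include <stdint.h>

/* Replace the (ignored) top byte of a tagged VA with copies of bit 55. */
static uint64_t strip_top_byte(uint64_t va) {
    uint64_t bit55 = (va >> 55) & 1;
    uint64_t top   = bit55 ? 0xFFull : 0x00ull;
    return (va & 0x00FFFFFFFFFFFFFFull) | (top << 56);
}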
Library pseudocode for shared/functions/float/fpneg/FPNeg

// FPNeg() // ======= bits(N) FPNeg(bits(N) op) assert N IN {16,32,64}; if !UsingAArch32() && HaveAltFP() then FPCRType fpcr = FPCR[]; if fpcr.AH == '1' then (fptype, -, -) = FPUnpack(op, fpcr, FALSE); if fptype IN {FPType_SNaN, FPType_QNaN} then return op; // When fpcr.AH=1, sign of NaN has no consequence return NOT(op<N-1>) : op<N-2:0>;

Library pseudocode for shared/functions/memory/EffectiveTCMA

// EffectiveTCMA() // =============== // Returns the effective TCMA of a virtual address in the stage 1 translation regime for "el". bit EffectiveTCMA(bits(64) address, bits(2) el) assert HaveEL(el); regime = S1TranslationRegime(el); assert(!ELUsingAArch32(regime)); case regime of when EL1 tcma = if address<55> == '1' then TCR_EL1.TCMA1 else TCR_EL1.TCMA0; when EL2 if HaveVirtHostExt() && ELIsInHost(el) then tcma = if address<55> == '1' then TCR_EL2.TCMA1 else TCR_EL2.TCMA0; else tcma = TCR_EL2.TCMA; when EL3 tcma = TCR_EL3.TCMA; return tcma;

Library pseudocode for shared/functions/float/fponepointfive/FPOnePointFive

// FPOnePointFive() // ================ bits(N) FPOnePointFive(bit sign) assert N IN {16,32,64}; constant integer E = (if N == 16 then 5 elsif N == 32 then 8 else 11); constant integer F = N - (E + 1); exp = '0':Ones(E-1); frac = '1':Zeros(F-1); result = sign : exp : frac; return result;

Library pseudocode for shared/functions/memory/Fault

enumeration Fault {Fault_None, Fault_AccessFlag, Fault_Alignment, Fault_Background, Fault_Domain, Fault_Permission, Fault_Translation, Fault_AddressSize, Fault_SyncExternal, Fault_SyncExternalOnWalk, Fault_SyncParity, Fault_SyncParityOnWalk, Fault_AsyncParity, Fault_AsyncExternal, Fault_Debug, Fault_TLBConflict, Fault_BranchTarget, Fault_HWUpdateAccessFlag, Fault_Lockdown, Fault_Exclusive, Fault_ICacheMaint};

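For a concrete instance of the FPOnePointFive packing (sign : '0':Ones(E-1) : '1':Zeros(F-1)), here is a non-normative C rendering for N == 32:

#include <stdint.h>

/* 1.5 in binary32: exp = '0':Ones(7) = 0x7F (unbiased 0), frac = '1' followed
 * by zeros (i.e. 0.5), so the value is (1 + 0.5) * 2^0 = 1.5. */
static uint32_t fp32_one_point_five(unsigned sign) {
    uint32_t exp  = 0x7Fu;        /* '0' : Ones(E-1), E = 8   */
    uint32_t frac = 1u << 22;     /* '1' : Zeros(F-1), F = 23 */
    return ((uint32_t)sign << 31) | (exp << 23) | frac;
}
/* fp32_one_point_five(0) == 0x3FC00000, the binary32 encoding of 1.5 */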
Library pseudocode for shared/functions/float/fpprocessdenorms/FPProcessDenorm

// FPProcessDenorm() // ================= // Handles denormal input in case of single-precision or double-precision // when using alternative floating-point mode. FPProcessDenorm(FPType fptype, integer N, FPCRType fpcr) boolean altfp = HaveAltFP() && !UsingAArch32() && fpcr.AH == '1'; if altfp && N != 16 && fptype == FPType_Denormal then FPProcessException(FPExc_InputDenorm, fpcr);

Library pseudocode for shared/functions/memory/FaultRecord

type FaultRecord is (Fault statuscode, // Fault Status AccType acctype, // Type of access that faulted FullAddress ipaddress, // Intermediate physical address boolean s2fs1walk, // Is on a Stage 1 page table walk boolean write, // TRUE for a write, FALSE for a read integer level, // For translation, access flag and permission faults bit extflag, // IMPLEMENTATION DEFINED syndrome for external aborts boolean secondstage, // Is a Stage 2 abort bits(4) domain, // Domain number, AArch32 only bits(2) errortype, // [Armv8.2 RAS] AArch32 AET or AArch64 SET bits(4) debugmoe) // Debug method of entry, from AArch32 only

type PARTIDtype = bits(16); type PMGtype = bits(8); type MPAMinfo is ( bit mpam_ns, PARTIDtype partid, PMGtype pmg )

Library pseudocode for shared/functions/float/fpprocessdenorms/FPProcessDenorms

// FPProcessDenorms() // ================== // Handles denormal input in case of single-precision or double-precision // when using alternative floating-point mode. FPProcessDenorms(FPType type1, FPType type2, integer N, FPCRType fpcr) boolean altfp = HaveAltFP() && !UsingAArch32() && fpcr.AH == '1'; if altfp && N != 16 && (type1 == FPType_Denormal || type2 == FPType_Denormal) then FPProcessException(FPExc_InputDenorm, fpcr);

Library pseudocode for shared/functions/memory/FullAddress

type FullAddress is ( bits(52) address, bit NS // '0' = Secure, '1' = Non-secure )

Library pseudocode for shared/functions/float/fpprocessdenorms/FPProcessDenorms3

// FPProcessDenorms3() // =================== // Handles denormal input in case of single-precision or double-precision // when using alternative floating-point mode. FPProcessDenorms3(FPType type1, FPType type2, FPType type3, integer N, FPCRType fpcr) boolean altfp = HaveAltFP() && !UsingAArch32() && fpcr.AH == '1'; if altfp && N != 16 && (type1 == FPType_Denormal || type2 == FPType_Denormal || type3 == FPType_Denormal) then FPProcessException(FPExc_InputDenorm, fpcr);

Library pseudocode for shared/functions/memory/Hint_Prefetch

// Signals the memory system that memory accesses of type HINT to or from the specified address are // likely in the near future. The memory system may take some action to speed up the memory // accesses when they do occur, such as pre-loading the specified address into one or more // caches as indicated by the innermost cache level target (0=L1, 1=L2, etc.) and non-temporal hint // stream. Any or all prefetch hints may be treated as a NOP. A prefetch hint must not cause a // synchronous abort due to Alignment or Translation faults and the like. Its only effect on // software-visible state should be on caches and TLBs associated with address, which must be // accessible by reads, writes or execution, as defined in the translation regime of the current // Exception level. It is guaranteed not to access Device memory. // A Prefetch_EXEC hint must not result in an access that could not be performed by a speculative // instruction fetch, therefore if all associated MMUs are disabled, then it cannot access any // memory location that cannot be accessed by instruction fetches. Hint_Prefetch(bits(64) address, PrefetchHint hint, integer target, boolean stream);

Library pseudocode for shared/functions/float/fpprocessexception/FPProcessException

// FPProcessException() // ==================== // // The 'fpcr' argument supplies FPCR control bits. Status information is // updated directly in the FPSR where appropriate. FPProcessException(FPExc exception, FPCRType fpcr) // Determine the cumulative exception bit number case exception of when FPExc_InvalidOp cumul = 0; when FPExc_DivideByZero cumul = 1; when FPExc_Overflow cumul = 2; when FPExc_Underflow cumul = 3; when FPExc_Inexact cumul = 4; when FPExc_InputDenorm cumul = 7; enable = cumul + 8; if fpcr<enable> == '1' then // Trapping of the exception enabled. // It is IMPLEMENTATION DEFINED whether the enable bit may be set at all, and // if so then how exceptions may be accumulated before calling FPTrappedException() IMPLEMENTATION_DEFINED "floating-point trap handling"; elsif UsingAArch32() then // Set the cumulative exception bit FPSCR<cumul> = '1'; else // Set the cumulative exception bit FPSR<cumul> = '1'; return;

Library pseudocode for shared/functions/memory/MBReqDomain

enumeration MBReqDomain {MBReqDomain_Nonshareable, MBReqDomain_InnerShareable, MBReqDomain_OuterShareable, MBReqDomain_FullSystem};

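The case statement above fixes the cumulative-bit assignments, and the matching trap-enable bit is always cumul + 8. A non-normative C table of the same mapping:

enum fpexc { FPEXC_INVALIDOP, FPEXC_DIVBYZERO, FPEXC_OVERFLOW,
             FPEXC_UNDERFLOW, FPEXC_INEXACT, FPEXC_INPUTDENORM };

/* Cumulative-status bit positions in FPSR/FPSCR, per the case statement
 * above; the corresponding trap enable sits at cumul + 8. */
static int cumulative_bit(enum fpexc e) {
    switch (e) {
    case FPEXC_INVALIDOP:   return 0;  /* IOC, enable IOE at bit 8  */
    case FPEXC_DIVBYZERO:   return 1;  /* DZC, enable DZE at bit 9  */
    case FPEXC_OVERFLOW:    return 2;  /* OFC, enable OFE at bit 10 */
    case FPEXC_UNDERFLOW:   return 3;  /* UFC, enable UFE at bit 11 */
    case FPEXC_INEXACT:     return 4;  /* IXC, enable IXE at bit 12 */
    case FPEXC_INPUTDENORM: return 7;  /* IDC, enable IDE at bit 15 */
    }
    return -1;
}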
Library pseudocode for shared/functions/float/fpprocessnan/FPProcessNaN

// FPProcessNaN() // ============== bits(N) FPProcessNaN(FPType fptype, bits(N) op, FPCRType fpcr) boolean fpexc = TRUE; // Generate floating-point exceptions return FPProcessNaN(fptype, op, fpcr, fpexc); // FPProcessNaN() // ============== // Handle NaN input operands, returning the operand or default NaN value // if fpcr.DN is selected. The 'fpcr' argument supplies the FPCR control bits. // The 'fpexc' argument controls the generation of exceptions, regardless of // whether 'fptype' is a signalling NaN or a quiet NaN. bits(N) FPProcessNaN(FPType fptype, bits(N) op, FPCRType fpcr, boolean fpexc) assert N IN {16,32,64}; assert fptype IN {FPType_QNaN, FPType_SNaN}; case N of when 16 topfrac = 9; when 32 topfrac = 22; when 64 topfrac = 51; result = op; if fptype == FPType_SNaN then result<topfrac> = '1'; if fpexc then FPProcessException(FPExc_InvalidOp, fpcr); if fpcr.DN == '1' then // DefaultNaN requested result = FPDefaultNaN(); return result;

Library pseudocode for shared/functions/memory/MBReqTypes

enumeration MBReqTypes {MBReqTypes_Reads, MBReqTypes_Writes, MBReqTypes_All};

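FPProcessNaN quiets a signalling NaN by setting the top fraction bit (topfrac) and substitutes the default NaN when FPCR.DN is set. A non-normative binary32 rendering, with 'dn' standing in for fpcr.DN:

#include <stdint.h>

#define FP32_DEFAULT_QNAN 0x7FC00000u  /* positive, exp all-ones, top frac bit */

/* Mirror of the binary32 case: quiet the NaN via fraction bit 22, or
 * return the default NaN when DN == '1'. */
static uint32_t fp32_process_nan(uint32_t op, int dn) {
    uint32_t result = op | (1u << 22);   /* SNaN -> QNaN */
    if (dn) result = FP32_DEFAULT_QNAN;  /* fpcr.DN == '1' */
    return result;
}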
Library pseudocode for shared/functions/float/fpprocessnans/FPProcessNaNs

// FPProcessNaNs() // =============== (boolean, bits(N)) FPProcessNaNs(FPType type1, FPType type2, bits(N) op1, bits(N) op2, FPCRType fpcr) boolean altfmaxfmin = FALSE; // Do not use altfp mode for FMIN, FMAX and variants boolean fpexc = TRUE; // Generate floating-point exceptions return FPProcessNaNs(type1, type2, op1, op2, fpcr, altfmaxfmin, fpexc); // FPProcessNaNs() // =============== // // The boolean part of the return value says whether a NaN has been found and // processed. The bits(N) part is only relevant if it has and supplies the // result of the operation. // // The 'fpcr' argument supplies FPCR control bits and 'altfmaxfmin' controls // alternative floating-point behaviour for FMAX, FMIN and variants. 'fpexc' // controls the generation of floating-point exceptions. Status information // is updated directly in the FPSR where appropriate. (boolean, bits(N)) FPProcessNaNs(FPType type1, FPType type2, bits(N) op1, bits(N) op2, FPCRType fpcr, boolean altfmaxfmin, boolean fpexc) assert N IN {16,32,64}; boolean altfp = HaveAltFP() && !UsingAArch32() && fpcr.AH == '1'; boolean op1_nan = type1 IN {FPType_SNaN, FPType_QNaN}; boolean op2_nan = type2 IN {FPType_SNaN, FPType_QNaN}; boolean any_snan = type1 == FPType_SNaN || type2 == FPType_SNaN; FPType type_nan = if any_snan then FPType_SNaN else FPType_QNaN; if altfmaxfmin && (op1_nan || op2_nan) then FPProcessException(FPExc_InvalidOp, fpcr); done = TRUE; sign2 = op2<N-1>; result = if type2 == FPType_Zero then FPZero(sign2) else op2; elsif altfp && op1_nan && op2_nan then done = TRUE; result = FPProcessNaN(type_nan, op1, fpcr, fpexc); // <n> register NaN selected elsif type1 == FPType_SNaN then done = TRUE; result = FPProcessNaN(type1, op1, fpcr, fpexc); elsif type2 == FPType_SNaN then done = TRUE; result = FPProcessNaN(type2, op2, fpcr, fpexc); elsif type1 == FPType_QNaN then done = TRUE; result = FPProcessNaN(type1, op1, fpcr, fpexc); elsif type2 == FPType_QNaN then done = TRUE; result = FPProcessNaN(type2, op2, fpcr, fpexc); else done = FALSE; result = Zeros(); // 'Don't care' result return (done, result);

Library pseudocode for shared/functions/memory/MemAttrHints

type MemAttrHints is ( bits(2) attrs, // See MemAttr_*, Cacheability attributes bits(2) hints, // See MemHint_*, Allocation hints boolean transient )

Library pseudocode for shared/functions/float/fpprocessnans3/FPProcessNaNs3

// FPProcessNaNs3() // ================ (boolean, bits(N)) FPProcessNaNs3(FPType type1, FPType type2, FPType type3, bits(N) op1, bits(N) op2, bits(N) op3, FPCRType fpcr) boolean fpexc = TRUE; // Generate floating-point exceptions return FPProcessNaNs3(type1, type2, type3, op1, op2, op3, fpcr, fpexc); // FPProcessNaNs3() // ================ // The boolean part of the return value says whether a NaN has been found and // processed. The bits(N) part is only relevant if it has and supplies the // result of the operation. // // The 'fpcr' argument supplies FPCR control bits and 'fpexc' controls the // generation of floating-point exceptions. Status information is updated // directly in the FPSR where appropriate. (boolean, bits(N)) FPProcessNaNs3(FPType type1, FPType type2, FPType type3, bits(N) op1, bits(N) op2, bits(N) op3, FPCRType fpcr, boolean fpexc) assert N IN {16,32,64}; boolean op1_nan = type1 IN {FPType_SNaN, FPType_QNaN}; boolean op2_nan = type2 IN {FPType_SNaN, FPType_QNaN}; boolean op3_nan = type3 IN {FPType_SNaN, FPType_QNaN}; boolean altfp = HaveAltFP() && !UsingAArch32() && fpcr.AH == '1'; if altfp then if type1 == FPType_SNaN || type2 == FPType_SNaN || type3 == FPType_SNaN then type_nan = FPType_SNaN; else type_nan = FPType_QNaN; if altfp && op1_nan && op2_nan && op3_nan then done = TRUE; result = FPProcessNaN(type_nan, op2, fpcr, fpexc); // <n> register NaN selected elsif altfp && op2_nan && (op1_nan || op3_nan) then done = TRUE; result = FPProcessNaN(type_nan, op2, fpcr, fpexc); // <n> register NaN selected elsif altfp && op3_nan && op1_nan then done = TRUE; result = FPProcessNaN(type_nan, op3, fpcr, fpexc); // <m> register NaN selected elsif type1 == FPType_SNaN then done = TRUE; result = FPProcessNaN(type1, op1, fpcr, fpexc); elsif type2 == FPType_SNaN then done = TRUE; result = FPProcessNaN(type2, op2, fpcr, fpexc); elsif type3 == FPType_SNaN then done = TRUE; result = FPProcessNaN(type3, op3, fpcr, fpexc); elsif type1 == FPType_QNaN then done = TRUE; result = FPProcessNaN(type1, op1, fpcr, fpexc); elsif type2 == FPType_QNaN then done = TRUE; result = FPProcessNaN(type2, op2, fpcr, fpexc); elsif type3 == FPType_QNaN then done = TRUE; result = FPProcessNaN(type3, op3, fpcr, fpexc); else done = FALSE; result = Zeros(); // 'Don't care' result return (done, result);

Library pseudocode for shared/functions/memory/MemType

enumeration MemType {MemType_Normal, MemType_Device};

Library pseudocode for shared/functions/float/fprecipestimate/FPRecipEstimate

// FPRecipEstimate() // ================= bits(N) FPRecipEstimate(bits(N) operand, FPCRType fpcr) assert N IN {16,32,64}; // When using alternative floating-point behaviour, do not generate // floating-point exceptions, flush denormal input and output to zero, // and use RNE rounding mode. boolean altfp = HaveAltFP() && !UsingAArch32() && fpcr.AH == '1'; boolean fpexc = !altfp; if altfp then fpcr.<FIZ,FZ> = '11'; if altfp then fpcr.RMode = '00'; (fptype,sign,value) = FPUnpack(operand, fpcr, fpexc); FPRounding rounding = FPRoundingMode(fpcr); if fptype == FPType_SNaN || fptype == FPType_QNaN then result = FPProcessNaN(fptype, operand, fpcr, fpexc); elsif fptype == FPType_Infinity then result = FPZero(sign); elsif fptype == FPType_Zero then result = FPInfinity(sign); if fpexc then FPProcessException(FPExc_DivideByZero, fpcr); elsif ( (N == 16 && Abs(value) < 2.0^-16) || (N == 32 && Abs(value) < 2.0^-128) || (N == 64 && Abs(value) < 2.0^-1024) ) then case rounding of when FPRounding_TIEEVEN overflow_to_inf = TRUE; when FPRounding_POSINF overflow_to_inf = (sign == '0'); when FPRounding_NEGINF overflow_to_inf = (sign == '1'); when FPRounding_ZERO overflow_to_inf = FALSE; result = if overflow_to_inf then FPInfinity(sign) else FPMaxNormal(sign); if fpexc then FPProcessException(FPExc_Overflow, fpcr); FPProcessException(FPExc_Inexact, fpcr); elsif ((fpcr.FZ == '1' && N != 16) || (fpcr.FZ16 == '1' && N == 16)) && ( (N == 16 && Abs(value) >= 2.0^14) || (N == 32 && Abs(value) >= 2.0^126) || (N == 64 && Abs(value) >= 2.0^1022) ) then // Result flushed to zero of correct sign result = FPZero(sign); // Flush-to-zero never generates a trapped exception. if UsingAArch32() then FPSCR.UFC = '1'; else if fpexc then FPSR.UFC = '1'; else // Scale to a fixed point value in the range 0.5 <= x < 1.0 in steps of 1/512, and // calculate result exponent. Scaled value has copied sign bit, // exponent = 1022 = double-precision biased version of -1, // fraction = original fraction case N of when 16 fraction = operand<9:0> : Zeros(42); exp = UInt(operand<14:10>); when 32 fraction = operand<22:0> : Zeros(29); exp = UInt(operand<30:23>); when 64 fraction = operand<51:0>; exp = UInt(operand<62:52>); if exp == 0 then if fraction<51> == '0' then exp = -1; fraction = fraction<49:0>:'00'; else fraction = fraction<50:0>:'0'; integer scaled; boolean increasedprecision = N==32 && HaveFeatRPRES() && altfp; if !increasedprecision then scaled = UInt('1':fraction<51:44>); else scaled = UInt('1':fraction<51:41>); case N of when 16 result_exp = 29 - exp; // In range 29-30 = -1 to 29+1 = 30 when 32 result_exp = 253 - exp; // In range 253-254 = -1 to 253+1 = 254 when 64 result_exp = 2045 - exp; // In range 2045-2046 = -1 to 2045+1 = 2046 // Scaled is in the range 256 .. 511 or 2048 .. 4095, representing a // fixed-point number in the range [0.5 .. 1.0]. estimate = RecipEstimate(scaled, increasedprecision); // Estimate is in the range 256 .. 511 or 4096 .. 8191 representing a // fixed-point result in the range [1.0 .. 2.0]. // Convert to scaled floating point result with copied sign bit, // high-order bits from estimate, and exponent calculated above. if !increasedprecision then fraction = estimate<7:0> : Zeros(44); else fraction = estimate<11:0> : Zeros(40); if result_exp == 0 then fraction = '1' : fraction<51:1>; elsif result_exp == -1 then fraction = '01' : fraction<51:2>; result_exp = 0; case N of when 16 result = sign : result_exp<N-12:0> : fraction<51:42>; when 32 result = sign : result_exp<N-25:0> : fraction<51:29>; when 64 result = sign : result_exp<N-54:0> : fraction<51:0>; return result;

Library pseudocode for shared/functions/memory/MemoryAttributes

type MemoryAttributes is ( MemType memtype, DeviceType device, // For Device memory types MemAttrHints inner, // Inner hints and attributes MemAttrHints outer, // Outer hints and attributes boolean tagged, // Tagged access boolean shareable, boolean outershareable )

Library pseudocode for shared/functions/float/fprecipestimate/RecipEstimate

// RecipEstimate() // =============== // Compute estimate of reciprocal of 9-bit fixed-point number. // // a is in range 256 .. 511 or 2048 .. 4095 representing a number in // the range 0.5 <= x < 1.0. // increasedprecision determines if the mantissa is 8-bit or 12-bit. // result is in the range 256 .. 511 or 4096 .. 8191 representing a // number in the range 1.0 to 511/256 or 1.00 to 8191/4096. integer RecipEstimate(integer a, boolean increasedprecision) integer r; if !increasedprecision then assert 256 <= a && a < 512; a = a*2+1; // Round to nearest integer b = (2 ^ 19) DIV a; r = (b+1) DIV 2; // Round to nearest assert 256 <= r && r < 512; else assert 2048 <= a && a < 4096; a = a*2+1; // Round to nearest real real_val = Real(2^25)/Real(a); r = RoundDown(real_val); real error = real_val - Real(r); boolean round_up = error > 0.5; // Error cannot be exactly 0.5 so do not need tie case if round_up then r = r+1; assert 4096 <= r && r < 8192; return r;

Library pseudocode for shared/functions/memory/Permissions

type Permissions is ( bits(3) ap, // Access permission bits bit xn, // Execute-never bit bit xxn, // [Armv8.2] Extended execute-never bit for stage 2 bit pxn // Privileged execute-never bit )

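The 8-bit (non-increased-precision) path of RecipEstimate is plain integer arithmetic and transcribes directly; this non-normative C version mirrors the pseudocode above:

#include <assert.h>

/* 8-bit RecipEstimate path: 'a' is a 9-bit fixed-point value in [256, 512)
 * standing for [0.5, 1.0). */
static int recip_estimate_8bit(int a) {
    assert(256 <= a && a < 512);
    a = a * 2 + 1;           /* round to nearest */
    int b = (1 << 19) / a;
    int r = (b + 1) / 2;     /* round to nearest */
    assert(256 <= r && r < 512);
    return r;
}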
Library pseudocode for shared/functions/float/fprecpx/FPRecpX

// FPRecpX() // ========= bits(N) FPRecpX(bits(N) op, FPCRType fpcr) assert N IN {16,32,64}; case N of when 16 esize = 5; when 32 esize = 8; when 64 esize = 11; bits(N) result; bits(esize) exp; bits(esize) max_exp; bits(N-(esize+1)) frac = Zeros(); boolean altfp = HaveAltFP() && fpcr.AH == '1'; boolean fpexc = !altfp; // Generate no floating-point exceptions if altfp then fpcr.<FIZ,FZ> = '11'; // Flush denormal input and output to zero (fptype,sign,value) = FPUnpack(op, fpcr, fpexc); case N of when 16 exp = op<10+esize-1:10>; when 32 exp = op<23+esize-1:23>; when 64 exp = op<52+esize-1:52>; max_exp = Ones(esize) - 1; if fptype == FPType_SNaN || fptype == FPType_QNaN then result = FPProcessNaN(fptype, op, fpcr, fpexc); else if IsZero(exp) then // Zero and denormals result = sign:max_exp:frac; else // Infinities and normals result = sign:NOT(exp):frac; return result;

Library pseudocode for shared/functions/memory/PrefetchHint

enumeration PrefetchHint {Prefetch_READ, Prefetch_WRITE, Prefetch_EXEC};

Library pseudocode for shared/functions/float/fpround/FPRound

// FPRound() // ========= // Used by data processing and int/fixed <-> FP conversion instructions. // For half-precision data it ignores AHP, and observes FZ16. bits(N) FPRound(real op, FPCRType fpcr, FPRounding rounding) fpcr.AHP = '0'; boolean fpexc = TRUE; // Generate floating-point exceptions boolean isbfloat16 = FALSE; return FPRoundBase(op, fpcr, rounding, isbfloat16, fpexc); // FPRound() // ========= // Used by data processing and int/fixed <-> FP conversion instructions. // For half-precision data it ignores AHP, and observes FZ16. // // The 'fpcr' argument supplies FPCR control bits and 'fpexc' controls the // generation of floating-point exceptions. Status information is updated // directly in the FPSR where appropriate. bits(N) FPRound(real op, FPCRType fpcr, FPRounding rounding, boolean fpexc) fpcr.AHP = '0'; boolean isbfloat16 = FALSE; return FPRoundBase(op, fpcr, rounding, isbfloat16, fpexc); // FPRound() // ========= bits(N) FPRound(real op, FPCRType fpcr) return FPRound(op, fpcr, FPRoundingMode(fpcr));

Library pseudocode for shared/functions/memory/SpeculativeStoreBypassBarrierToPA

SpeculativeStoreBypassBarrierToPA();

Library pseudocode for shared/functions/float/fpround/FPRoundBase

// FPRoundBase() // ============= bits(N) FPRoundBase(real op, FPCRType fpcr, FPRounding rounding, boolean isbfloat16) boolean fpexc = TRUE; // Generate floating-point exceptions return FPRoundBase(op, fpcr, rounding, isbfloat16, fpexc); // FPRoundBase() // ============= // Convert a real number OP into an N-bit floating-point value using the // supplied rounding mode RMODE. // // The 'fpcr' argument supplies FPCR control bits and 'fpexc' controls the // generation of floating-point exceptions. Status information is updated // directly in the FPSR where appropriate. bits(N) FPRoundBase(real op, FPCRType fpcr, FPRounding rounding, boolean isbfloat16, boolean fpexc) assert N IN {16,32,64}; assert op != 0.0; assert rounding != FPRounding_TIEAWAY; bits(N) result; // Obtain format parameters - minimum exponent, numbers of exponent and fraction bits. if N == 16 then minimum_exp = -14; E = 5; F = 10; elsif N == 32 && isbfloat16 then minimum_exp = -126; E = 8; F = 7; elsif N == 32 then minimum_exp = -126; E = 8; F = 23; else // N == 64 minimum_exp = -1022; E = 11; F = 52; // Split value into sign, unrounded mantissa and exponent. if op < 0.0 then sign = '1'; mantissa = -op; else sign = '0'; mantissa = op; exponent = 0; while mantissa < 1.0 do mantissa = mantissa * 2.0; exponent = exponent - 1; while mantissa >= 2.0 do mantissa = mantissa / 2.0; exponent = exponent + 1; // When TRUE, detection of underflow occurs after rounding and the test for a // denormalized number for single and double precision values occurs after rounding. altfp = HaveAltFP() && !UsingAArch32() && fpcr.AH == '1'; // Deal with flush-to-zero before rounding if FPCR.AH != '1'. if (!altfp && ((fpcr.FZ == '1' && N != 16) || (fpcr.FZ16 == '1' && N == 16)) && exponent < minimum_exp) then // Flush-to-zero never generates a trapped exception. if UsingAArch32() then FPSCR.UFC = '1'; else FPSR.UFC = '1'; return FPZero(sign); biased_exp_unconstrained = exponent - minimum_exp + 1; int_mant_unconstrained = RoundDown(mantissa * 2.0^F); error_unconstrained = mantissa * 2.0^F - Real(int_mant_unconstrained); // Start creating the exponent value for the result. Start by biasing the actual exponent // so that the minimum exponent becomes 1, lower values 0 (indicating possible underflow). biased_exp = Max(exponent - minimum_exp + 1, 0); if biased_exp == 0 then mantissa = mantissa / 2.0^(minimum_exp - exponent); // Get the unrounded mantissa as an integer, and the "units in last place" rounding error. int_mant = RoundDown(mantissa * 2.0^F); // < 2.0^F if biased_exp == 0, >= 2.0^F if not error = mantissa * 2.0^F - Real(int_mant); // Underflow occurs if exponent is too small before rounding, and result is inexact or // the Underflow exception is trapped. This applies before rounding if FPCR.AH != '1'. if !altfp && biased_exp == 0 && (error != 0.0 || fpcr.UFE == '1') then if fpexc then FPProcessException(FPExc_Underflow, fpcr); // Round result according to rounding mode. if altfp then case rounding of when FPRounding_TIEEVEN round_up_unconstrained = (error_unconstrained > 0.5 || (error_unconstrained == 0.5 && int_mant_unconstrained<0> == '1')); round_up = (error > 0.5 || (error == 0.5 && int_mant<0> == '1')); overflow_to_inf = TRUE; when FPRounding_POSINF round_up_unconstrained = (error_unconstrained != 0.0 && sign == '0'); round_up = (error != 0.0 && sign == '0'); overflow_to_inf = (sign == '0'); when FPRounding_NEGINF round_up_unconstrained = (error_unconstrained != 0.0 && sign == '1'); round_up = (error != 0.0 && sign == '1'); overflow_to_inf = (sign == '1'); when FPRounding_ZERO, FPRounding_ODD round_up_unconstrained = FALSE; round_up = FALSE; overflow_to_inf = FALSE; if round_up_unconstrained then int_mant_unconstrained = int_mant_unconstrained + 1; if int_mant_unconstrained == 2^(F+1) then // Rounded up to next exponent biased_exp_unconstrained = biased_exp_unconstrained + 1; int_mant_unconstrained = int_mant_unconstrained DIV 2; // Deal with flush-to-zero and underflow after rounding if FPCR.AH == '1'. if biased_exp_unconstrained < 1 && int_mant_unconstrained != 0 then // the result of unconstrained rounding is less than the minimum normalized number if (fpcr.FZ == '1' && N != 16) || (fpcr.FZ16 == '1' && N == 16) then // Flush-to-zero if fpexc then FPSR.UFC = '1'; FPProcessException(FPExc_Inexact, fpcr); return FPZero(sign); elsif error != 0.0 || fpcr.UFE == '1' then if fpexc then FPProcessException(FPExc_Underflow, fpcr); else // altfp == FALSE case rounding of when FPRounding_TIEEVEN round_up = (error > 0.5 || (error == 0.5 && int_mant<0> == '1')); overflow_to_inf = TRUE; when FPRounding_POSINF round_up = (error != 0.0 && sign == '0'); overflow_to_inf = (sign == '0'); when FPRounding_NEGINF round_up = (error != 0.0 && sign == '1'); overflow_to_inf = (sign == '1'); when FPRounding_ZERO, FPRounding_ODD round_up = FALSE; overflow_to_inf = FALSE; if round_up then int_mant = int_mant + 1; if int_mant == 2^F then // Rounded up from denormalized to normalized biased_exp = 1; if int_mant == 2^(F+1) then // Rounded up to next exponent biased_exp = biased_exp + 1; int_mant = int_mant DIV 2; // Handle rounding to odd if error != 0.0 && rounding == FPRounding_ODD then int_mant<0> = '1'; // Deal with overflow and generate result. if N != 16 || fpcr.AHP == '0' then // Single, double or IEEE half precision if biased_exp >= 2^E - 1 then result = if overflow_to_inf then FPInfinity(sign) else FPMaxNormal(sign); if fpexc then FPProcessException(FPExc_Overflow, fpcr); error = 1.0; // Ensure that an Inexact exception occurs else result = sign : biased_exp<E-1:0> : int_mant<F-1:0> : Zeros(N-(E+F+1)); else // Alternative half precision if biased_exp >= 2^E then result = sign : Ones(N-1); if fpexc then FPProcessException(FPExc_InvalidOp, fpcr); error = 0.0; // Ensure that an Inexact exception does not occur else result = sign : biased_exp<E-1:0> : int_mant<F-1:0> : Zeros(N-(E+F+1)); // Deal with Inexact exception. if error != 0.0 then if fpexc then FPProcessException(FPExc_Inexact, fpcr); return result;

Library pseudocode for shared/functions/memory/SpeculativeStoreBypassBarrierToVA

SpeculativeStoreBypassBarrierToVA();

Library pseudocode for shared/functions/float/fpround/FPRoundCV

// FPRoundCV() // =========== // Used for FP <-> FP conversion instructions. // For half-precision data ignores FZ16 and observes AHP. bits(N) FPRoundCV(real op, FPCRType fpcr, FPRounding rounding) fpcr.FZ16 = '0'; boolean fpexc = TRUE; // Generate floating-point exceptions boolean isbfloat16 = FALSE; return FPRoundBase(op, fpcr, rounding, isbfloat16, fpexc);

Library pseudocode for shared/functions/memory/TLBRecord

type TLBRecord is ( Permissions perms, bit nG, // '0' = Global, '1' = not Global bits(4) domain, // AArch32 only bit GP, // Guarded Page boolean contiguous, // Contiguous bit from page table integer level, // AArch32 Short-descriptor format: Indicates Section/Page integer blocksize, // Describes size of memory translated in KBytes DescriptorUpdate descupdate, // [Armv8.1] Context for h/w update of table descriptor bit CnP, // [Armv8.2] TLB entry can be shared between different PEs AddressDescriptor addrdesc )

Library pseudocode for shared/functions/float/fprounding/FPRounding

enumeration FPRounding {FPRounding_TIEEVEN, FPRounding_POSINF, FPRounding_NEGINF, FPRounding_ZERO, FPRounding_TIEAWAY, FPRounding_ODD};

Library pseudocode for shared/functions/memory/Tag

constant integer LOG2_TAG_GRANULE = 4; constant integer TAG_GRANULE = 1 << LOG2_TAG_GRANULE;

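TAG_GRANULE is therefore 16 bytes: one allocation tag covers each 16-byte granule. A non-normative C sketch of granule alignment using these constants:

#include <stdint.h>

#define LOG2_TAG_GRANULE 4
#define TAG_GRANULE (1u << LOG2_TAG_GRANULE)   /* 16 bytes per allocation tag */

/* Align an address down to the start of the tag granule that covers it. */
static uint64_t tag_granule_base(uint64_t addr) {
    return addr & ~(uint64_t)(TAG_GRANULE - 1);
}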
Library pseudocode for shared/functions/float/fproundingmode/FPRoundingMode

// FPRoundingMode() // ================ // Return the current floating-point rounding mode. FPRounding FPRoundingMode(FPCRType fpcr) return FPDecodeRounding(fpcr.RMode);

Library pseudocode for shared/functions/memory/_Mem

// These two _Mem[] accessors are the hardware operations which perform single-copy atomic, // aligned, little-endian memory accesses of size bytes from/to the underlying physical // memory array of bytes. // // The functions address the array using desc.paddress which supplies: // * A 52-bit physical address // * A single NS bit to select between Secure and Non-secure parts of the array. // // The accdesc descriptor describes the access type: normal, exclusive, ordered, streaming, // etc and other parameters required to access the physical memory or for setting syndrome // register in the event of an external abort. bits(8*size) _Mem[AddressDescriptor desc, integer size, AccessDescriptor accdesc]; _Mem[AddressDescriptor desc, integer size, AccessDescriptor accdesc] = bits(8*size) value;

Library pseudocode for shared/functions/float/fproundint/FPRoundInt

// FPRoundInt() // ============ // Round op to nearest integral floating point value using rounding mode in FPCR/FPSCR. // If EXACT is TRUE, set FPSR.IXC if result is not numerically equal to op. bits(N) FPRoundInt(bits(N) op, FPCRType fpcr, FPRounding rounding, boolean exact) assert rounding != FPRounding_ODD; assert N IN {16,32,64}; // When alternative floating-point support is TRUE, do not generate // Input Denormal floating-point exceptions. altfp = HaveAltFP() && !UsingAArch32() && fpcr.AH == '1'; fpexc = !altfp; // Unpack using FPCR to determine if subnormals are flushed-to-zero. (fptype,sign,value) = FPUnpack(op, fpcr, fpexc); if fptype == FPType_SNaN || fptype == FPType_QNaN then result = FPProcessNaN(fptype, op, fpcr); elsif fptype == FPType_Infinity then result = FPInfinity(sign); elsif fptype == FPType_Zero then result = FPZero(sign); else // Extract integer component. int_result = RoundDown(value); error = value - Real(int_result); // Determine whether supplied rounding mode requires an increment. case rounding of when FPRounding_TIEEVEN round_up = (error > 0.5 || (error == 0.5 && int_result<0> == '1')); when FPRounding_POSINF round_up = (error != 0.0); when FPRounding_NEGINF round_up = FALSE; when FPRounding_ZERO round_up = (error != 0.0 && int_result < 0); when FPRounding_TIEAWAY round_up = (error > 0.5 || (error == 0.5 && int_result >= 0)); if round_up then int_result = int_result + 1; // Convert integer value into an equivalent real value. real_result = Real(int_result); // Re-encode as a floating-point value, result is always exact. if real_result == 0.0 then result = FPZero(sign); else result = FPRound(real_result, fpcr, FPRounding_ZERO); // Generate inexact exceptions. if error != 0.0 && exact then FPProcessException(FPExc_Inexact, fpcr); return result;

Library pseudocode for shared/functions/mpam/DefaultMPAMinfo

// DefaultMPAMinfo // =============== // Returns default MPAM info. If secure is TRUE return default Secure // MPAMinfo, otherwise return default Non-secure MPAMinfo. MPAMinfo DefaultMPAMinfo(boolean secure) MPAMinfo DefaultInfo; DefaultInfo.mpam_ns = if secure then '0' else '1'; DefaultInfo.partid = DefaultPARTID; DefaultInfo.pmg = DefaultPMG; return DefaultInfo;

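FPRoundInt's 'exact' argument controls whether an Inexact exception is raised when the result differs from op. Loosely (and non-normatively), C's nearbyint()/rint() pair behaves the same way with respect to FE_INEXACT:

#include <fenv.h>
#include <math.h>
#include <stdio.h>
#pragma STDC FENV_ACCESS ON

int main(void) {
    fesetround(FE_TONEAREST);
    feclearexcept(FE_INEXACT);
    double r1 = nearbyint(2.5);              /* like exact == FALSE: no Inexact */
    int q1 = fetestexcept(FE_INEXACT) != 0;
    feclearexcept(FE_INEXACT);
    double r2 = rint(2.5);                   /* like exact == TRUE: raises Inexact */
    int q2 = fetestexcept(FE_INEXACT) != 0;
    printf("%g inexact=%d, %g inexact=%d\n", r1, q1, r2, q2);  /* 2 0, 2 1 */
    return 0;
}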
Library pseudocode for shared/functions/float/fproundintn/FPRoundIntN

// FPRoundIntN() // ============= bits(N) FPRoundIntN(bits(N) op, FPCRType fpcr, FPRounding rounding, integer intsize) assert rounding != FPRounding_ODD; assert N IN {32,64}; assert intsize IN {32, 64}; integer exp; constant integer E = (if N == 32 then 8 else 11); constant integer F = N - (E + 1); // When alternative floating-point support is TRUE, do not generate // Input Denormal floating-point exceptions. altfp = HaveAltFP() && !UsingAArch32() && fpcr.AH == '1'; fpexc = !altfp; // Unpack using FPCR to determine if subnormals are flushed-to-zero. (fptype,sign,value) = FPUnpack(op, fpcr, fpexc); if fptype IN {FPType_SNaN, FPType_QNaN, FPType_Infinity} then if N == 32 then exp = 126 + intsize; result = '1':exp<(E-1):0>:Zeros(F); else exp = 1022+intsize; result = '1':exp<(E-1):0>:Zeros(F); FPProcessException(FPExc_InvalidOp, fpcr); elsif fptype == FPType_Zero then result = FPZero(sign); else // Extract integer component. int_result = RoundDown(value); error = value - Real(int_result); // Determine whether supplied rounding mode requires an increment. case rounding of when FPRounding_TIEEVEN round_up = error > 0.5 || (error == 0.5 && int_result<0> == '1'); when FPRounding_POSINF round_up = error != 0.0; when FPRounding_NEGINF round_up = FALSE; when FPRounding_ZERO round_up = error != 0.0 && int_result < 0; when FPRounding_TIEAWAY round_up = error > 0.5 || (error == 0.5 && int_result >= 0); if round_up then int_result = int_result + 1; overflow = int_result > 2^(intsize-1)-1 || int_result < -1*2^(intsize-1); if overflow then if N == 32 then exp = 126 + intsize; result = '1':exp<(E-1):0>:Zeros(F); else exp = 1022 + intsize; result = '1':exp<(E-1):0>:Zeros(F); FPProcessException(FPExc_InvalidOp, fpcr); // This case shouldn't set Inexact. error = 0.0; else // Convert integer value into an equivalent real value. real_result = Real(int_result); // Re-encode as a floating-point value, result is always exact. if real_result == 0.0 then result = FPZero(sign); else result = FPRound(real_result, fpcr, FPRounding_ZERO); // Generate inexact exceptions. if error != 0.0 then FPProcessException(FPExc_Inexact, fpcr); return result;

Library pseudocode for shared/functions/mpam/DefaultPARTID

constant PARTIDtype DefaultPARTID = 0<15:0>;

Library pseudocode for shared/functions/float/fprsqrtestimate/FPRSqrtEstimate

// FPRSqrtEstimate() // ================= bits(N) FPRSqrtEstimate(bits(N) operand, FPCRType fpcr) assert N IN {16,32,64}; // When using alternative floating-point behaviour, do not generate // floating-point exceptions and flush denormal input to zero. boolean altfp = HaveAltFP() && !UsingAArch32() && fpcr.AH == '1'; boolean fpexc = !altfp; if altfp then fpcr.<FIZ,FZ> = '11'; (fptype,sign,value) = FPUnpack(operand, fpcr, fpexc); if fptype == FPType_SNaN || fptype == FPType_QNaN then result = FPProcessNaN(fptype, operand, fpcr, fpexc); elsif fptype == FPType_Zero then result = FPInfinity(sign); if fpexc then FPProcessException(FPExc_DivideByZero, fpcr); elsif sign == '1' then result = FPDefaultNaN(); if fpexc then FPProcessException(FPExc_InvalidOp, fpcr); elsif fptype == FPType_Infinity then result = FPZero('0'); else // Scale to a fixed-point value in the range 0.25 <= x < 1.0 in steps of 512, with the // evenness or oddness of the exponent unchanged, and calculate result exponent. // Scaled value has copied sign bit, exponent = 1022 or 1021 = double-precision // biased version of -1 or -2, fraction = original fraction extended with zeros. case N of when 16 fraction = operand<9:0> : Zeros(42); exp = UInt(operand<14:10>); when 32 fraction = operand<22:0> : Zeros(29); exp = UInt(operand<30:23>); when 64 fraction = operand<51:0>; exp = UInt(operand<62:52>); if exp == 0 then while fraction<51> == '0' do fraction = fraction<50:0> : '0'; exp = exp - 1; fraction = fraction<50:0> : '0'; integer scaled; boolean increasedprecision = N==32 && HaveFeatRPRES() && altfp; if !increasedprecision then if exp<0> == '0' then scaled = UInt('1':fraction<51:44>); else scaled = UInt('01':fraction<51:45>); else if exp<0> == '0' then scaled = UInt('1':fraction<51:41>); else scaled = UInt('01':fraction<51:42>); case N of when 16 result_exp = ( 44 - exp) DIV 2; when 32 result_exp = ( 380 - exp) DIV 2; when 64 result_exp = (3068 - exp) DIV 2; estimate = RecipSqrtEstimate(scaled, increasedprecision); // Estimate is in the range 256 .. 511 or 4096 .. 8191 representing a // fixed-point result in the range [1.0 .. 2.0]. // Convert to scaled floating point result with copied sign bit and high-order // fraction bits, and exponent calculated above. case N of when 16 result = '0' : result_exp<N-12:0> : estimate<7:0>:Zeros(2); when 32 if !increasedprecision then result = '0' : result_exp<N-25:0> : estimate<7:0>:Zeros(15); else result = '0' : result_exp<N-25:0> : estimate<11:0>:Zeros(11); when 64 result = '0' : result_exp<N-54:0> : estimate<7:0>:Zeros(44); return result;

Library pseudocode for shared/functions/mpam/DefaultPMG

constant PMGtype DefaultPMG = 0<7:0>;

Library pseudocode for shared/functions/float/fprsqrtestimate/RecipSqrtEstimate

// RecipSqrtEstimate() // =================== // Compute estimate of reciprocal square root of 9-bit fixed-point number. // // a is in range 128 .. 511 or 1024 .. 4095, with increased precision, // representing a number in the range 0.25 <= x < 1.0. // increasedprecision determines if the mantissa is 8-bit or 12-bit. // result is in the range 256 .. 511 or 4096 .. 8191, with increased precision, // representing a number in the range 1.0 to 511/256 or 8191/4096. integer RecipSqrtEstimate(integer a, boolean increasedprecision) integer r; if !increasedprecision then assert 128 <= a && a < 512; if a < 256 then // 0.25 .. 0.5 a = a*2+1; // a in units of 1/512 rounded to nearest else // 0.5 .. 1.0 a = (a >> 1) << 1; // Discard bottom bit a = (a+1)*2; // a in units of 1/256 rounded to nearest integer b = 512; while a*(b+1)*(b+1) < 2^28 do b = b+1; // b = largest b such that b < 2^14 / sqrt(a) r = (b+1) DIV 2; // Round to nearest assert 256 <= r && r < 512; else assert 1024 <= a && a < 4096; real real_val; real error; integer int_val; if a < 2048 then // 0.25 .. 0.5 a = a*2 + 1; // Take 10 bits of fraction and force a 1 at the bottom real_val = Real(a)/2.0; else // 0.5 .. 1.0 a = (a >> 1) << 1; // Discard bottom bit a = a+1; // Take 10 bits of the fraction and force a 1 at the bottom real_val = Real(a); real_val = Sqrt(real_val); // This number will lie in the range of 32 to 64 // Round to nearest even for a DP float number real_val = real_val * Real(2^47); // The integer is the size of the whole DP mantissa int_val = RoundDown(real_val); // Calculate rounding value error = real_val - Real(int_val); round_up = error > 0.5; // Error cannot be exactly 0.5 so do not need tie case if round_up then int_val = int_val+1; real_val = Real(2^65)/Real(int_val); // Lies in the range 4096 <= real_val < 8192 int_val = RoundDown(real_val); // Round that (to nearest even) to give integer error = real_val - Real(int_val); round_up = (error > 0.5 || (error == 0.5 && int_val<0> == '1')); if round_up then int_val = int_val+1; r = int_val; assert 4096 <= r && r < 8192; return r;

Library pseudocode for shared/functions/mpam/GenMPAMcurEL

// GenMPAMcurEL // ============ // Returns MPAMinfo for the current EL and security state. // InD is TRUE for instruction access and FALSE otherwise. // May be called if MPAM is not implemented (but in a version that supports // MPAM), MPAM is disabled, or in AArch32. In AArch32, convert the mode to // an EL if possible and use that to drive MPAM information generation. If the mode // cannot be converted, MPAM is not implemented, or MPAM is disabled, return // default MPAM information for the current security state. MPAMinfo GenMPAMcurEL(boolean InD) bits(2) mpamel; boolean validEL; boolean securempam; if HaveEMPAMExt() then boolean secure = IsSecure(); securempam = MPAM3_EL3.FORCE_NS == '0' && secure; if MPAMisEnabled() && (!secure || MPAM3_EL3.SDEFLT == '0') then if UsingAArch32() then (validEL, mpamel) = ELFromM32(PSTATE.M); else validEL = TRUE; mpamel = PSTATE.EL; if validEL then return genMPAM(UInt(mpamel), InD, securempam); else securempam = IsSecure(); if HaveMPAMExt() && MPAMisEnabled() then if UsingAArch32() then (validEL, mpamel) = ELFromM32(PSTATE.M); else validEL = TRUE; mpamel = PSTATE.EL; if validEL then return genMPAM(UInt(mpamel), InD, securempam); return DefaultMPAMinfo(securempam);

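As with RecipEstimate, the 8-bit path of RecipSqrtEstimate transcribes directly into integer C; this non-normative version mirrors the pseudocode above:

#include <assert.h>

/* 8-bit RecipSqrtEstimate path: 'a' in [128, 512) stands for [0.25, 1.0). */
static int recip_sqrt_estimate_8bit(int a) {
    assert(128 <= a && a < 512);
    if (a < 256) {               /* 0.25 .. 0.5 */
        a = a * 2 + 1;           /* in units of 1/512, rounded to nearest */
    } else {                     /* 0.5 .. 1.0  */
        a = (a >> 1) << 1;       /* discard bottom bit */
        a = (a + 1) * 2;         /* in units of 1/256, rounded to nearest */
    }
    int b = 512;
    while (a * (b + 1) * (b + 1) < (1 << 28))
        b = b + 1;               /* largest b such that b < 2^14 / sqrt(a) */
    int r = (b + 1) / 2;         /* round to nearest */
    assert(256 <= r && r < 512);
    return r;
}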
Library pseudocode for shared/functions/float/fpsqrt/FPSqrt

// FPSqrt() // ======== bits(N) FPSqrt(bits(N) op, FPCRType fpcr) assert N IN {16,32,64}; (fptype,sign,value) = FPUnpack(op, fpcr); if fptype == FPType_SNaN || fptype == FPType_QNaN then result = FPProcessNaN(fptype, op, fpcr); elsif fptype == FPType_Zero then result = FPZero(sign); elsif fptype == FPType_Infinity && sign == '0' then result = FPInfinity(sign); elsif sign == '1' then result = FPDefaultNaN(); FPProcessException(FPExc_InvalidOp, fpcr); else result = FPRound(Sqrt(value), fpcr); FPProcessDenorm(fptype, N, fpcr); return result;

Library pseudocode for shared/functions/mpam/MAP_vPARTID

// MAP_vPARTID // =========== // Performs conversion of virtual PARTID into physical PARTID // Contains all of the error checking and implementation // choices for the conversion. (PARTIDtype, boolean) MAP_vPARTID(PARTIDtype vpartid) // should not ever be called if EL2 is not implemented // or is implemented but not enabled in the current // security state. PARTIDtype ret; boolean err; integer virt = UInt( vpartid ); integer vpmrmax = UInt( MPAMIDR_EL1.VPMR_MAX ); // vpartid_max is largest vpartid supported integer vpartid_max = (4 * vpmrmax) + 3; // One of many ways to reduce vpartid to value less than vpartid_max. if virt > vpartid_max then virt = virt MOD (vpartid_max+1); // Check for valid mapping entry. if MPAMVPMV_EL2<virt> == '1' then // vpartid has a valid mapping so access the map. ret = mapvpmw(virt); err = FALSE; // Is the default virtual PARTID valid? elsif MPAMVPMV_EL2<0> == '1' then // Yes, so use default mapping for vpartid == 0. ret = MPAMVPM0_EL2<0 +: 16>; err = FALSE; // Neither is valid so use default physical PARTID. else ret = DefaultPARTID; err = TRUE; // Check that the physical PARTID is in-range. // This physical PARTID came from a virtual mapping entry. integer partid_max = UInt( MPAMIDR_EL1.PARTID_MAX ); if UInt(ret) > partid_max then // Out of range, so return default physical PARTID ret = DefaultPARTID; err = TRUE; return (ret, err);

Library pseudocode for shared/functions/float/fpsub/FPSub

// FPSub() // ======= bits(N) FPSub(bits(N) op1, bits(N) op2, FPCRType fpcr) assert N IN {16,32,64}; rounding = FPRoundingMode(fpcr); (type1,sign1,value1) = FPUnpack(op1, fpcr); (type2,sign2,value2) = FPUnpack(op2, fpcr); (done,result) = FPProcessNaNs(type1, type2, op1, op2, fpcr); if !done then inf1 = (type1 == FPType_Infinity); inf2 = (type2 == FPType_Infinity); zero1 = (type1 == FPType_Zero); zero2 = (type2 == FPType_Zero); if inf1 && inf2 && sign1 == sign2 then result = FPDefaultNaN(); FPProcessException(FPExc_InvalidOp, fpcr); elsif (inf1 && sign1 == '0') || (inf2 && sign2 == '1') then result = FPInfinity('0'); elsif (inf1 && sign1 == '1') || (inf2 && sign2 == '0') then result = FPInfinity('1'); elsif zero1 && zero2 && sign1 == NOT(sign2) then result = FPZero(sign1); else result_value = value1 - value2; if result_value == 0.0 then // Sign of exact zero result depends on rounding mode result_sign = if rounding == FPRounding_NEGINF then '1' else '0'; result = FPZero(result_sign); else result = FPRound(result_value, fpcr, rounding); FPProcessDenorms(type1, type2, N, fpcr); return result;

Library pseudocode for shared/functions/mpam/MPAMisEnabled

// MPAMisEnabled // ============= // Returns TRUE if MPAM is enabled. boolean MPAMisEnabled() el = HighestEL(); case el of when EL3 return MPAM3_EL3.MPAMEN == '1'; when EL2 return MPAM2_EL2.MPAMEN == '1'; when EL1 return MPAM1_EL1.MPAMEN == '1';

Library pseudocode for shared/functions/float/fpthree/FPThree

// FPThree() // ========= bits(N) FPThree(bit sign) assert N IN {16,32,64}; constant integer E = (if N == 16 then 5 elsif N == 32 then 8 else 11); constant integer F = N - (E + 1); exp = '1':Zeros(E-1); frac = '1':Zeros(F-1); result = sign : exp : frac; return result;

Library pseudocode for shared/functions/mpam/MPAMisVirtual

// MPAMisVirtual // ============= // Returns TRUE if MPAM is configured to be virtual at EL. boolean MPAMisVirtual(integer el) return ( MPAMIDR_EL1.HAS_HCR == '1' && EL2Enabled() && (( el == 0 && MPAMHCR_EL2.EL0_VPMEN == '1' && ( HCR_EL2.E2H == '0' || HCR_EL2.TGE == '0' )) || ( el == 1 && MPAMHCR_EL2.EL1_VPMEN == '1')));

Library pseudocode for shared/functions/float/fptofixed/FPToFixed

// FPToFixed() // =========== // Convert N-bit precision floating point OP to M-bit fixed point with // FBITS fractional bits, controlled by UNSIGNED and ROUNDING. bits(M) FPToFixed(bits(N) op, integer fbits, boolean unsigned, FPCRType fpcr, FPRounding rounding) assert N IN {16,32,64}; assert M IN {16,32,64}; assert fbits >= 0; assert rounding != FPRounding_ODD; // When alternative floating-point support is TRUE, do not generate // Input Denormal floating-point exceptions. altfp = HaveAltFP() && !UsingAArch32() && fpcr.AH == '1'; fpexc = !altfp; // Unpack using fpcr to determine if subnormals are flushed-to-zero. (fptype,sign,value) = FPUnpack(op, fpcr, fpexc); // If NaN, set cumulative flag or take exception. if fptype == FPType_SNaN || fptype == FPType_QNaN then FPProcessException(FPExc_InvalidOp, fpcr); // Scale by fractional bits and produce integer rounded towards minus-infinity. value = value * 2.0^fbits; int_result = RoundDown(value); error = value - Real(int_result); // Determine whether supplied rounding mode requires an increment. case rounding of when FPRounding_TIEEVEN round_up = (error > 0.5 || (error == 0.5 && int_result<0> == '1')); when FPRounding_POSINF round_up = (error != 0.0); when FPRounding_NEGINF round_up = FALSE; when FPRounding_ZERO round_up = (error != 0.0 && int_result < 0); when FPRounding_TIEAWAY round_up = (error > 0.5 || (error == 0.5 && int_result >= 0)); if round_up then int_result = int_result + 1; // Generate saturated result and exceptions. (result, overflow) = SatQ(int_result, M, unsigned); if overflow then FPProcessException(FPExc_InvalidOp, fpcr); elsif error != 0.0 then FPProcessException(FPExc_Inexact, fpcr); return result;

Library pseudocode for shared/functions/mpam/genMPAM

// genMPAM // ======= // Returns MPAMinfo for exception level el. // If InD is TRUE returns MPAM information using PARTID_I and PMG_I fields // of MPAMel_ELx register and otherwise using PARTID_D and PMG_D fields. // Produces a Secure PARTID if Secure is TRUE and a Non-secure PARTID otherwise. MPAMinfo genMPAM(integer el, boolean InD, boolean secure) MPAMinfo returnInfo; PARTIDtype partidel; boolean perr; boolean gstplk = (el == 0 && EL2Enabled() && MPAMHCR_EL2.GSTAPP_PLK == '1' && HCR_EL2.TGE == '0'); integer eff_el = if gstplk then 1 else el; (partidel, perr) = genPARTID(eff_el, InD); PMGtype groupel = genPMG(eff_el, InD, perr); returnInfo.mpam_ns = if secure then '0' else '1'; returnInfo.partid = partidel; returnInfo.pmg = groupel; return returnInfo;

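The tail of FPToFixed is scale, round, then saturate (SatQ), with Invalid Operation on overflow. A non-normative C sketch for M == 32, signed, using FPRounding_TIEAWAY (which C's round() implements); the architected version also raises the exceptions shown in the pseudocode:

#include <stdint.h>
#include <math.h>

/* Scale by 2^fbits, round to nearest with ties away from zero, then
 * saturate to the signed 32-bit range like SatQ. */
static int32_t fp_to_fixed32(double value, int fbits) {
    double scaled  = ldexp(value, fbits);   /* value * 2.0^fbits */
    double rounded = round(scaled);         /* FPRounding_TIEAWAY */
    if (rounded >  2147483647.0) return INT32_MAX;   /* saturate high */
    if (rounded < -2147483648.0) return INT32_MIN;   /* saturate low  */
    return (int32_t)rounded;
}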
Library pseudocode for shared/functions/float/fptofixedjs/FPToFixedJS

// FPToFixedJS() // ============= // Converts a double precision floating point input value // to a signed integer, with rounding to zero. (bits(N), bit) FPToFixedJS(bits(M) op, FPCRType fpcr, boolean Is64) assert M == 64 && N == 32; // If FALSE, never generate Input Denormal floating-point exceptions. fpexc_idenorm = !(HaveAltFP() && !UsingAArch32() && fpcr.AH == '1'); // Unpack using fpcr to determine if subnormals are flushed-to-zero. (fptype,sign,value) = FPUnpack(op, fpcr, fpexc_idenorm); Z = '1'; // If NaN, set cumulative flag or take exception. if fptype == FPType_SNaN || fptype == FPType_QNaN then FPProcessException(FPExc_InvalidOp, fpcr); Z = '0'; int_result = RoundDown(value); error = value - Real(int_result); // Determine whether supplied rounding mode requires an increment. round_it_up = (error != 0.0 && int_result < 0); if round_it_up then int_result = int_result + 1; if int_result < 0 then result = int_result - 2^32*RoundUp(Real(int_result)/Real(2^32)); else result = int_result - 2^32*RoundDown(Real(int_result)/Real(2^32)); // Generate exceptions. if int_result < -(2^31) || int_result > (2^31)-1 then FPProcessException(FPExc_InvalidOp, fpcr); Z = '0'; elsif error != 0.0 then FPProcessException(FPExc_Inexact, fpcr); Z = '0'; elsif sign == '1' && value == 0.0 then Z = '0'; elsif sign == '0' && value == 0.0 && !IsZero(op<51:0>) then Z = '0'; if fptype == FPType_Infinity then result = 0; return (result<N-1:0>, Z);

Library pseudocode for shared/functions/mpam/genMPAMel

// genMPAMel // ========= // Returns MPAMinfo for specified EL in the current security state. // InD is TRUE for instruction access and FALSE otherwise. MPAMinfo genMPAMel(bits(2) el, boolean InD) boolean secure = IsSecure(); boolean securempam = secure; if HaveEMPAMExt() then securempam = MPAM3_EL3.FORCE_NS == '0' && secure; if HaveMPAMExt() && MPAMisEnabled() && (!secure || MPAM3_EL3.SDEFLT == '0') then return genMPAM(UInt(el), InD, securempam); else if HaveMPAMExt() && MPAMisEnabled() then return genMPAM(UInt(el), InD, securempam); return DefaultMPAMinfo(securempam);

Library pseudocode for shared/functions/float/fptwo/FPTwo

// FPTwo() // ======= bits(N) FPTwo(bit sign) assert N IN {16,32,64}; constant integer E = (if N == 16 then 5 elsif N == 32 then 8 else 11); constant integer F = N - (E + 1); exp = '1':Zeros(E-1); frac = Zeros(F); result = sign : exp : frac; return result;

Library pseudocode for shared/functions/mpam/genPARTID

// genPARTID // ========= // Returns physical PARTID and error boolean for exception level el. // If InD is TRUE then PARTID is from MPAMel_ELx.PARTID_I and // otherwise from MPAMel_ELx.PARTID_D. (PARTIDtype, boolean) genPARTID(integer el, boolean InD) PARTIDtype partidel = getMPAM_PARTID(el, InD); integer partid_max = UInt(MPAMIDR_EL1.PARTID_MAX); if UInt(partidel) > partid_max then return (DefaultPARTID, TRUE); if MPAMisVirtual(el) then return MAP_vPARTID(partidel); else return (partidel, FALSE);

Library pseudocode for shared/functions/float/fptype/FPType

enumeration FPType {FPType_Zero, FPType_Denormal, FPType_Nonzero, FPType_Infinity, FPType_QNaN, FPType_SNaN};

Library pseudocode for shared/functions/mpam/genPMG

// genPMG // ====== // Returns PMG for exception level el and I- or D-side (InD). // If PARTID generation (genPARTID) encountered an error, genPMG() should be // called with partid_err as TRUE. PMGtype genPMG(integer el, boolean InD, boolean partid_err) integer pmg_max = UInt(MPAMIDR_EL1.PMG_MAX); // It is CONSTRAINED UNPREDICTABLE whether partid_err forces PMG to // use the default or if it uses the PMG from getMPAM_PMG. if partid_err then return DefaultPMG; PMGtype groupel = getMPAM_PMG(el, InD); if UInt(groupel) <= pmg_max then return groupel; return DefaultPMG;

Library pseudocode for shared/functions/float/fpunpack/FPUnpack

// FPUnpack() // ========== (FPType, bit, real) FPUnpack(bits(N) fpval, FPCRType fpcr) fpcr.AHP = '0'; boolean fpexc = TRUE; // Generate floating-point exceptions (fp_type, sign, value) = FPUnpackBase(fpval, fpcr, fpexc); return (fp_type, sign, value); // FPUnpack() // ========== // // Used by data processing and int/fixed <-> FP conversion instructions. // For half-precision data it ignores AHP, and observes FZ16. (FPType, bit, real) FPUnpack(bits(N) fpval, FPCRType fpcr, boolean fpexc) fpcr.AHP = '0'; (fp_type, sign, value) = FPUnpackBase(fpval, fpcr, fpexc); return (fp_type, sign, value);

Library pseudocode for shared/functions/mpam/getMPAM_PARTID

// getMPAM_PARTID // ============== // Returns a PARTID from one of the MPAMn_ELx registers. // MPAMn selects the MPAMn_ELx register used. // If InD is TRUE, selects the PARTID_I field of that // register. Otherwise, selects the PARTID_D field. PARTIDtype getMPAM_PARTID(integer MPAMn, boolean InD) PARTIDtype partid; boolean el2avail = EL2Enabled(); if InD then case MPAMn of when 3 partid = MPAM3_EL3.PARTID_I; when 2 partid = if el2avail then MPAM2_EL2.PARTID_I else Zeros(); when 1 partid = MPAM1_EL1.PARTID_I; when 0 partid = MPAM0_EL1.PARTID_I; otherwise partid = PARTIDtype UNKNOWN; else case MPAMn of when 3 partid = MPAM3_EL3.PARTID_D; when 2 partid = if el2avail then MPAM2_EL2.PARTID_D else Zeros(); when 1 partid = MPAM1_EL1.PARTID_D; when 0 partid = MPAM0_EL1.PARTID_D; otherwise partid = PARTIDtype UNKNOWN; return partid;

Library pseudocode for shared/functions/float/fpunpack/FPUnpackBase

// FPUnpackBase() // ============== (FPType, bit, real) FPUnpackBase(bits(N) fpval, FPCRType fpcr) boolean fpexc = TRUE; // Generate floating-point exceptions (fp_type, sign, value) = FPUnpackBase(fpval, fpcr, fpexc); return (fp_type, sign, value); // FPUnpackBase() // ============== // // Unpack a floating-point number into its type, sign bit and the real number // that it represents. The real number result has the correct sign for numbers // and infinities, is very large in magnitude for infinities, and is 0.0 for // NaNs. (These values are chosen to simplify the description of comparisons // and conversions.) // // The 'fpcr' argument supplies FPCR control bits and 'fpexc' controls the // generation of floating-point exceptions. Status information is updated // directly in the FPSR where appropriate. (FPType, bit, real) FPUnpackBase(bits(N) fpval, FPCRType fpcr, boolean fpexc) assert N IN {16,32,64}; boolean altfp = HaveAltFP() && !UsingAArch32(); boolean fiz = altfp && fpcr.FIZ == '1'; boolean fz = fpcr.FZ == '1' && !(altfp && fpcr.AH == '1'); if N == 16 then sign = fpval<15>; exp16 = fpval<14:10>; frac16 = fpval<9:0>; if IsZero(exp16) then if IsZero(frac16) || fpcr.FZ16 == '1' then fptype = FPType_Zero; value = 0.0; else fptype = FPType_Denormal; value = 2.0^-14 * (Real(UInt(frac16)) * 2.0^-10); elsif IsOnes(exp16) && fpcr.AHP == '0' then // Infinity or NaN in IEEE format if IsZero(frac16) then fptype = FPType_Infinity; value = 2.0^1000000; else fptype = if frac16<9> == '1' then FPType_QNaN else FPType_SNaN; value = 0.0; else fptype = FPType_Nonzero; value = 2.0^(UInt(exp16)-15) * (1.0 + Real(UInt(frac16)) * 2.0^-10); elsif N == 32 then sign = fpval<31>; exp32 = fpval<30:23>; frac32 = fpval<22:0>; if IsZero(exp32) then if IsZero(frac32) then // Produce zero if value is zero. fptype = FPType_Zero; value = 0.0; elsif fz || fiz then // Flush-to-zero if FIZ==1 or AH,FZ==01 fptype = FPType_Zero; value = 0.0; // Check whether to raise Input Denormal floating-point exception. // fpcr.FIZ==1 does not raise Input Denormal exception. if fz then // Denormalized input flushed to zero if fpexc then FPProcessException(FPExc_InputDenorm, fpcr); else fptype = FPType_Denormal; value = 2.0^-126 * (Real(UInt(frac32)) * 2.0^-23); elsif IsOnes(exp32) then if IsZero(frac32) then fptype = FPType_Infinity; value = 2.0^1000000; else fptype = if frac32<22> == '1' then FPType_QNaN else FPType_SNaN; value = 0.0; else fptype = FPType_Nonzero; value = 2.0^(UInt(exp32)-127) * (1.0 + Real(UInt(frac32)) * 2.0^-23); else // N == 64 sign = fpval<63>; exp64 = fpval<62:52>; frac64 = fpval<51:0>; if IsZero(exp64) then if IsZero(frac64) then // Produce zero if value is zero. fptype = FPType_Zero; value = 0.0; elsif fz || fiz then // Flush-to-zero if FIZ==1 or AH,FZ==01 fptype = FPType_Zero; value = 0.0; // Check whether to raise Input Denormal floating-point exception. // fpcr.FIZ==1 does not raise Input Denormal exception. if fz then // Denormalized input flushed to zero if fpexc then FPProcessException(FPExc_InputDenorm, fpcr); else fptype = FPType_Denormal; value = 2.0^-1022 * (Real(UInt(frac64)) * 2.0^-52); elsif IsOnes(exp64) then if IsZero(frac64) then fptype = FPType_Infinity; value = 2.0^1000000; else fptype = if frac64<51> == '1' then FPType_QNaN else FPType_SNaN; value = 0.0; else fptype = FPType_Nonzero; value = 2.0^(UInt(exp64)-1023) * (1.0 + Real(UInt(frac64)) * 2.0^-52); if sign == '1' then value = -value; return (fptype, sign, value);

Library pseudocode for shared/functions/mpam/getMPAM_PMG

// getMPAM_PMG // =========== // Returns a PMG from one of the MPAMn_ELx registers. // MPAMn selects the MPAMn_ELx register used. // If InD is TRUE, selects the PMG_I field of that // register. Otherwise, selects the PMG_D field. PMGtype getMPAM_PMG(integer MPAMn, boolean InD) PMGtype pmg; boolean el2avail = EL2Enabled(); if InD then case MPAMn of when 3 pmg = MPAM3_EL3.PMG_I; when 2 pmg = if el2avail then MPAM2_EL2.PMG_I else Zeros(); when 1 pmg = MPAM1_EL1.PMG_I; when 0 pmg = MPAM0_EL1.PMG_I; otherwise pmg = PMGtype UNKNOWN; else case MPAMn of when 3 pmg = MPAM3_EL3.PMG_D; when 2 pmg = if el2avail then MPAM2_EL2.PMG_D else Zeros(); when 1 pmg = MPAM1_EL1.PMG_D; when 0 pmg = MPAM0_EL1.PMG_D; otherwise pmg = PMGtype UNKNOWN; return pmg;

Library pseudocode for shared/functions/mpam/mapvpmw

// mapvpmw
// =======
// Map a virtual PARTID into a physical PARTID using
// the MPAMVPMn_EL2 registers.
// vpartid is now assumed in-range and valid (checked by caller)
// returns physical PARTID from mapping entry.

PARTIDtype mapvpmw(integer vpartid)
    bits(64) vpmw;
    integer wd = vpartid DIV 4;
    case wd of
        when 0 vpmw = MPAMVPM0_EL2;
        when 1 vpmw = MPAMVPM1_EL2;
        when 2 vpmw = MPAMVPM2_EL2;
        when 3 vpmw = MPAMVPM3_EL2;
        when 4 vpmw = MPAMVPM4_EL2;
        when 5 vpmw = MPAMVPM5_EL2;
        when 6 vpmw = MPAMVPM6_EL2;
        when 7 vpmw = MPAMVPM7_EL2;
        otherwise vpmw = Zeros(64);
    // vpme_lsb selects LSB of field within register
    integer vpme_lsb = (vpartid MOD 4) * 16;
    return vpmw<vpme_lsb +: 16>;
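As a worked example: virtual PARTID 9 gives wd = 9 DIV 4 = 2 and vpme_lsb = (9 MOD 4) * 16 = 16, so the physical PARTID is read from MPAMVPM2_EL2<31:16>.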

Library pseudocode for shared/functions/registers/BranchTo

// BranchTo()
// ==========
// Set program counter to a new address, with a branch type
// In AArch64 state the address might include a tag in the top eight bits.

BranchTo(bits(N) target, BranchType branch_type)
    Hint_Branch(branch_type);
    if N == 32 then
        assert UsingAArch32();
        _PC = ZeroExtend(target);
    else
        assert N == 64 && !UsingAArch32();
        _PC = AArch64.BranchAddr(target<63:0>);
    return;
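As an illustrative sketch (not part of the library), a direct branch taken in AArch64 state, where target is assumed to hold the computed 64-bit destination:

    BranchTo(target, BranchType_DIR);   // any address tag is removed by AArch64.BranchAddr()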

Library pseudocode for shared/functions/registers/BranchToAddr

// BranchToAddr()
// ==============
// Set program counter to a new address, with a branch type
// In AArch64 state the address does not include a tag in the top eight bits.

BranchToAddr(bits(N) target, BranchType branch_type)
    Hint_Branch(branch_type);
    if N == 32 then
        assert UsingAArch32();
        _PC = ZeroExtend(target);
    else
        assert N == 64 && !UsingAArch32();
        _PC = target<63:0>;
    return;

Library pseudocode for shared/functions/registers/BranchType

enumeration BranchType {
    BranchType_DIRCALL,    // Direct Branch with link
    BranchType_INDCALL,    // Indirect Branch with link
    BranchType_ERET,       // Exception return (indirect)
    BranchType_DBGEXIT,    // Exit from Debug state
    BranchType_RET,        // Indirect branch with function return hint
    BranchType_DIR,        // Direct branch
    BranchType_INDIR,      // Indirect branch
    BranchType_EXCEPTION,  // Exception entry
    BranchType_RESET,      // Reset
    BranchType_UNKNOWN};   // Other

Library pseudocode for shared/functions/registers/Hint_Branch

// Report the hint passed to BranchTo() and BranchToAddr(), for consideration when processing
// the next instruction.

Hint_Branch(BranchType hint);

Library pseudocode for shared/functions/registers/NextInstrAddr

// Return address of the sequentially next instruction.

bits(N) NextInstrAddr();

Library pseudocode for shared/functions/registers/ResetExternalDebugRegisters

// Reset the External Debug registers in the Core power domain.

ResetExternalDebugRegisters(boolean cold_reset);

Library pseudocode for shared/functions/registers/ThisInstrAddr

// ThisInstrAddr()
// ===============
// Return address of the current instruction.

bits(N) ThisInstrAddr()
    assert N == 64 || (N == 32 && UsingAArch32());
    return _PC<N-1:0>;

Library pseudocode for shared/functions/registers/_PC

bits(64) _PC;

Library pseudocode for shared/functions/registers/_R

array bits(64) _R[0..30];

Library pseudocode for shared/functions/sysregisters/SPSR

// SPSR[] - non-assignment form
// ============================

bits(32) SPSR[]
    bits(32) result;
    if UsingAArch32() then
        case PSTATE.M of
            when M32_FIQ      result = SPSR_fiq;
            when M32_IRQ      result = SPSR_irq;
            when M32_Svc      result = SPSR_svc;
            when M32_Monitor  result = SPSR_mon;
            when M32_Abort    result = SPSR_abt;
            when M32_Hyp      result = SPSR_hyp;
            when M32_Undef    result = SPSR_und;
            otherwise         Unreachable();
    else
        case PSTATE.EL of
            when EL1          result = SPSR_EL1;
            when EL2          result = SPSR_EL2;
            when EL3          result = SPSR_EL3;
            otherwise         Unreachable();
    return result;

// SPSR[] - assignment form
// ========================

SPSR[] = bits(32) value
    if UsingAArch32() then
        case PSTATE.M of
            when M32_FIQ      SPSR_fiq = value;
            when M32_IRQ      SPSR_irq = value;
            when M32_Svc      SPSR_svc = value;
            when M32_Monitor  SPSR_mon = value;
            when M32_Abort    SPSR_abt = value;
            when M32_Hyp      SPSR_hyp = value;
            when M32_Undef    SPSR_und = value;
            otherwise         Unreachable();
    else
        case PSTATE.EL of
            when EL1          SPSR_EL1 = value;
            when EL2          SPSR_EL2 = value;
            when EL3          SPSR_EL3 = value;
            otherwise         Unreachable();
    return;
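As an illustrative sketch (not part of the library), the accessor pair lets exception-handling pseudocode read and write the banked SPSR for the current mode or Exception level without naming a specific register:

    bits(32) saved = SPSR[];   // e.g. SPSR_EL1 at EL1, SPSR_hyp in Hyp mode
    SPSR[] = saved;            // writes back to the same banked register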

Library pseudocode for shared/functions/system/ArchVersion

enumeration ArchVersion {ARMv8p0, ARMv8p1, ARMv8p2, ARMv8p3, ARMv8p4, ARMv8p5, ARMv9p0, ARMv8p6, ARMv9p1};

Library pseudocode for shared/functions/system/BranchTargetCheck

// BranchTargetCheck()
// ===================
// This function checks if the current instruction is a valid target for a branch
// taken into, or inside, a guarded page. It is executed on every cycle once the current
// instruction has been decoded and the values of InGuardedPage and BTypeCompatible have been
// determined for the current instruction.

BranchTargetCheck()
    assert HaveBTIExt() && !UsingAArch32();

    // The branch target check considers two state variables:
    // * InGuardedPage, which is evaluated during instruction fetch.
    // * BTypeCompatible, which is evaluated during instruction decode.
    if InGuardedPage && PSTATE.BTYPE != '00' && !BTypeCompatible && !Halted() then
        bits(64) pc = ThisInstrAddr();
        AArch64.BranchTargetException(pc<51:0>);

    boolean branch_instr = AArch64.ExecutingBROrBLROrRetInstr();
    boolean bti_instr = AArch64.ExecutingBTIInstr();

    // PSTATE.BTYPE defaults to 00 for instructions that do not explicitly set BTYPE.
    if !(branch_instr || bti_instr) then
        BTypeNext = '00';

Library pseudocode for shared/functions/system/ClearEventRegister

// ClearEventRegister()
// ====================
// Clear the Event Register of this PE

ClearEventRegister()
    EventRegister = '0';
    return;

Library pseudocode for shared/functions/system/ClearPendingPhysicalSError

// Clear a pending physical SError interrupt

ClearPendingPhysicalSError();

Library pseudocode for shared/functions/system/ClearPendingVirtualSError

// Clear a pending virtual SError interrupt

ClearPendingVirtualSError();

Library pseudocode for shared/functions/system/ConditionHolds

// ConditionHolds()
// ================
// Return TRUE iff COND currently holds

boolean ConditionHolds(bits(4) cond)
    // Evaluate base condition.
    case cond<3:1> of
        when '000' result = (PSTATE.Z == '1');                          // EQ or NE
        when '001' result = (PSTATE.C == '1');                          // CS or CC
        when '010' result = (PSTATE.N == '1');                          // MI or PL
        when '011' result = (PSTATE.V == '1');                          // VS or VC
        when '100' result = (PSTATE.C == '1' && PSTATE.Z == '0');       // HI or LS
        when '101' result = (PSTATE.N == PSTATE.V);                     // GE or LT
        when '110' result = (PSTATE.N == PSTATE.V && PSTATE.Z == '0');  // GT or LE
        when '111' result = TRUE;                                       // AL

    // Condition flag values in the set '111x' indicate always true
    // Otherwise, invert condition if necessary.
    if cond<0> == '1' && cond != '1111' then
        result = !result;

    return result;
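As a worked example: cond = '0001' encodes NE; the base condition '000' evaluates PSTATE.Z == '1' (EQ), and cond<0> == '1' inverts it, so ConditionHolds('0001') returns TRUE exactly when PSTATE.Z == '0'.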

Library pseudocode for shared/functions/system/ConsumptionOfSpeculativeDataBarrier

ConsumptionOfSpeculativeDataBarrier();

Library pseudocode for shared/functions/system/CurrentInstrSet

// CurrentInstrSet()
// =================

InstrSet CurrentInstrSet()
    if UsingAArch32() then
        result = if PSTATE.T == '0' then InstrSet_A32 else InstrSet_T32;
        // PSTATE.J is RES0. Implementation of T32EE or Jazelle state not permitted.
    else
        result = InstrSet_A64;
    return result;

Library pseudocode for shared/functions/system/CurrentPL

// CurrentPL()
// ===========

PrivilegeLevel CurrentPL()
    return PLOfEL(PSTATE.EL);

Library pseudocode for shared/functions/system/DelayForWFETrap

// Causes the PE to stall for 'n' cycles.

DelayForWFETrap(integer n);

Library pseudocode for shared/functions/system/EL0

constant bits(2) EL3 = '11';
constant bits(2) EL2 = '10';
constant bits(2) EL1 = '01';
constant bits(2) EL0 = '00';

Library pseudocode for shared/functions/system/EL2Enabled

// EL2Enabled()
// ============
// Returns TRUE if EL2 is present and executing
// - with SCR_EL3.NS==1 when Non-secure EL2 is implemented, or
// - with SCR_EL3.NS==0 when Secure EL2 is implemented and enabled, or
// - when EL3 is not implemented.

boolean EL2Enabled()
    return HaveEL(EL2) && (!HaveEL(EL3) || SCR_EL3.NS == '1' || IsSecureEL2Enabled());
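For example, on an implementation without EL3 the !HaveEL(EL3) term is TRUE, so EL2Enabled() reduces to HaveEL(EL2).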

Library pseudocode for shared/functions/system/ELFromM32

// ELFromM32()
// ===========

(boolean,bits(2)) ELFromM32(bits(5) mode)
    // Convert an AArch32 mode encoding to an Exception level.
    // Returns (valid,EL):
    //   'valid' is TRUE if 'mode<4:0>' encodes a mode that is both valid for this implementation
    //           and the current value of SCR.NS/SCR_EL3.NS.
    //   'EL'    is the Exception level decoded from 'mode'.
    bits(2) el;
    boolean valid = !BadMode(mode);    // Check for modes that are not valid for this implementation

    case mode of
        when M32_Monitor
            el = EL3;
        when M32_Hyp
            el = EL2;
            valid = valid && (!HaveEL(EL3) || SCR_GEN[].NS == '1');
        when M32_FIQ, M32_IRQ, M32_Svc, M32_Abort, M32_Undef, M32_System
            // If EL3 is implemented and using AArch32, then these modes are EL3 modes in Secure
            // state, and EL1 modes in Non-secure state. If EL3 is not implemented or is using
            // AArch64, then these modes are EL1 modes.
            el = (if HaveEL(EL3) && HighestELUsingAArch32() && SCR.NS == '0' then EL3 else EL1);
        when M32_User
            el = EL0;
        otherwise
            valid = FALSE;             // Passed an illegal mode value

    if !valid then el = bits(2) UNKNOWN;
    return (valid, el);
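As an illustrative sketch (not part of the library), decoding Hyp mode:

    (valid, el) = ELFromM32(M32_Hyp);   // el == EL2; valid only if Hyp mode is implemented
                                        // and, when EL3 is implemented, SCR_GEN[].NS == '1'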

Library pseudocode for shared/functions/system/ELFromSPSR

// ELFromSPSR()
// ============
// Convert an SPSR value encoding to an Exception level.
// Returns (valid,EL):
//   'valid' is TRUE if 'spsr<4:0>' encodes a valid mode for the current state.
//   'EL'    is the Exception level decoded from 'spsr'.

(boolean,bits(2)) ELFromSPSR(bits(32) spsr)
    if spsr<4> == '0' then                        // AArch64 state
        el = spsr<3:2>;
        if HighestELUsingAArch32() then           // No AArch64 support
            valid = FALSE;
        elsif !HaveEL(el) then                    // Exception level not implemented
            valid = FALSE;
        elsif spsr<1> == '1' then                 // M[1] must be 0
            valid = FALSE;
        elsif el == EL0 && spsr<0> == '1' then    // for EL0, M[0] must be 0
            valid = FALSE;
        elsif el == EL2 && HaveEL(EL3) && !IsSecureEL2Enabled() && SCR_EL3.NS == '0' then
            valid = FALSE;  // Unless Secure EL2 is enabled, EL2 only valid in Non-secure state
        else
            valid = TRUE;
    elsif HaveAnyAArch32() then                   // AArch32 state
        (valid, el) = ELFromM32(spsr<4:0>);
    else
        valid = FALSE;

    if !valid then el = bits(2) UNKNOWN;
    return (valid,el);

Library pseudocode for shared/functions/system/ELIsInHost

// ELIsInHost()
// ============

boolean ELIsInHost(bits(2) el)
    return ((IsSecureEL2Enabled() || !IsSecureBelowEL3()) && HaveVirtHostExt() &&
            !ELUsingAArch32(EL2) && HCR_EL2.E2H == '1' &&
            (el == EL2 || (el == EL0 && HCR_EL2.TGE == '1')));

Library pseudocode for shared/functions/system/ELStateUsingAArch32

// ELStateUsingAArch32()
// =====================

boolean ELStateUsingAArch32(bits(2) el, boolean secure)
    // See ELStateUsingAArch32K() for description. Must only be called in circumstances where
    // result is valid (typically, that means 'el IN {EL1,EL2,EL3}').
    (known, aarch32) = ELStateUsingAArch32K(el, secure);
    assert known;
    return aarch32;

Library pseudocode for shared/functions/system/ELStateUsingAArch32K

// ELStateUsingAArch32K()
// ======================

(boolean,boolean) ELStateUsingAArch32K(bits(2) el, boolean secure)
    // Returns (known, aarch32):
    //   'known'   is FALSE for EL0 if the current Exception level is not EL0 and EL1 is
    //             using AArch64, since it cannot determine the state of EL0; TRUE otherwise.
    //   'aarch32' is TRUE if the specified Exception level is using AArch32; FALSE otherwise.
    if !HaveAArch32EL(el) then
        return (TRUE, FALSE);    // Exception level is using AArch64
    elsif secure && el == EL2 then
        return (TRUE, FALSE);    // Secure EL2 is using AArch64
    elsif HighestELUsingAArch32() then
        return (TRUE, TRUE);     // Highest Exception level, and therefore all levels are using AArch32
    elsif el == HighestEL() then
        return (TRUE, FALSE);    // This is highest Exception level, so is using AArch64

    // Remainder of function deals with the interprocessing cases when highest Exception level is using AArch64
    boolean aarch32 = boolean UNKNOWN;
    boolean known = TRUE;

    aarch32_below_el3 = HaveEL(EL3) && SCR_EL3.RW == '0' &&
                        (!secure || !HaveSecureEL2Ext() || SCR_EL3.EEL2 == '0');
    aarch32_at_el1 = (aarch32_below_el3 ||
                      (HaveEL(EL2) && ((HaveSecureEL2Ext() && SCR_EL3.EEL2 == '1') || !secure) &&
                       HCR_EL2.RW == '0' &&
                       !(HCR_EL2.E2H == '1' && HCR_EL2.TGE == '1' && HaveVirtHostExt())));
    if el == EL0 && !aarch32_at_el1 then          // Only know if EL0 using AArch32 from PSTATE
        if PSTATE.EL == EL0 then
            aarch32 = PSTATE.nRW == '1';          // EL0 controlled by PSTATE
        else
            known = FALSE;                        // EL0 state is UNKNOWN
    else
        aarch32 = (aarch32_below_el3 && el != EL3) || (aarch32_at_el1 && el IN {EL1,EL0});

    if !known then aarch32 = boolean UNKNOWN;
    return (known, aarch32);

Library pseudocode for shared/functions/system/ELUsingAArch32

// ELUsingAArch32()
// ================

boolean ELUsingAArch32(bits(2) el)
    return ELStateUsingAArch32(el, IsSecureBelowEL3());

Library pseudocode for shared/functions/system/ELUsingAArch32K

// ELUsingAArch32K()
// =================

(boolean,boolean) ELUsingAArch32K(bits(2) el)
    return ELStateUsingAArch32K(el, IsSecureBelowEL3());

Library pseudocode for shared/functions/system/EndOfInstruction

// Terminate processing of the current instruction.

EndOfInstruction();

Library pseudocode for shared/functions/system/EnterLowPowerState

// PE enters a low-power state

EnterLowPowerState();

Library pseudocode for shared/functions/system/EventRegister

bits(1) EventRegister;

Library pseudocode for shared/functions/system/GetPSRFromPSTATE

// GetPSRFromPSTATE()
// ==================
// Return a PSR value which represents the current PSTATE

bits(32) GetPSRFromPSTATE()
    bits(32) spsr = Zeros();
    spsr<31:28> = PSTATE.<N,Z,C,V>;
    if HavePANExt() then spsr<22> = PSTATE.PAN;
    spsr<20>    = PSTATE.IL;
    if PSTATE.nRW == '1' then // AArch32 state
        spsr<27>    = PSTATE.Q;
        spsr<26:25> = PSTATE.IT<1:0>;
        if HaveSSBSExt() then spsr<23> = PSTATE.SSBS;
        if HaveDITExt() then spsr<21> = PSTATE.DIT;
        spsr<19:16> = PSTATE.GE;
        spsr<15:10> = PSTATE.IT<7:2>;
        spsr<9>     = PSTATE.E;
        spsr<8:6>   = PSTATE.<A,I,F>;             // No PSTATE.D in AArch32 state
        spsr<5>     = PSTATE.T;
        assert PSTATE.M<4> == PSTATE.nRW;         // bit [4] is the discriminator
        spsr<4:0>   = PSTATE.M;
    else // AArch64 state
        if HaveMTEExt() then spsr<25> = PSTATE.TCO;
        if HaveDITExt() then spsr<24> = PSTATE.DIT;
        if HaveUAOExt() then spsr<23> = PSTATE.UAO;
        spsr<21>    = PSTATE.SS;
        if HaveSSBSExt() then spsr<12> = PSTATE.SSBS;
        if HaveBTIExt() then spsr<11:10> = PSTATE.BTYPE;
        spsr<9:6>   = PSTATE.<D,A,I,F>;
        spsr<4>     = PSTATE.nRW;
        spsr<3:2>   = PSTATE.EL;
        spsr<0>     = PSTATE.SP;
    return spsr;
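GetPSRFromPSTATE() and SetPSTATEFromPSR() later on this page are intended to round-trip. As an illustrative sketch (not part of the library):

    SPSR[] = GetPSRFromPSTATE();   // on exception entry: capture PSTATE as a PSR value
    // ... handle the exception ...
    SetPSTATEFromPSR(SPSR[]);      // on exception return: reinstate the captured PSTATE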

Library pseudocode for shared/functions/system/HasArchVersion

// HasArchVersion()
// ================
// Return TRUE if the implemented architecture includes the extensions defined in the specified
// architecture version.

boolean HasArchVersion(ArchVersion version)
    return version == ARMv8p0 || boolean IMPLEMENTATION_DEFINED;

Library pseudocode for shared/functions/system/HaveAArch32EL

// HaveAArch32EL()
// ===============

boolean HaveAArch32EL(bits(2) el)
    // Return TRUE if Exception level 'el' supports AArch32 in this implementation
    if !HaveEL(el) then
        return FALSE;                    // The Exception level is not implemented
    elsif !HaveAnyAArch32() then
        return FALSE;                    // No Exception level can use AArch32
    elsif HighestELUsingAArch32() then
        return TRUE;                     // All Exception levels are using AArch32
    elsif el == HighestEL() then
        return FALSE;                    // The highest Exception level is using AArch64
    elsif el == EL0 then
        return TRUE;                     // EL0 must support using AArch32 if any AArch32
    return boolean IMPLEMENTATION_DEFINED;

Library pseudocode for shared/functions/system/HaveAnyAArch32

// HaveAnyAArch32()
// ================
// Return TRUE if AArch32 state is supported at any Exception level

boolean HaveAnyAArch32()
    return boolean IMPLEMENTATION_DEFINED;

Library pseudocode for shared/functions/system/HaveAnyAArch64

// HaveAnyAArch64()
// ================
// Return TRUE if AArch64 state is supported at any Exception level

boolean HaveAnyAArch64()
    return !HighestELUsingAArch32();

Library pseudocode for shared/functions/system/HaveEL

// HaveEL()
// ========
// Return TRUE if Exception level 'el' is supported

boolean HaveEL(bits(2) el)
    if el IN {EL1,EL0} then
        return TRUE;                     // EL1 and EL0 must exist
    return boolean IMPLEMENTATION_DEFINED;

Library pseudocode for shared/functions/system/HaveELUsingSecurityState

// HaveELUsingSecurityState()
// ==========================
// Returns TRUE if Exception level 'el' with Security state 'secure' is supported,
// FALSE otherwise.

boolean HaveELUsingSecurityState(bits(2) el, boolean secure)
    case el of
        when EL3
            assert secure;
            return HaveEL(EL3);
        when EL2
            if secure then
                return HaveEL(EL2) && HaveSecureEL2Ext();
            else
                return HaveEL(EL2);
        otherwise
            return (HaveEL(EL3) ||
                    (secure == boolean IMPLEMENTATION_DEFINED "Secure-only implementation"));

Library pseudocode for shared/functions/system/HaveFP16Ext

// HaveFP16Ext()
// =============
// Return TRUE if FP16 extension is supported

boolean HaveFP16Ext()
    return boolean IMPLEMENTATION_DEFINED;

Library pseudocode for shared/functions/system/HighestEL

// HighestEL()
// ===========
// Returns the highest implemented Exception level.

bits(2) HighestEL()
    if HaveEL(EL3) then
        return EL3;
    elsif HaveEL(EL2) then
        return EL2;
    else
        return EL1;

Library pseudocode for shared/functions/system/HighestELUsingAArch32

// HighestELUsingAArch32()
// =======================
// Return TRUE if configured to boot into AArch32 operation

boolean HighestELUsingAArch32()
    if !HaveAnyAArch32() then return FALSE;
    return boolean IMPLEMENTATION_DEFINED;      // e.g. CFG32SIGNAL == HIGH

Library pseudocode for shared/functions/system/Hint_DGH

// Provides a hint to close any gathering occurring within the micro-architecture.

Hint_DGH();

Library pseudocode for shared/functions/system/Hint_Yield

// Provides a hint that the task performed by a thread is of low
// importance so that it could yield to improve overall performance.

Hint_Yield();

Library pseudocode for shared/functions/system/IllegalExceptionReturn

// IllegalExceptionReturn()
// ========================

boolean IllegalExceptionReturn(bits(32) spsr)

    // Check for illegal return:
    //   * To an unimplemented Exception level.
    //   * To EL2 in Secure state, when SecureEL2 is not enabled.
    //   * To EL0 using AArch64 state, with SPSR.M[0]==1.
    //   * To AArch64 state with SPSR.M[1]==1.
    //   * To AArch32 state with an illegal value of SPSR.M.
    (valid, target) = ELFromSPSR(spsr);
    if !valid then return TRUE;

    // Check for return to higher Exception level
    if UInt(target) > UInt(PSTATE.EL) then return TRUE;

    spsr_mode_is_aarch32 = (spsr<4> == '1');

    // Check for illegal return:
    //   * To EL1, EL2 or EL3 with register width specified in the SPSR different from the
    //     Execution state used in the Exception level being returned to, as determined by
    //     the SCR_EL3.RW or HCR_EL2.RW bits, or as configured from reset.
    //   * To EL0 using AArch64 state when EL1 is using AArch32 state as determined by the
    //     SCR_EL3.RW or HCR_EL2.RW bits or as configured from reset.
    //   * To AArch64 state from AArch32 state (should be caught by above)
    (known, target_el_is_aarch32) = ELUsingAArch32K(target);
    assert known || (target == EL0 && !ELUsingAArch32(EL1));
    if known && spsr_mode_is_aarch32 != target_el_is_aarch32 then return TRUE;

    // Check for illegal return from AArch32 to AArch64
    if UsingAArch32() && !spsr_mode_is_aarch32 then return TRUE;

    // Check for illegal return to EL1 when HCR.TGE is set and when either of
    //   * SecureEL2 is enabled.
    //   * SecureEL2 is not enabled and EL1 is in Non-secure state.
    if HaveEL(EL2) && target == EL1 && HCR_EL2.TGE == '1' then
        if (!IsSecureBelowEL3() || IsSecureEL2Enabled()) then return TRUE;

    return FALSE;
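IllegalExceptionReturn() is consumed by SetPSTATEFromPSR() later on this page: an illegal return does not fault, but sets PSTATE.IL to '1' and leaves the Exception level and related state unchanged.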

Library pseudocode for shared/functions/system/InstrSet

enumeration InstrSet {InstrSet_A64, InstrSet_A32, InstrSet_T32};

Library pseudocode for shared/functions/system/InstructionSynchronizationBarrier

InstructionSynchronizationBarrier();

Library pseudocode for shared/functions/system/InterruptPending

// InterruptPending()
// ==================
// Return TRUE if there are any pending physical or virtual
// interrupts, and FALSE otherwise.

boolean InterruptPending()
    pending_physical_interrupt = (IRQPending() || FIQPending() || IsPhysicalSErrorPending());
    pending_virtual_interrupt = !IsInHost() && ((HCR_EL2.<VSE,VI,VF> AND HCR_EL2.<AMO,IMO,FMO>) != '000');

    return pending_physical_interrupt || pending_virtual_interrupt;

Library pseudocode for shared/functions/system/IsEventRegisterSet

// IsEventRegisterSet()
// ====================
// Return TRUE if the Event Register of this PE is set, and FALSE otherwise

boolean IsEventRegisterSet()
    return EventRegister == '1';

Library pseudocode for shared/functions/system/IsHighestEL

// IsHighestEL()
// =============
// Returns TRUE if given exception level is the highest exception level implemented

boolean IsHighestEL(bits(2) el)
    return HighestEL() == el;

Library pseudocode for shared/functions/system/IsInHost

// IsInHost()
// ==========

boolean IsInHost()
    return ELIsInHost(PSTATE.EL);

Library pseudocode for shared/functions/system/IsPhysicalSErrorPending

// Return TRUE if a physical SError interrupt is pending

boolean IsPhysicalSErrorPending();

Library pseudocode for shared/functions/system/IsSecure

// IsSecure()
// ==========
// Returns TRUE if current Exception level is in Secure state.

boolean IsSecure()
    if HaveEL(EL3) && !UsingAArch32() && PSTATE.EL == EL3 then
        return TRUE;
    elsif HaveEL(EL3) && UsingAArch32() && PSTATE.M == M32_Monitor then
        return TRUE;
    return IsSecureBelowEL3();

Library pseudocode for shared/functions/system/IsSecureBelowEL3

// IsSecureBelowEL3()
// ==================
// Return TRUE if an Exception level below EL3 is in Secure state
// or would be following an exception return to that level.
//
// Differs from IsSecure in that it ignores the current EL or Mode
// in considering security state.
// That is, if at AArch64 EL3 or in AArch32 Monitor mode, whether an
// exception return would pass to Secure or Non-secure state.

boolean IsSecureBelowEL3()
    if HaveEL(EL3) then
        return SCR_GEN[].NS == '0';
    elsif HaveEL(EL2) && (!HaveSecureEL2Ext() || HighestELUsingAArch32()) then
        // If Secure EL2 is not an architecture option then we must be Non-secure.
        return FALSE;
    else
        // TRUE if processor is Secure or FALSE if Non-secure.
        return boolean IMPLEMENTATION_DEFINED "Secure-only implementation";
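For example, at AArch64 EL3 with SCR_EL3.NS == '1', IsSecure() returns TRUE because the PE is executing at EL3, while IsSecureBelowEL3() returns FALSE because an exception return would enter Non-secure state.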

Library pseudocode for shared/functions/system/IsSecureEL2Enabled

// IsSecureEL2Enabled()
// ====================
// Returns TRUE if Secure EL2 is enabled, FALSE otherwise.

boolean IsSecureEL2Enabled()
    if HaveEL(EL2) && HaveSecureEL2Ext() then
        if HaveEL(EL3) then
            if !ELUsingAArch32(EL3) && SCR_EL3.EEL2 == '1' then
                return TRUE;
            else
                return FALSE;
        else
            return IsSecure();
    else
        return FALSE;

Library pseudocode for shared/functions/system/IsSynchronizablePhysicalSErrorPending

// Return TRUE if a synchronizable physical SError interrupt is pending

boolean IsSynchronizablePhysicalSErrorPending();

Library pseudocode for shared/functions/system/IsVirtualSErrorPending

// Return TRUE if a virtual SError interrupt is pending

boolean IsVirtualSErrorPending();

Library pseudocode for shared/functions/system/Mode_Bits

constant bits(5) M32_User    = '10000';
constant bits(5) M32_FIQ     = '10001';
constant bits(5) M32_IRQ     = '10010';
constant bits(5) M32_Svc     = '10011';
constant bits(5) M32_Monitor = '10110';
constant bits(5) M32_Abort   = '10111';
constant bits(5) M32_Hyp     = '11010';
constant bits(5) M32_Undef   = '11011';
constant bits(5) M32_System  = '11111';

Library pseudocode for shared/functions/system/PLOfEL

// PLOfEL()
// ========

PrivilegeLevel PLOfEL(bits(2) el)
    case el of
        when EL3 return if HighestELUsingAArch32() then PL1 else PL3;
        when EL2 return PL2;
        when EL1 return PL1;
        when EL0 return PL0;
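EL3 maps to PL1 when the highest Exception level is using AArch32 because AArch32 Monitor mode is a PL1 mode; there is no PL3 in AArch32 state.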

Library pseudocode for shared/functions/system/PSTATE

ProcState PSTATE;

Library pseudocode for shared/functions/system/PrivilegeLevel

enumeration PrivilegeLevel {PL3, PL2, PL1, PL0};

Library pseudocode for shared/functions/system/ProcState

type ProcState is (
    bits (1) N,        // Negative condition flag
    bits (1) Z,        // Zero condition flag
    bits (1) C,        // Carry condition flag
    bits (1) V,        // oVerflow condition flag
    bits (1) D,        // Debug mask bit                     [AArch64 only]
    bits (1) A,        // SError interrupt mask bit
    bits (1) I,        // IRQ mask bit
    bits (1) F,        // FIQ mask bit
    bits (1) PAN,      // Privileged Access Never Bit        [v8.1]
    bits (1) UAO,      // User Access Override               [v8.2]
    bits (1) DIT,      // Data Independent Timing            [v8.4]
    bits (1) TCO,      // Tag Check Override                 [v8.5, AArch64 only]
    bits (2) BTYPE,    // Branch Type                        [v8.5]
    bits (1) SS,       // Software step bit
    bits (1) IL,       // Illegal Execution state bit
    bits (2) EL,       // Exception Level
    bits (1) nRW,      // not Register Width: 0=64, 1=32
    bits (1) SP,       // Stack pointer select: 0=SP0, 1=SPx [AArch64 only]
    bits (1) Q,        // Cumulative saturation flag         [AArch32 only]
    bits (4) GE,       // Greater than or Equal flags        [AArch32 only]
    bits (1) SSBS,     // Speculative Store Bypass Safe
    bits (8) IT,       // If-then bits, RES0 in CPSR         [AArch32 only]
    bits (1) J,        // J bit, RES0                        [AArch32 only, RES0 in SPSR and CPSR]
    bits (1) T,        // T32 bit, RES0 in CPSR              [AArch32 only]
    bits (1) E,        // Endianness bit                     [AArch32 only]
    bits (5) M         // Mode field                         [AArch32 only]
)

Library pseudocode for shared/functions/system/RestoredITBits

// RestoredITBits()
// ================
// Get the value of PSTATE.IT to be restored on this exception return.

bits(8) RestoredITBits(bits(32) spsr)
    it = spsr<15:10,26:25>;

    // When PSTATE.IL is set, it is CONSTRAINED UNPREDICTABLE whether the IT bits are each set
    // to zero or copied from the SPSR.
    if PSTATE.IL == '1' then
        if ConstrainUnpredictableBool(Unpredictable_ILZEROIT) then return '00000000';
        else return it;

    // The IT bits are forced to zero when they are set to a reserved value.
    if !IsZero(it<7:4>) && IsZero(it<3:0>) then
        return '00000000';

    // The IT bits are forced to zero when returning to A32 state, or when returning to an EL
    // with the ITD bit set to 1, and the IT bits are describing a multi-instruction block.
    itd = if PSTATE.EL == EL2 then HSCTLR.ITD else SCTLR.ITD;
    if (spsr<5> == '0' && !IsZero(it)) || (itd == '1' && !IsZero(it<2:0>)) then
        return '00000000';
    else
        return it;
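As a worked example: it = '01100000' has it<7:4> != '0000' but it<3:0> == '0000', which is a reserved IT value, so '00000000' is restored in its place.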

Library pseudocode for shared/functions/system/SCRType

type SCRType;

Library pseudocode for shared/functions/system/SCR_GEN

// SCR_GEN[]
// =========

SCRType SCR_GEN[]
    // AArch32 secure & AArch64 EL3 registers are not architecturally mapped
    assert HaveEL(EL3);
    bits(64) r;
    if HighestELUsingAArch32() then
        r = ZeroExtend(SCR);
    else
        r = ZeroExtend(SCR_EL3);
    return r;

Library pseudocode for shared/functions/system/SendEvent

// Signal an event to all PEs in a multiprocessor system to set their Event Registers.
// When a PE executes the SEV instruction, it causes this function to be executed

SendEvent();

Library pseudocode for shared/functions/system/SendEventLocal

// SendEventLocal()
// ================
// Set the local Event Register of this PE.
// When a PE executes the SEVL instruction, it causes this function to be executed

SendEventLocal()
    EventRegister = '1';
    return;

Library pseudocode for shared/functions/system/SetPSTATEFromPSR

// SetPSTATEFromPSR()
// ==================
// Set PSTATE based on a PSR value

SetPSTATEFromPSR(bits(32) spsr)
    PSTATE.SS = DebugExceptionReturnSS(spsr);
    if IllegalExceptionReturn(spsr) then
        PSTATE.IL = '1';
        if HaveSSBSExt() then PSTATE.SSBS = bit UNKNOWN;
        if HaveBTIExt() then PSTATE.BTYPE = bits(2) UNKNOWN;
        if HaveUAOExt() then PSTATE.UAO = bit UNKNOWN;
        if HaveDITExt() then PSTATE.DIT = bit UNKNOWN;
        if HaveMTEExt() then PSTATE.TCO = bit UNKNOWN;
    else
        // State that is reinstated only on a legal exception return
        PSTATE.IL = spsr<20>;
        if spsr<4> == '1' then                    // AArch32 state
            AArch32.WriteMode(spsr<4:0>);         // Sets PSTATE.EL correctly
            if HaveSSBSExt() then PSTATE.SSBS = spsr<23>;
        else                                      // AArch64 state
            PSTATE.nRW = '0';
            PSTATE.EL  = spsr<3:2>;
            PSTATE.SP  = spsr<0>;
            if HaveBTIExt() then PSTATE.BTYPE = spsr<11:10>;
            if HaveSSBSExt() then PSTATE.SSBS = spsr<12>;
            if HaveUAOExt() then PSTATE.UAO = spsr<23>;
            if HaveDITExt() then PSTATE.DIT = spsr<24>;
            if HaveMTEExt() then PSTATE.TCO = spsr<25>;

    // If PSTATE.IL is set and returning to AArch32 state, it is CONSTRAINED UNPREDICTABLE whether
    // the T bit is set to zero or copied from SPSR.
    if PSTATE.IL == '1' && PSTATE.nRW == '1' then
        if ConstrainUnpredictableBool(Unpredictable_ILZEROT) then spsr<5> = '0';

    // State that is reinstated regardless of illegal exception return
    PSTATE.<N,Z,C,V> = spsr<31:28>;
    if HavePANExt() then PSTATE.PAN = spsr<22>;
    if PSTATE.nRW == '1' then                     // AArch32 state
        PSTATE.Q         = spsr<27>;
        PSTATE.IT        = RestoredITBits(spsr);
        ShouldAdvanceIT  = FALSE;
        if HaveDITExt() then PSTATE.DIT = (if Restarting() then spsr<24> else spsr<21>);
        PSTATE.GE        = spsr<19:16>;
        PSTATE.E         = spsr<9>;
        PSTATE.<A,I,F>   = spsr<8:6>;             // No PSTATE.D in AArch32 state
        PSTATE.T         = spsr<5>;               // PSTATE.J is RES0
    else                                          // AArch64 state
        PSTATE.<D,A,I,F> = spsr<9:6>;             // No PSTATE.<Q,IT,GE,E,T> in AArch64 state
    return;
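As an illustrative sketch (not part of the library), the tail of an exception-return sequence; target is a hypothetical local assumed to have been read from the appropriate ELR:

    bits(32) spsr = SPSR[];
    SetPSTATEFromPSR(spsr);             // an illegal spsr sets PSTATE.IL instead of trapping
    BranchTo(target, BranchType_ERET);  // jump to the return address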

Library pseudocode for shared/functions/system/ShouldAdvanceIT

boolean ShouldAdvanceIT;

Library pseudocode for shared/functions/system/SpeculationBarrier

SpeculationBarrier();

Library pseudocode for shared/functions/system/EL2Enabled

// EL2Enabled()
// ============
// Returns TRUE if EL2 is present and executing
// - with SCR_EL3.NS==1 when Non-secure EL2 is implemented, or
// - with SCR_EL3.NS==0 when Secure EL2 is implemented and enabled, or
// - when EL3 is not implemented.

boolean EL2Enabled()
    return HaveEL(EL2) && (!HaveEL(EL3) || SCR_EL3.NS == '1' || IsSecureEL2Enabled());
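
EL2Enabled() reduces to a single boolean expression over the implemented-EL and Secure-EL2 predicates. A direct C transliteration, with hypothetical accessor names standing in for the pseudocode functions and the SCR_EL3.NS bit, might look like this (illustration only):

#include <stdbool.h>

extern bool have_el(int el);               /* stands in for HaveEL()             */
extern bool scr_el3_ns(void);              /* stands in for SCR_EL3.NS == '1'    */
extern bool is_secure_el2_enabled(void);   /* stands in for IsSecureEL2Enabled() */

bool el2_enabled(void)
{
    /* EL2 must exist, and either there is no EL3, or we are Non-secure,
     * or Secure EL2 is implemented and enabled. */
    return have_el(2) && (!have_el(3) || scr_el3_ns() || is_secure_el2_enabled());
}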

Library pseudocode for shared/functions/system/ELFromM32

// ELFromM32()
// ===========

(boolean,bits(2)) ELFromM32(bits(5) mode)
    // Convert an AArch32 mode encoding to an Exception level.
    // Returns (valid,EL):
    //   'valid' is TRUE if 'mode<4:0>' encodes a mode that is both valid for this implementation
    //           and the current value of SCR.NS/SCR_EL3.NS.
    //   'EL'    is the Exception level decoded from 'mode'.
    bits(2) el;
    boolean valid = !BadMode(mode);  // Check for modes that are not valid for this implementation

    case mode of
        when M32_Monitor
            el = EL3;
        when M32_Hyp
            el = EL2;
            valid = valid && (!HaveEL(EL3) || SCR_GEN[].NS == '1');
        when M32_FIQ, M32_IRQ, M32_Svc, M32_Abort, M32_Undef, M32_System
            // If EL3 is implemented and using AArch32, then these modes are EL3 modes in Secure
            // state, and EL1 modes in Non-secure state. If EL3 is not implemented or is using
            // AArch64, then these modes are EL1 modes.
            el = (if HaveEL(EL3) && HighestELUsingAArch32() && SCR.NS == '0' then EL3 else EL1);
        when M32_User
            el = EL0;
        otherwise
            valid = FALSE;           // Passed an illegal mode value

    if !valid then el = bits(2) UNKNOWN;
    return (valid, el);

Library pseudocode for shared/functions/system/ELFromSPSR

// ELFromSPSR()
// ============
// Convert an SPSR value encoding to an Exception level.
// Returns (valid,EL):
//   'valid' is TRUE if 'spsr<4:0>' encodes a valid mode for the current state.
//   'EL'    is the Exception level decoded from 'spsr'.

(boolean,bits(2)) ELFromSPSR(bits(N) spsr)
    if spsr<4> == '0' then                      // AArch64 state
        el = spsr<3:2>;
        if HighestELUsingAArch32() then         // No AArch64 support
            valid = FALSE;
        elsif !HaveEL(el) then                  // Exception level not implemented
            valid = FALSE;
        elsif spsr<1> == '1' then               // M[1] must be 0
            valid = FALSE;
        elsif el == EL0 && spsr<0> == '1' then  // for EL0, M[0] must be 0
            valid = FALSE;
        elsif el == EL2 && HaveEL(EL3) && !IsSecureEL2Enabled() && SCR_EL3.NS == '0' then
            valid = FALSE;  // Unless Secure EL2 is enabled, EL2 only valid in Non-secure state
        else
            valid = TRUE;
    elsif HaveAnyAArch32() then                 // AArch32 state
        (valid, el) = ELFromM32(spsr<4:0>);
    else
        valid = FALSE;

    if !valid then el = bits(2) UNKNOWN;
    return (valid,el);
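
For the AArch64-state arm of the decode (spsr<4> == '0'), the structural checks on M[1:0] can be shown in isolation. The C sketch below assumes a simplified implementation where all of EL0-EL3 exist, only AArch64 is supported, and Secure EL2 is not implemented, so the HighestELUsingAArch32(), HaveEL() and SCR_EL3.NS checks drop out; the names are illustrative, not part of the pseudocode library.

#include <stdbool.h>

bool el_from_spsr_aarch64(unsigned spsr, unsigned *el_out)
{
    if (spsr & 0x10) return false;              /* spsr<4> == 1 would be AArch32 state */
    unsigned el = (spsr >> 2) & 0x3;            /* spsr<3:2> encodes the target EL     */
    if (spsr & 0x2) return false;               /* M[1] must be 0                      */
    if (el == 0 && (spsr & 0x1)) return false;  /* for EL0, M[0] must be 0             */
    *el_out = el;                               /* M[0] selects SP0/SPx at EL1-EL3     */
    return true;
}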

Library pseudocode for shared/functions/system/ELIsInHost

// ELIsInHost()
// ============

boolean ELIsInHost(bits(2) el)
    if !HaveVirtHostExt() || ELUsingAArch32(EL2) then
        return FALSE;
    case el of
        when EL3
            return FALSE;
        when EL2
            return HCR_EL2.E2H == '1';
        when EL1
            return FALSE;
        when EL0
            return EL2Enabled() && HCR_EL2.<E2H,TGE> == '11';
        otherwise
            Unreachable();

Library pseudocode for shared/functions/system/ELStateUsingAArch32

// ELStateUsingAArch32()
// =====================

boolean ELStateUsingAArch32(bits(2) el, boolean secure)
    // See ELStateUsingAArch32K() for description. Must only be called in circumstances where
    // result is valid (typically, that means 'el IN {EL1,EL2,EL3}').
    (known, aarch32) = ELStateUsingAArch32K(el, secure);
    assert known;
    return aarch32;

Library pseudocode for shared/functions/system/ELStateUsingAArch32K

// ELStateUsingAArch32K()
// ======================

(boolean,boolean) ELStateUsingAArch32K(bits(2) el, boolean secure)
    // Returns (known, aarch32):
    //   'known'   is FALSE for EL0 if the current Exception level is not EL0 and EL1 is
    //             using AArch64, since it cannot determine the state of EL0; TRUE otherwise.
    //   'aarch32' is TRUE if the specified Exception level is using AArch32; FALSE otherwise.
    if !HaveAArch32EL(el) then
        return (TRUE, FALSE);   // Exception level is using AArch64
    elsif secure && el == EL2 then
        return (TRUE, FALSE);   // Secure EL2 is using AArch64
    elsif HighestELUsingAArch32() then
        return (TRUE, TRUE);    // Highest Exception level, and therefore all levels are using AArch32
    elsif el == HighestEL() then
        return (TRUE, FALSE);   // This is highest Exception level, so is using AArch64

    // Remainder of function deals with the interprocessing cases when highest Exception level
    // is using AArch64

    boolean aarch32 = boolean UNKNOWN;
    boolean known = TRUE;

    aarch32_below_el3 = HaveEL(EL3) && SCR_EL3.RW == '0' &&
                        (!secure || !HaveSecureEL2Ext() || SCR_EL3.EEL2 == '0');

    aarch32_at_el1 = (aarch32_below_el3 ||
                      (HaveEL(EL2) && ((HaveSecureEL2Ext() && SCR_EL3.EEL2 == '1') || !secure) &&
                       HCR_EL2.RW == '0' &&
                       !(HCR_EL2.E2H == '1' && HCR_EL2.TGE == '1' && HaveVirtHostExt())));

    if el == EL0 && !aarch32_at_el1 then          // Only know if EL0 using AArch32 from PSTATE
        if PSTATE.EL == EL0 then
            aarch32 = PSTATE.nRW == '1';          // EL0 controlled by PSTATE
        else
            known = FALSE;                        // EL0 state is UNKNOWN
    else
        aarch32 = (aarch32_below_el3 && el != EL3) || (aarch32_at_el1 && el IN {EL1,EL0});

    if !known then aarch32 = boolean UNKNOWN;
    return (known, aarch32);

Library pseudocode for shared/functions/system/ELUsingAArch32

// ELUsingAArch32()
// ================

boolean ELUsingAArch32(bits(2) el)
    return ELStateUsingAArch32(el, IsSecureBelowEL3());

Library pseudocode for shared/functions/system/ELUsingAArch32K

// ELUsingAArch32K()
// =================

(boolean,boolean) ELUsingAArch32K(bits(2) el)
    return ELStateUsingAArch32K(el, IsSecureBelowEL3());

Library pseudocode for shared/functions/system/EndOfInstruction

// Terminate processing of the current instruction.
EndOfInstruction();

Library pseudocode for shared/functions/system/EnterLowPowerState

// PE enters a low-power state.
EnterLowPowerState();

Library pseudocode for shared/functions/system/EventRegister

bits(1) EventRegister;

Library pseudocode for shared/functions/system/ExceptionalOccurrenceTargetState

enumeration ExceptionalOccurrenceTargetState {AArch32_NonDebugState, AArch64_NonDebugState, DebugState};

Library pseudocode for shared/functions/system/FIQPending

// Returns TRUE if there is any pending physical FIQ
boolean FIQPending();

Library pseudocode for shared/functions/system/GetPSRFromPSTATE

// GetPSRFromPSTATE()
// ==================
// Return a PSR value which represents the current PSTATE

bits(N) GetPSRFromPSTATE(ExceptionalOccurrenceTargetState targetELState)
    if UsingAArch32() && (targetELState IN {AArch32_NonDebugState, DebugState}) then
        assert N == 32;
    else
        assert N == 64;
    bits(N) spsr = Zeros();
    spsr<31:28> = PSTATE.<N,Z,C,V>;
    if HavePANExt() then spsr<22> = PSTATE.PAN;
    spsr<20>    = PSTATE.IL;
    if PSTATE.nRW == '1' then                    // AArch32 state
        spsr<27>    = PSTATE.Q;
        spsr<26:25> = PSTATE.IT<1:0>;
        if HaveSSBSExt() then spsr<23> = PSTATE.SSBS;
        if HaveDITExt() then
            if targetELState == AArch32_NonDebugState then
                spsr<21> = PSTATE.DIT;
            else                                 // AArch64_NonDebugState or DebugState
                spsr<24> = PSTATE.DIT;
        if targetELState IN {AArch64_NonDebugState, DebugState} then
            spsr<21> = PSTATE.SS;
        spsr<19:16> = PSTATE.GE;
        spsr<15:10> = PSTATE.IT<7:2>;
        spsr<9>     = PSTATE.E;
        spsr<8:6>   = PSTATE.<A,I,F>;            // No PSTATE.D in AArch32 state
        spsr<5>     = PSTATE.T;
        assert PSTATE.M<4> == PSTATE.nRW;        // bit [4] is the discriminator
        spsr<4:0>   = PSTATE.M;
    else                                         // AArch64 state
        if HaveMTEExt() then spsr<25> = PSTATE.TCO;
        if HaveDITExt() then spsr<24> = PSTATE.DIT;
        if HaveUAOExt() then spsr<23> = PSTATE.UAO;
        spsr<21>    = PSTATE.SS;
        if HaveSSBSExt() then spsr<12> = PSTATE.SSBS;
        if HaveBTIExt() then spsr<11:10> = PSTATE.BTYPE;
        spsr<9:6>   = PSTATE.<D,A,I,F>;
        spsr<4>     = PSTATE.nRW;
        spsr<3:2>   = PSTATE.EL;
        spsr<0>     = PSTATE.SP;
    return spsr;
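
The AArch64 packing performed by GetPSRFromPSTATE() is plain bit assembly. The following hypothetical C sketch shows only the unconditional AArch64-state fields (N,Z,C,V, D,A,I,F, EL, SP), omitting the optional-extension bits; the struct and function names are inventions for illustration.

#include <stdint.h>

struct pstate { unsigned n, z, c, v, d, a, i, f, el, sp; };

uint64_t psr_from_pstate_aarch64(const struct pstate *ps)
{
    uint64_t spsr = 0;
    spsr |= (uint64_t)ps->n << 31 | (uint64_t)ps->z << 30 |
            (uint64_t)ps->c << 29 | (uint64_t)ps->v << 28;   /* spsr<31:28> = N,Z,C,V */
    spsr |= (uint64_t)ps->d << 9 | (uint64_t)ps->a << 8 |
            (uint64_t)ps->i << 7 | (uint64_t)ps->f << 6;     /* spsr<9:6>  = D,A,I,F  */
    spsr |= (uint64_t)(ps->el & 0x3) << 2;                   /* spsr<3:2>  = EL       */
    spsr |= (uint64_t)(ps->sp & 0x1);                        /* spsr<0>    = SP       */
    return spsr;                         /* spsr<4> (nRW) stays 0 for AArch64 state */
}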

Library pseudocode for shared/functions/system/HasArchVersion

// HasArchVersion()
// ================
// Returns TRUE if the implemented architecture includes the extensions defined in the specified
// architecture version.

boolean HasArchVersion(ArchVersion version)
    return version == ARMv8p0 || boolean IMPLEMENTATION_DEFINED;

Library pseudocode for shared/functions/system/HaveAArch32EL

// HaveAArch32EL()
// ===============

boolean HaveAArch32EL(bits(2) el)
    // Return TRUE if Exception level 'el' supports AArch32 in this implementation
    if !HaveEL(el) then
        return FALSE;                  // The Exception level is not implemented
    elsif !HaveAnyAArch32() then
        return FALSE;                  // No Exception level can use AArch32
    elsif HighestELUsingAArch32() then
        return TRUE;                   // All Exception levels are using AArch32
    elsif el == HighestEL() then
        return FALSE;                  // The highest Exception level is using AArch64
    elsif el == EL0 then
        return TRUE;                   // EL0 must support using AArch32 if any AArch32
    return boolean IMPLEMENTATION_DEFINED;

Library pseudocode for shared/functions/system/HaveAnyAArch32

// HaveAnyAArch32()
// ================
// Return TRUE if AArch32 state is supported at any Exception level

boolean HaveAnyAArch32()
    return boolean IMPLEMENTATION_DEFINED;

Library pseudocode for shared/functions/system/HaveAnyAArch64

// HaveAnyAArch64()
// ================
// Return TRUE if AArch64 state is supported at any Exception level

boolean HaveAnyAArch64()
    return !HighestELUsingAArch32();

Library pseudocode for shared/functions/system/HaveEL

// HaveEL()
// ========
// Return TRUE if Exception level 'el' is supported

boolean HaveEL(bits(2) el)
    if el IN {EL1,EL0} then
        return TRUE;                   // EL1 and EL0 must exist
    return boolean IMPLEMENTATION_DEFINED;

Library pseudocode for shared/functions/system/HaveELUsingSecurityState

// HaveELUsingSecurityState()
// ==========================
// Returns TRUE if Exception level 'el' with Security state 'secure' is supported,
// FALSE otherwise.

boolean HaveELUsingSecurityState(bits(2) el, boolean secure)
    case el of
        when EL3
            assert secure;
            return HaveEL(EL3);
        when EL2
            if secure then
                return HaveEL(EL2) && HaveSecureEL2Ext();
            else
                return HaveEL(EL2);
        otherwise
            return (HaveEL(EL3) ||
                    (secure == boolean IMPLEMENTATION_DEFINED "Secure-only implementation"));

Library pseudocode for shared/functions/system/HaveFP16Ext

// HaveFP16Ext()
// =============
// Return TRUE if FP16 extension is supported

boolean HaveFP16Ext()
    return boolean IMPLEMENTATION_DEFINED;

Library pseudocode for shared/functions/system/HighestEL

// HighestEL()
// ===========
// Returns the highest implemented Exception level.

bits(2) HighestEL()
    if HaveEL(EL3) then
        return EL3;
    elsif HaveEL(EL2) then
        return EL2;
    else
        return EL1;

Library pseudocode for shared/functions/system/HighestELUsingAArch32

// HighestELUsingAArch32()
// =======================
// Return TRUE if configured to boot into AArch32 operation

boolean HighestELUsingAArch32()
    if !HaveAnyAArch32() then return FALSE;
    return boolean IMPLEMENTATION_DEFINED;     // e.g. CFG32SIGNAL == HIGH

Library pseudocode for shared/functions/system/Hint_DGH

// Provides a hint to close any gathering occurring within the micro-architecture.
Hint_DGH();

Library pseudocode for shared/functions/system/Hint_WFE

// Hint_WFE()
// ==========
// Provides a hint indicating that the PE can enter a low-power state
// and remain there until a wakeup event occurs or, for WFET, a local
// timeout event is generated when the virtual timer value equals or
// exceeds the supplied threshold value.

Hint_WFE(integer localtimeout)
    if IsEventRegisterSet() then
        ClearEventRegister();
    else
        trap = FALSE;
        if PSTATE.EL == EL0 then
            // Check for traps described by the OS which may be EL1 or EL2.
            if HaveTWEDExt() then
                sctlr = SCTLR[];
                trap = sctlr.nTWE == '0';
                target_el = EL1;
            else
                AArch64.CheckForWFxTrap(EL1, TRUE);
        if !trap && PSTATE.EL IN {EL0, EL1} && EL2Enabled() && !IsInHost() then
            // Check for traps described by the Hypervisor.
            if HaveTWEDExt() then
                trap = HCR_EL2.TWE == '1';
                target_el = EL2;
            else
                AArch64.CheckForWFxTrap(EL2, TRUE);
        if !trap && HaveEL(EL3) && PSTATE.EL != EL3 then
            // Check for traps described by the Secure Monitor.
            if HaveTWEDExt() then
                trap = SCR_EL3.TWE == '1';
                target_el = EL3;
            else
                AArch64.CheckForWFxTrap(EL3, TRUE);
        if trap && PSTATE.EL != EL3 then
            (delay_enabled, delay) = WFETrapDelay(target_el);  // (If trap delay is enabled, Delay amount)
            if !WaitForEventUntilDelay(delay_enabled, delay) then
                // Event did not arrive before delay expired
                AArch64.WFxTrap(target_el, TRUE);              // Trap WFE
        else
            WaitForEvent(localtimeout);

Library pseudocode for shared/functions/system/Hint_WFI

// Hint_WFI()
// ==========
// Provides a hint indicating that the PE can enter a low-power state and
// remain there until a wakeup event occurs or, for WFIT, a local timeout
// event is generated when the virtual timer value equals or exceeds the
// supplied threshold value.

Hint_WFI(integer localtimeout)
    if !InterruptPending() then
        if PSTATE.EL == EL0 then
            // Check for traps described by the OS which may be EL1 or EL2.
            AArch64.CheckForWFxTrap(EL1, FALSE);
        if PSTATE.EL IN {EL0, EL1} && EL2Enabled() && !IsInHost() then
            // Check for traps described by the Hypervisor.
            AArch64.CheckForWFxTrap(EL2, FALSE);
        if HaveEL(EL3) && PSTATE.EL != EL3 then
            // Check for traps described by the Secure Monitor.
            AArch64.CheckForWFxTrap(EL3, FALSE);
        WaitForInterrupt(localtimeout);

Library pseudocode for shared/functions/system/Hint_Yield

// Provides a hint that the task performed by a thread is of low
// importance so that it could yield to improve overall performance.
Hint_Yield();

Library pseudocode for shared/functions/system/IRQPending

// Returns TRUE if there is any pending physical IRQ
boolean IRQPending();

Library pseudocode for shared/functions/system/IllegalExceptionReturn

// IllegalExceptionReturn()
// ========================

boolean IllegalExceptionReturn(bits(N) spsr)
    // Check for illegal return:
    //   * To an unimplemented Exception level.
    //   * To EL2 in Secure state, when SecureEL2 is not enabled.
    //   * To EL0 using AArch64 state, with SPSR.M[0]==1.
    //   * To AArch64 state with SPSR.M[1]==1.
    //   * To AArch32 state with an illegal value of SPSR.M.
    (valid, target) = ELFromSPSR(spsr);
    if !valid then return TRUE;

    // Check for return to higher Exception level
    if UInt(target) > UInt(PSTATE.EL) then return TRUE;

    spsr_mode_is_aarch32 = (spsr<4> == '1');

    // Check for illegal return:
    //   * To EL1, EL2 or EL3 with register width specified in the SPSR different from the
    //     Execution state used in the Exception level being returned to, as determined by
    //     the SCR_EL3.RW or HCR_EL2.RW bits, or as configured from reset.
    //   * To EL0 using AArch64 state when EL1 is using AArch32 state as determined by the
    //     SCR_EL3.RW or HCR_EL2.RW bits or as configured from reset.
    //   * To AArch64 state from AArch32 state (should be caught by above)
    (known, target_el_is_aarch32) = ELUsingAArch32K(target);
    assert known || (target == EL0 && !ELUsingAArch32(EL1));
    if known && spsr_mode_is_aarch32 != target_el_is_aarch32 then return TRUE;

    // Check for illegal return from AArch32 to AArch64
    if UsingAArch32() && !spsr_mode_is_aarch32 then return TRUE;

    // Check for illegal return to EL1 when HCR.TGE is set and when either of
    //   * SecureEL2 is enabled.
    //   * SecureEL2 is not enabled and EL1 is in Non-secure state.
    if HaveEL(EL2) && target == EL1 && HCR_EL2.TGE == '1' then
        if (!IsSecureBelowEL3() || IsSecureEL2Enabled()) then return TRUE;

    return FALSE;
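
The first two rules are easy to state on their own: a return is illegal if SPSR.M does not encode a valid mode, or if it targets a higher Exception level than the current one. A hedged C fragment, where 'valid' and 'target_el' are assumed to come from an ELFromSPSR()-style decode and the name is illustrative:

#include <stdbool.h>

bool illegal_return_prefix(bool valid, unsigned target_el, unsigned current_el)
{
    if (!valid) return true;                  /* SPSR.M does not encode a valid mode */
    if (target_el > current_el) return true;  /* never return to a higher EL         */
    return false;                             /* width/TGE checks not shown here     */
}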

Library pseudocode for shared/functions/system/InstrSet

enumeration InstrSet {InstrSet_A64, InstrSet_A32, InstrSet_T32};

Library pseudocode for shared/functions/system/InstructionSynchronizationBarrier

InstructionSynchronizationBarrier();

Library pseudocode for shared/functions/system/InterruptPending

// InterruptPending()
// ==================
// Returns TRUE if there are any pending physical or virtual
// interrupts, and FALSE otherwise.

boolean InterruptPending()
    bit vIRQstatus = (if VirtualIRQPending() then '1' else '0') OR HCR_EL2.VI;
    bit vFIQstatus = (if VirtualFIQPending() then '1' else '0') OR HCR_EL2.VF;
    bits(3) v_interrupts = HCR_EL2.VSE : vIRQstatus : vFIQstatus;

    pending_physical_interrupt = (IRQPending() || FIQPending() || IsPhysicalSErrorPending());
    pending_virtual_interrupt = !IsInHost() && ((v_interrupts AND HCR_EL2.<AMO,IMO,FMO>) != '000');

    return pending_physical_interrupt || pending_virtual_interrupt;

Library pseudocode for shared/functions/system/IsEventRegisterSet

// IsEventRegisterSet()
// ====================
// Return TRUE if the Event Register of this PE is set, and FALSE otherwise.

boolean IsEventRegisterSet()
    return EventRegister == '1';

Library pseudocode for shared/functions/system/IsHighestEL

// IsHighestEL()
// =============
// Returns TRUE if given exception level is the highest exception level implemented

boolean IsHighestEL(bits(2) el)
    return HighestEL() == el;

Library pseudocode for shared/functions/system/IsInHost

// IsInHost()
// ==========

boolean IsInHost()
    return ELIsInHost(PSTATE.EL);

Library pseudocode for shared/functions/system/IsPhysicalSErrorPending

// Returns TRUE if a physical SError interrupt is pending.
boolean IsPhysicalSErrorPending();

Library pseudocode for shared/functions/system/IsSErrorEdgeTriggered

// IsSErrorEdgeTriggered()
// =======================
// Returns TRUE if the physical SError interrupt is edge-triggered
// and FALSE otherwise.

boolean IsSErrorEdgeTriggered(bits(24) syndrome)
    if HaveRASExt() then
        if HaveDoubleFaultExt() then
            return TRUE;
        if UsingAArch32() && syndrome<11:10> != '00' then
            // AArch32 and not Uncontainable.
            return TRUE;
        if !UsingAArch32() && syndrome<23> == '0' && syndrome<5:0> != '000000' then
            // AArch64 and neither IMPLEMENTATION DEFINED syndrome nor Uncategorized.
            return TRUE;
    return boolean IMPLEMENTATION_DEFINED "Edge-triggered SError";

Library pseudocode for shared/functions/system/IsSecure

// IsSecure()
// ==========
// Returns TRUE if current Exception level is in Secure state.

boolean IsSecure()
    if HaveEL(EL3) && !UsingAArch32() && PSTATE.EL == EL3 then
        return TRUE;
    elsif HaveEL(EL3) && UsingAArch32() && PSTATE.M == M32_Monitor then
        return TRUE;
    return IsSecureBelowEL3();

Library pseudocode for shared/functions/system/IsSecureBelowEL3

// IsSecureBelowEL3()
// ==================
// Return TRUE if an Exception level below EL3 is in Secure state
// or would be following an exception return to that level.
//
// Differs from IsSecure in that it ignores the current EL or Mode
// in considering security state.
// That is, if at AArch64 EL3 or in AArch32 Monitor mode, whether an
// exception return would pass to Secure or Non-secure state.

boolean IsSecureBelowEL3()
    if HaveEL(EL3) then
        return SCR_GEN[].NS == '0';
    elsif HaveEL(EL2) && (!HaveSecureEL2Ext() || HighestELUsingAArch32()) then
        // If Secure EL2 is not an architecture option then we must be Non-secure.
        return FALSE;
    else
        // TRUE if processor is Secure or FALSE if Non-secure.
        return boolean IMPLEMENTATION_DEFINED "Secure-only implementation";

Library pseudocode for shared/functions/system/IsSecureEL2Enabled

// IsSecureEL2Enabled()
// ====================
// Returns TRUE if Secure EL2 is enabled, FALSE otherwise.

boolean IsSecureEL2Enabled()
    if HaveEL(EL2) && HaveSecureEL2Ext() then
        if HaveEL(EL3) then
            if !ELUsingAArch32(EL3) && SCR_EL3.EEL2 == '1' then
                return TRUE;
            else
                return FALSE;
        else
            return IsSecure();
    else
        return FALSE;

Library pseudocode for shared/functions/system/IsSynchronizablePhysicalSErrorPending

// Returns TRUE if a synchronizable physical SError interrupt is pending.
boolean IsSynchronizablePhysicalSErrorPending();

Library pseudocode for shared/functions/system/IsVirtualSErrorPending

// Returns TRUE if a virtual SError interrupt is pending.
boolean IsVirtualSErrorPending();

Library pseudocode for shared/functions/system/LocalTimeoutEvent

// Returns TRUE if a local timeout event is generated when the value of
// CNTVCT_EL0 equals or exceeds the threshold value for the first time.
// If the threshold value is less than zero a local timeout event will
// not be generated.
boolean LocalTimeoutEvent(integer localtimeout);

Library pseudocode for shared/functions/system/Mode_Bits

constant bits(5) M32_User    = '10000';
constant bits(5) M32_FIQ     = '10001';
constant bits(5) M32_IRQ     = '10010';
constant bits(5) M32_Svc     = '10011';
constant bits(5) M32_Monitor = '10110';
constant bits(5) M32_Abort   = '10111';
constant bits(5) M32_Hyp     = '11010';
constant bits(5) M32_Undef   = '11011';
constant bits(5) M32_System  = '11111';

Library pseudocode for shared/functions/system/PLOfEL

// PLOfEL()
// ========

PrivilegeLevel PLOfEL(bits(2) el)
    case el of
        when EL3 return if HighestELUsingAArch32() then PL1 else PL3;
        when EL2 return PL2;
        when EL1 return PL1;
        when EL0 return PL0;

Library pseudocode for shared/functions/system/PSTATE

ProcState PSTATE;

Library pseudocode for shared/functions/system/PrivilegeLevel

enumeration PrivilegeLevel {PL3, PL2, PL1, PL0};

Library pseudocode for shared/functions/system/ProcState

type ProcState is (
    bits (1) N,        // Negative condition flag
    bits (1) Z,        // Zero condition flag
    bits (1) C,        // Carry condition flag
    bits (1) V,        // oVerflow condition flag
    bits (1) D,        // Debug mask bit                     [AArch64 only]
    bits (1) A,        // SError interrupt mask bit
    bits (1) I,        // IRQ mask bit
    bits (1) F,        // FIQ mask bit
    bits (1) PAN,      // Privileged Access Never Bit        [v8.1]
    bits (1) UAO,      // User Access Override               [v8.2]
    bits (1) DIT,      // Data Independent Timing            [v8.4]
    bits (1) TCO,      // Tag Check Override                 [v8.5, AArch64 only]
    bits (2) BTYPE,    // Branch Type                        [v8.5]
    bits (1) SS,       // Software step bit
    bits (1) IL,       // Illegal Execution state bit
    bits (2) EL,       // Exception Level
    bits (1) nRW,      // not Register Width: 0=64, 1=32
    bits (1) SP,       // Stack pointer select: 0=SP0, 1=SPx [AArch64 only]
    bits (1) Q,        // Cumulative saturation flag         [AArch32 only]
    bits (4) GE,       // Greater than or Equal flags        [AArch32 only]
    bits (1) SSBS,     // Speculative Store Bypass Safe
    bits (8) IT,       // If-then bits, RES0 in CPSR         [AArch32 only]
    bits (1) J,        // J bit, RES0                        [AArch32 only, RES0 in SPSR and CPSR]
    bits (1) T,        // T32 bit, RES0 in CPSR              [AArch32 only]
    bits (1) E,        // Endianness bit                     [AArch32 only]
    bits (5) M         // Mode field                         [AArch32 only]
)

Library pseudocode for shared/functions/system/RestoredITBits

// RestoredITBits()
// ================
// Get the value of PSTATE.IT to be restored on this exception return.

bits(8) RestoredITBits(bits(N) spsr)
    it = spsr<15:10,26:25>;

    // When PSTATE.IL is set, it is CONSTRAINED UNPREDICTABLE whether the IT bits are each set
    // to zero or copied from the SPSR.
    if PSTATE.IL == '1' then
        if ConstrainUnpredictableBool(Unpredictable_ILZEROIT) then return '00000000';
        else return it;

    // The IT bits are forced to zero when they are set to a reserved value.
    if !IsZero(it<7:4>) && IsZero(it<3:0>) then
        return '00000000';

    // The IT bits are forced to zero when returning to A32 state, or when returning to an EL
    // with the ITD bit set to 1, and the IT bits are describing a multi-instruction block.
    itd = if PSTATE.EL == EL2 then HSCTLR.ITD else SCTLR.ITD;
    if (spsr<5> == '0' && !IsZero(it)) || (itd == '1' && !IsZero(it<2:0>)) then
        return '00000000';
    else
        return it;
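
The reassembly of the eight IT bits from their split SPSR positions, and the reserved-value forcing, can be sketched as follows in C; the PSTATE.IL and SCTLR.ITD cases are omitted and the function name is illustrative.

#include <stdint.h>

uint8_t restored_it_bits(uint32_t spsr)
{
    /* it<7:2> comes from spsr<15:10>, it<1:0> from spsr<26:25> */
    uint8_t it = (uint8_t)((((spsr >> 10) & 0x3F) << 2) |
                           ((spsr >> 25) & 0x3));

    /* Reserved value: a base condition in it<7:4> with no mask bits in it<3:0>
     * is forced to zero, as in the pseudocode above. */
    if ((it & 0xF0) != 0 && (it & 0x0F) == 0)
        return 0;
    return it;
}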

Library pseudocode for shared/functions/system/SCRType

type SCRType;

Library pseudocode for shared/functions/system/SCR_GEN

// SCR_GEN[]
// =========

SCRType SCR_GEN[]
    // AArch32 secure & AArch64 EL3 registers are not architecturally mapped
    assert HaveEL(EL3);
    bits(64) r;
    if HighestELUsingAArch32() then
        r = ZeroExtend(SCR);
    else
        r = SCR_EL3;
    return r;

Library pseudocode for shared/functions/system/SendEvent

// Signal an event to all PEs in a multiprocessor system to set their Event Registers.
// When a PE executes the SEV instruction, it causes this function to be executed.
SendEvent();

Library pseudocode for shared/functions/system/SendEventLocal

// SendEventLocal()
// ================
// Set the local Event Register of this PE.
// When a PE executes the SEVL instruction, it causes this function to be executed.

SendEventLocal()
    EventRegister = '1';
    return;

Library pseudocode for shared/functions/system/SetPSTATEFromPSR

// SetPSTATEFromPSR()
// ==================
// Set PSTATE based on a PSR value

SetPSTATEFromPSR(bits(N) spsr)
    boolean from_aarch64 = !UsingAArch32();
    assert N == (if from_aarch64 then 64 else 32);
    PSTATE.SS = DebugExceptionReturnSS(spsr);
    ShouldAdvanceSS = FALSE;
    if IllegalExceptionReturn(spsr) then
        PSTATE.IL = '1';
        if HaveSSBSExt() then PSTATE.SSBS = bit UNKNOWN;
        if HaveBTIExt() then PSTATE.BTYPE = bits(2) UNKNOWN;
        if HaveUAOExt() then PSTATE.UAO = bit UNKNOWN;
        if HaveDITExt() then PSTATE.DIT = bit UNKNOWN;
        if HaveMTEExt() then PSTATE.TCO = bit UNKNOWN;
    else
        // State that is reinstated only on a legal exception return
        PSTATE.IL = spsr<20>;
        if spsr<4> == '1' then                   // AArch32 state
            AArch32.WriteMode(spsr<4:0>);        // Sets PSTATE.EL correctly
            if HaveSSBSExt() then PSTATE.SSBS = spsr<23>;
        else                                     // AArch64 state
            PSTATE.nRW = '0';
            PSTATE.EL  = spsr<3:2>;
            PSTATE.SP  = spsr<0>;
            if HaveBTIExt() then PSTATE.BTYPE = spsr<11:10>;
            if HaveSSBSExt() then PSTATE.SSBS = spsr<12>;
            if HaveUAOExt() then PSTATE.UAO = spsr<23>;
            if HaveDITExt() then PSTATE.DIT = spsr<24>;
            if HaveMTEExt() then PSTATE.TCO = spsr<25>;

    // If PSTATE.IL is set and returning to AArch32 state, it is CONSTRAINED UNPREDICTABLE whether
    // the T bit is set to zero or copied from SPSR.
    if PSTATE.IL == '1' && PSTATE.nRW == '1' then
        if ConstrainUnpredictableBool(Unpredictable_ILZEROT) then spsr<5> = '0';

    // State that is reinstated regardless of illegal exception return
    PSTATE.<N,Z,C,V> = spsr<31:28>;
    if HavePANExt() then PSTATE.PAN = spsr<22>;
    if PSTATE.nRW == '1' then                    // AArch32 state
        PSTATE.Q  = spsr<27>;
        PSTATE.IT = RestoredITBits(spsr);
        ShouldAdvanceIT = FALSE;
        if HaveDITExt() then
            PSTATE.DIT = (if (Restarting() || from_aarch64) then spsr<24> else spsr<21>);
        PSTATE.GE = spsr<19:16>;
        PSTATE.E  = spsr<9>;
        PSTATE.<A,I,F> = spsr<8:6>;              // No PSTATE.D in AArch32 state
        PSTATE.T  = spsr<5>;                     // PSTATE.J is RES0
    else                                         // AArch64 state
        PSTATE.<D,A,I,F> = spsr<9:6>;            // No PSTATE.<Q,IT,GE,E,T> in AArch64 state

    return;

Library pseudocode for shared/functions/system/ShouldAdvanceIT

boolean ShouldAdvanceIT;

Library pseudocode for shared/functions/system/ShouldAdvanceSS

boolean ShouldAdvanceSS;

Library pseudocode for shared/functions/system/SpeculationBarrier

SpeculationBarrier();

Library pseudocode for shared/functions/system/SynchronizeContext

SynchronizeContext();

Library pseudocode for shared/functions/system/SynchronizeErrors

// Implements the error synchronization event.
SynchronizeErrors();

Library pseudocode for shared/functions/system/TakeUnmaskedPhysicalSErrorInterrupts

// Take any pending unmasked physical SError interrupt
TakeUnmaskedPhysicalSErrorInterrupts(boolean iesb_req);

Library pseudocode for shared/functions/system/TakeUnmaskedSErrorInterrupts

// Take any pending unmasked physical SError interrupt or unmasked virtual SError
// interrupt.
TakeUnmaskedSErrorInterrupts();

Library pseudocode for shared/functions/system/ThisInstr

bits(32) ThisInstr();

Library pseudocode for shared/functions/system/ThisInstrLength

integer ThisInstrLength();

Library pseudocode for shared/functions/system/Unreachable

Unreachable()
    assert FALSE;

Library pseudocode for shared/functions/system/UsingAArch32

// UsingAArch32()
// ==============
// Return TRUE if the current Exception level is using AArch32, FALSE if using AArch64.

boolean UsingAArch32()
    boolean aarch32 = (PSTATE.nRW == '1');
    if !HaveAnyAArch32() then assert !aarch32;
    if HighestELUsingAArch32() then assert aarch32;
    return aarch32;

Library pseudocode for shared/functions/system/VirtualFIQPending

// Returns TRUE if there is any pending virtual FIQ boolean VirtualFIQPending();

Library pseudocode for shared/functions/system/VirtualIRQPending

// Returns TRUE if there is any pending virtual IRQ boolean VirtualIRQPending();

Library pseudocode for shared/functions/system/WaitForEvent

// WaitForEvent()
// ==============
// PE suspends its operation and enters a low-power state if the
// Event Register is clear and, for WFET, there is no Local
// Timeout event when the WFET is executed.

WaitForEvent(integer localtimeout)
    if !(IsEventRegisterSet() || LocalTimeoutEvent(localtimeout)) then
        EnterLowPowerState();
    return;

Library pseudocode for shared/functions/system/WaitForInterrupt

// WaitForInterrupt()
// ==================
// PE suspends its operation to enter a low-power state until
// a WFI wake-up event occurs, the PE is reset, or, for WFIT,
// a Local Timeout event is generated.

WaitForInterrupt(integer localtimeout)
    if localtimeout < 0 then
        EnterLowPowerState();
    else
        if !LocalTimeoutEvent(localtimeout) then EnterLowPowerState();
    return;

Library pseudocode for shared/functions/unpredictable/ConstrainUnpredictable

// ConstrainUnpredictable()
// ========================
// Return the appropriate Constraint result to control the caller's behavior. The return value
// is IMPLEMENTATION DEFINED within a permitted list for each UNPREDICTABLE case.
// (The permitted list is determined by an assert or case statement at the call site.)

// NOTE: This version of the function uses an Unpredictable argument to define the call site.
// This argument does not appear in the version used in the Armv8 Architecture Reference Manual.
// The extra argument is used here to allow this example definition. This is an example only and
// does not imply a fixed implementation of these behaviors. Indeed the intention is that it should
// be defined by each implementation, according to its implementation choices.

Constraint ConstrainUnpredictable(Unpredictable which)
    case which of
        when Unpredictable_VMSR
            return Constraint_UNDEF;
        when Unpredictable_WBOVERLAPLD
            return Constraint_WBSUPPRESS;          // return loaded value
        when Unpredictable_WBOVERLAPST
            return Constraint_NONE;                // store pre-writeback value
        when Unpredictable_LDPOVERLAP
            return Constraint_UNDEF;               // instruction is UNDEFINED
        when Unpredictable_BASEOVERLAP
            return Constraint_NONE;                // use original address
        when Unpredictable_DATAOVERLAP
            return Constraint_NONE;                // store original value
        when Unpredictable_DEVPAGE2
            return Constraint_FAULT;               // take an alignment fault
        when Unpredictable_DEVICETAGSTORE
            return Constraint_NONE;                // Do not take a fault
        when Unpredictable_INSTRDEVICE
            return Constraint_NONE;                // Do not take a fault
        when Unpredictable_RESCPACR
            return Constraint_UNKNOWN;             // Map to UNKNOWN value
        when Unpredictable_RESMAIR
            return Constraint_UNKNOWN;             // Map to UNKNOWN value
        when Unpredictable_RESTEXCB
            return Constraint_UNKNOWN;             // Map to UNKNOWN value
        when Unpredictable_RESDACR
            return Constraint_UNKNOWN;             // Map to UNKNOWN value
        when Unpredictable_RESPRRR
            return Constraint_UNKNOWN;             // Map to UNKNOWN value
        when Unpredictable_RESVTCRS
            return Constraint_UNKNOWN;             // Map to UNKNOWN value
        when Unpredictable_RESTnSZ
            return Constraint_FORCE;               // Map to the limit value
        when Unpredictable_OORTnSZ
            return Constraint_FORCE;               // Map to the limit value
        when Unpredictable_LARGEIPA
            return Constraint_FORCE;               // Restrict the inputsize to the PAMax value
        when Unpredictable_ESRCONDPASS
            return Constraint_FALSE;               // Report as "AL"
        when Unpredictable_ILZEROIT
            return Constraint_FALSE;               // Do not zero PSTATE.IT
        when Unpredictable_ILZEROT
            return Constraint_FALSE;               // Do not zero PSTATE.T
        when Unpredictable_BPVECTORCATCHPRI
            return Constraint_TRUE;                // Debug Vector Catch: match on 2nd halfword
        when Unpredictable_VCMATCHHALF
            return Constraint_FALSE;               // No match
        when Unpredictable_VCMATCHDAPA
            return Constraint_FALSE;               // No match on Data Abort or Prefetch abort
        when Unpredictable_WPMASKANDBAS
            return Constraint_FALSE;               // Watchpoint disabled
        when Unpredictable_WPBASCONTIGUOUS
            return Constraint_FALSE;               // Watchpoint disabled
        when Unpredictable_RESWPMASK
            return Constraint_DISABLED;            // Watchpoint disabled
        when Unpredictable_WPMASKEDBITS
            return Constraint_FALSE;               // Watchpoint disabled
        when Unpredictable_RESBPWPCTRL
            return Constraint_DISABLED;            // Breakpoint/watchpoint disabled
        when Unpredictable_BPNOTIMPL
            return Constraint_DISABLED;            // Breakpoint disabled
        when Unpredictable_RESBPTYPE
            return Constraint_DISABLED;            // Breakpoint disabled
        when Unpredictable_BPNOTCTXCMP
            return Constraint_DISABLED;            // Breakpoint disabled
        when Unpredictable_BPMATCHHALF
            return Constraint_FALSE;               // No match
        when Unpredictable_BPMISMATCHHALF
            return Constraint_FALSE;               // No match
        when Unpredictable_RESTARTALIGNPC
            return Constraint_FALSE;               // Do not force alignment
        when Unpredictable_RESTARTZEROUPPERPC
            return Constraint_TRUE;                // Force zero extension
        when Unpredictable_ZEROUPPER
            return Constraint_TRUE;                // zero top halves of X registers
        when Unpredictable_ERETZEROUPPERPC
            return Constraint_TRUE;                // zero top half of PC
        when Unpredictable_A32FORCEALIGNPC
            return Constraint_FALSE;               // Do not force alignment
        when Unpredictable_SMD
            return Constraint_UNDEF;               // disabled SMC is Unallocated
        when Unpredictable_NONFAULT
            return Constraint_FALSE;               // Speculation enabled
        when Unpredictable_SVEZEROUPPER
            return Constraint_TRUE;                // zero top bits of Z registers
        when Unpredictable_SVELDNFDATA
            return Constraint_TRUE;                // Load mem data in NF loads
        when Unpredictable_SVELDNFZERO
            return Constraint_TRUE;                // Write zeros in NF loads
        when Unpredictable_CHECKSPNONEACTIVE
            return Constraint_TRUE;                // Check SP alignment
        when Unpredictable_AFUPDATE                // AF update for alignment or permission fault
            return Constraint_TRUE;
        when Unpredictable_IESBinDebug             // Use SCTLR[].IESB in Debug state
            return Constraint_TRUE;
        when Unpredictable_BADPMSFCR               // Bad settings for PMSFCR_EL1/PMSEVFR_EL1/PMSLATFR_EL1
            return Constraint_TRUE;
        when Unpredictable_ZEROBTYPE
            return Constraint_TRUE;                // Save BTYPE in SPSR_ELx/DPSR_EL0 as '00'
        when Unpredictable_CLEARERRITEZERO         // Clearing sticky errors when instruction in flight
            return Constraint_FALSE;
        when Unpredictable_ALUEXCEPTIONRETURN
            return Constraint_UNDEF;
        when Unpredicatable_DBGxVR_RESS
            return Constraint_FALSE;
        when Unpredictable_WFxTDEBUG
            return Constraint_FALSE;               // WFxT in Debug state does not execute as a NOP
        when Unpredictable_LS64UNSUPPORTED
            return Constraint_LIMITED_ATOMICITY;   // Accesses are not single-copy atomic above the byte level

Library pseudocode for shared/functions/unpredictable/ConstrainUnpredictableBits

// ConstrainUnpredictableBits()
// ============================
// This is a variant of ConstrainUnpredictable for when the result can be Constraint_UNKNOWN.
// If the result is Constraint_UNKNOWN then the function also returns UNKNOWN value, but that
// value is always an allocated value; that is, one for which the behavior is not itself
// CONSTRAINED.

// NOTE: This version of the function uses an Unpredictable argument to define the call site.
// This argument does not appear in the version used in the Armv8 Architecture Reference Manual.
// See the NOTE on ConstrainUnpredictable() for more information.

// This is an example placeholder only and does not imply a fixed implementation of the bits part
// of the result, and may not be applicable in all cases.

(Constraint,bits(width)) ConstrainUnpredictableBits(Unpredictable which)
    c = ConstrainUnpredictable(which);
    if c == Constraint_UNKNOWN then
        return (c, Zeros(width));          // See notes; this is an example implementation only
    else
        return (c, bits(width) UNKNOWN);   // bits result not used

Library pseudocode for shared/functions/unpredictable/ConstrainUnpredictableBool

// ConstrainUnpredictableBool()
// ============================
// This is a simple wrapper function for cases where the constrained result is either TRUE or FALSE.

// NOTE: This version of the function uses an Unpredictable argument to define the call site.
// This argument does not appear in the version used in the Armv8 Architecture Reference Manual.
// See the NOTE on ConstrainUnpredictable() for more information.

boolean ConstrainUnpredictableBool(Unpredictable which)
    c = ConstrainUnpredictable(which);
    assert c IN {Constraint_TRUE, Constraint_FALSE};
    return (c == Constraint_TRUE);

Library pseudocode for shared/functions/unpredictable/ConstrainUnpredictableInteger

// ConstrainUnpredictableInteger()
// ===============================
// This is a variant of ConstrainUnpredictable for when the result can be Constraint_UNKNOWN. If
// the result is Constraint_UNKNOWN then the function also returns an UNKNOWN value in the range
// low to high, inclusive.

// NOTE: This version of the function uses an Unpredictable argument to define the call site.
// This argument does not appear in the version used in the Armv8 Architecture Reference Manual.
// See the NOTE on ConstrainUnpredictable() for more information.

// This is an example placeholder only and does not imply a fixed implementation of the integer part
// of the result.

(Constraint,integer) ConstrainUnpredictableInteger(integer low, integer high, Unpredictable which)
    c = ConstrainUnpredictable(which);
    if c == Constraint_UNKNOWN then
        return (c, low);              // See notes; this is an example implementation only
    else
        return (c, integer UNKNOWN);  // integer result not used

Library pseudocode for shared/functions/unpredictable/Constraint

enumeration Constraint    {// General
                           Constraint_NONE,               // Instruction executes with
                                                          // no change or side-effect to its described behavior
                           Constraint_UNKNOWN,            // Destination register has UNKNOWN value
                           Constraint_UNDEF,              // Instruction is UNDEFINED
                           Constraint_UNDEFEL0,           // Instruction is UNDEFINED at EL0 only
                           Constraint_NOP,                // Instruction executes as NOP
                           Constraint_TRUE,
                           Constraint_FALSE,
                           Constraint_DISABLED,
                           Constraint_UNCOND,             // Instruction executes unconditionally
                           Constraint_COND,               // Instruction executes conditionally
                           Constraint_ADDITIONAL_DECODE,  // Instruction executes with additional decode
                           // Load-store
                           Constraint_WBSUPPRESS,
                           Constraint_FAULT,
                           Constraint_LIMITED_ATOMICITY,  // Accesses are not single-copy atomic above the byte level
                           // IPA too large
                           Constraint_FORCE,
                           Constraint_FORCENOSLCHECK};

Library pseudocode for shared/functions/unpredictable/Unpredictable

enumeration Unpredictable {// VMSR on MVFR
                           Unpredictable_VMSR,
                           // Writeback/transfer register overlap (load)
                           Unpredictable_WBOVERLAPLD,
                           // Writeback/transfer register overlap (store)
                           Unpredictable_WBOVERLAPST,
                           // Load Pair transfer register overlap
                           Unpredictable_LDPOVERLAP,
                           // Store-exclusive base/status register overlap
                           Unpredictable_BASEOVERLAP,
                           // Store-exclusive data/status register overlap
                           Unpredictable_DATAOVERLAP,
                           // Load-store alignment checks
                           Unpredictable_DEVPAGE2,
                           // Instruction fetch from Device memory
                           Unpredictable_INSTRDEVICE,
                           // Reserved CPACR value
                           Unpredictable_RESCPACR,
                           // Reserved MAIR value
                           Unpredictable_RESMAIR,
                           // Reserved TEX:C:B value
                           Unpredictable_RESTEXCB,
                           // Reserved PRRR value
                           Unpredictable_RESPRRR,
                           // Reserved DACR field
                           Unpredictable_RESDACR,
                           // Reserved VTCR.S value
                           Unpredictable_RESVTCRS,
                           // Reserved TCR.TnSZ value
                           Unpredictable_RESTnSZ,
                           // Reserved SCTLR_ELx.TCF value
                           Unpredictable_RESTCF,
                           // Tag stored to Device memory
                           Unpredictable_DEVICETAGSTORE,
                           // Out-of-range TCR.TnSZ value
                           Unpredictable_OORTnSZ,
                           // IPA size exceeds PA size
                           Unpredictable_LARGEIPA,
                           // Syndrome for a known-passing conditional A32 instruction
                           Unpredictable_ESRCONDPASS,
                           // Illegal State exception: zero PSTATE.IT
                           Unpredictable_ILZEROIT,
                           // Illegal State exception: zero PSTATE.T
                           Unpredictable_ILZEROT,
                           // Debug: prioritization of Vector Catch
                           Unpredictable_BPVECTORCATCHPRI,
                           // Debug Vector Catch: match on 2nd halfword
                           Unpredictable_VCMATCHHALF,
                           // Debug Vector Catch: match on Data Abort or Prefetch abort
                           Unpredictable_VCMATCHDAPA,
                           // Debug watchpoints: non-zero MASK and non-ones BAS
                           Unpredictable_WPMASKANDBAS,
                           // Debug watchpoints: non-contiguous BAS
                           Unpredictable_WPBASCONTIGUOUS,
                           // Debug watchpoints: reserved MASK
                           Unpredictable_RESWPMASK,
                           // Debug watchpoints: non-zero MASKed bits of address
                           Unpredictable_WPMASKEDBITS,
                           // Debug breakpoints and watchpoints: reserved control bits
                           Unpredictable_RESBPWPCTRL,
                           // Debug breakpoints: not implemented
                           Unpredictable_BPNOTIMPL,
                           // Debug breakpoints: reserved type
                           Unpredictable_RESBPTYPE,
                           // Debug breakpoints: not-context-aware breakpoint
                           Unpredictable_BPNOTCTXCMP,
                           // Debug breakpoints: match on 2nd halfword of instruction
                           Unpredictable_BPMATCHHALF,
                           // Debug breakpoints: mismatch on 2nd halfword of instruction
                           Unpredictable_BPMISMATCHHALF,
                           // Debug: restart to a misaligned AArch32 PC value
                           Unpredictable_RESTARTALIGNPC,
                           // Debug: restart to a not-zero-extended AArch32 PC value
                           Unpredictable_RESTARTZEROUPPERPC,
                           // Zero top 32 bits of X registers in AArch32 state
                           Unpredictable_ZEROUPPER,
                           // Zero top 32 bits of PC on illegal return to AArch32 state
                           Unpredictable_ERETZEROUPPERPC,
                           // Force address to be aligned when interworking branch to A32 state
                           Unpredictable_A32FORCEALIGNPC,
                           // SMC disabled
                           Unpredictable_SMD,
                           // FF speculation
                           Unpredictable_NONFAULT,
                           // Zero top bits of Z registers in EL change
                           Unpredictable_SVEZEROUPPER,
                           // Load mem data in NF loads
                           Unpredictable_SVELDNFDATA,
                           // Write zeros in NF loads
                           Unpredictable_SVELDNFZERO,
                           // SP alignment fault when predicate is all zero
                           Unpredictable_CHECKSPNONEACTIVE,
                           // Access Flag Update by HW
                           Unpredictable_AFUPDATE,
                           // Consider SCTLR[].IESB in Debug state
                           Unpredictable_IESBinDebug,
                           // Bad settings for PMSFCR_EL1/PMSEVFR_EL1/PMSLATFR_EL1
                           Unpredictable_BADPMSFCR,
                           // Zero saved BType value in SPSR_ELx/DPSR_EL0
                           Unpredictable_ZEROBTYPE,
                           // Timestamp constrained to virtual or physical
                           Unpredictable_EL2TIMESTAMP,
                           Unpredictable_EL1TIMESTAMP,
                           // WFET or WFIT instruction in Debug state
                           Unpredictable_WFxTDEBUG,
                           // Address does not support LS64 instructions
                           Unpredictable_LS64UNSUPPORTED,
                           // Clearing DCC/ITR sticky flags when instruction is in flight
                           Unpredictable_CLEARERRITEZERO,
                           // ALUEXCEPTIONRETURN when in user/system mode in A32 instructions
                           Unpredictable_ALUEXCEPTIONRETURN,
                           // Compare DBGBVR.RESS for BP/WP
                           Unpredicatable_DBGxVR_RESS};

Library pseudocode for shared/functions/vector/AdvSIMDExpandImm

// AdvSIMDExpandImm()
// ==================

bits(64) AdvSIMDExpandImm(bit op, bits(4) cmode, bits(8) imm8)
    case cmode<3:1> of
        when '000'
            imm64 = Replicate(Zeros(24):imm8, 2);
        when '001'
            imm64 = Replicate(Zeros(16):imm8:Zeros(8), 2);
        when '010'
            imm64 = Replicate(Zeros(8):imm8:Zeros(16), 2);
        when '011'
            imm64 = Replicate(imm8:Zeros(24), 2);
        when '100'
            imm64 = Replicate(Zeros(8):imm8, 4);
        when '101'
            imm64 = Replicate(imm8:Zeros(8), 4);
        when '110'
            if cmode<0> == '0' then
                imm64 = Replicate(Zeros(16):imm8:Ones(8), 2);
            else
                imm64 = Replicate(Zeros(8):imm8:Ones(16), 2);
        when '111'
            if cmode<0> == '0' && op == '0' then
                imm64 = Replicate(imm8, 8);
            if cmode<0> == '0' && op == '1' then
                imm8a = Replicate(imm8<7>, 8); imm8b = Replicate(imm8<6>, 8);
                imm8c = Replicate(imm8<5>, 8); imm8d = Replicate(imm8<4>, 8);
                imm8e = Replicate(imm8<3>, 8); imm8f = Replicate(imm8<2>, 8);
                imm8g = Replicate(imm8<1>, 8); imm8h = Replicate(imm8<0>, 8);
                imm64 = imm8a:imm8b:imm8c:imm8d:imm8e:imm8f:imm8g:imm8h;
            if cmode<0> == '1' && op == '0' then
                imm32 = imm8<7>:NOT(imm8<6>):Replicate(imm8<6>,5):imm8<5:0>:Zeros(19);
                imm64 = Replicate(imm32, 2);
            if cmode<0> == '1' && op == '1' then
                if UsingAArch32() then ReservedEncoding();
                imm64 = imm8<7>:NOT(imm8<6>):Replicate(imm8<6>,8):imm8<5:0>:Zeros(48);
    return imm64;
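For illustration, a minimal Python sketch (not Arm source) of the shifted-immediate cases of AdvSIMDExpandImm(), i.e. cmode<3:1> in '000'..'101'; the ones-extended and floating-point forms are omitted. Function names are hypothetical.

    def replicate(value, width, count):
        """Concatenate `count` copies of `value`, each `width` bits wide."""
        out = 0
        for _ in range(count):
            out = (out << width) | (value & ((1 << width) - 1))
        return out

    def adv_simd_expand_imm(op, cmode, imm8):
        """Subset of AdvSIMDExpandImm: shifted-byte forms only."""
        sel = (cmode >> 1) & 0b111
        if sel <= 0b011:                  # 32-bit lanes: imm8 << (8 * sel)
            return replicate(imm8 << (8 * sel), 32, 2)
        if sel in (0b100, 0b101):         # 16-bit lanes: imm8 << (8 * (sel - 4))
            return replicate(imm8 << (8 * (sel - 4)), 16, 4)
        raise NotImplementedError("ones-extended and floating-point forms omitted")

    # Example: cmode = '0010' places imm8 in bits <15:8> of each 32-bit lane.
    assert adv_simd_expand_imm(0, 0b0010, 0xAB) == 0x0000AB000000AB00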

Library pseudocode for shared/functions/vector/MatMulAdd

// MatMulAdd()
// ===========
//
// Signed or unsigned 8-bit integer matrix multiply and add to 32-bit integer matrix
// result[2, 2] = addend[2, 2] + (op1[2, 8] * op2[8, 2])

bits(N) MatMulAdd(bits(N) addend, bits(N) op1, bits(N) op2, boolean op1_unsigned, boolean op2_unsigned)
    assert N == 128;

    bits(N) result;
    bits(32) sum;
    integer prod;

    for i = 0 to 1
        for j = 0 to 1
            sum = Elem[addend, 2*i + j, 32];
            for k = 0 to 7
                prod = Int(Elem[op1, 8*i + k, 8], op1_unsigned) * Int(Elem[op2, 8*j + k, 8], op2_unsigned);
                sum = sum + prod;
            Elem[result, 2*i + j, 32] = sum;
    return result;
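A minimal Python sketch (not Arm source) of the same accumulation, assuming unsigned operands held as 16-byte lists and a 4-lane 32-bit accumulator; the names are hypothetical and the element indexing mirrors the Elem[] accesses above.

    def matmul_add_u8(addend, op1, op2):
        """addend: 4 x 32-bit lanes (row-major 2x2); op1, op2: 16 bytes each.
        Mirrors MatMulAdd with op1_unsigned = op2_unsigned = True."""
        result = list(addend)
        for i in range(2):
            for j in range(2):
                s = addend[2*i + j]
                for k in range(8):
                    s += op1[8*i + k] * op2[8*j + k]
                result[2*i + j] = s & 0xFFFFFFFF   # 32-bit wraparound accumulate
        return result

    # Example: all-ones inputs, zero addend -> each lane accumulates 8 * (1*1) = 8.
    assert matmul_add_u8([0]*4, [1]*16, [1]*16) == [8, 8, 8, 8]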

Library pseudocode for shared/functions/vector/PolynomialMult

// PolynomialMult()
// ================

bits(M+N) PolynomialMult(bits(M) op1, bits(N) op2)
    result = Zeros(M+N);
    extended_op2 = ZeroExtend(op2, M+N);
    for i = 0 to M-1
        if op1<i> == '1' then
            result = result EOR LSL(extended_op2, i);
    return result;
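A minimal Python sketch (not Arm source) of the same carry-less multiplication over GF(2); the arguments are plain integers standing in for the bit vectors.

    def polynomial_mult(op1, op2):
        """Carry-less (GF(2)) multiply: XOR-accumulate shifted copies of op2
        for each set bit of op1, mirroring the pseudocode loop above."""
        result = 0
        for i in range(op1.bit_length()):
            if (op1 >> i) & 1:
                result ^= op2 << i
        return result

    # Example: (x^2 + x + 1) * (x + 1) = x^3 + 1 over GF(2).
    assert polynomial_mult(0b111, 0b11) == 0b1001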

Library pseudocode for shared/functions/vector/SatQ

// SatQ()
// ======

(bits(N), boolean) SatQ(integer i, integer N, boolean unsigned)
    (result, sat) = if unsigned then UnsignedSatQ(i, N) else SignedSatQ(i, N);
    return (result, sat);

Library pseudocode for shared/functions/vector/SignedSatQ

// SignedSatQ()
// ============

(bits(N), boolean) SignedSatQ(integer i, integer N)
    if i > 2^(N-1) - 1 then
        result = 2^(N-1) - 1;  saturated = TRUE;
    elsif i < -(2^(N-1)) then
        result = -(2^(N-1));  saturated = TRUE;
    else
        result = i;  saturated = FALSE;
    return (result<N-1:0>, saturated);

Library pseudocode for shared/functions/vector/UnsignedRSqrtEstimate

// UnsignedRSqrtEstimate()
// =======================

bits(N) UnsignedRSqrtEstimate(bits(N) operand)
    assert N == 32;
    if operand<N-1:N-2> == '00' then    // Operands <= 0x3FFFFFFF produce 0xFFFFFFFF
        result = Ones(N);
    else
        // input is in the range 0x40000000 .. 0xffffffff representing [0.25 .. 1.0)
        // estimate is in the range 256 .. 511 representing [1.0 .. 2.0)
        estimate = RecipSqrtEstimate(UInt(operand<31:23>), FALSE);
        // result is in the range 0x80000000 .. 0xff800000 representing [1.0 .. 2.0)
        result = estimate<8:0> : Zeros(N-9);
    return result;

Library pseudocode for shared/functions/vector/UnsignedRecipEstimate

// UnsignedRecipEstimate()
// =======================

bits(N) UnsignedRecipEstimate(bits(N) operand)
    assert N == 32;
    if operand<N-1> == '0' then    // Operands <= 0x7FFFFFFF produce 0xFFFFFFFF
        result = Ones(N);
    else
        // input is in the range 0x80000000 .. 0xffffffff representing [0.5 .. 1.0)
        // estimate is in the range 256 to 511 representing [1.0 .. 2.0)
        estimate = RecipEstimate(UInt(operand<31:23>), FALSE);
        // result is in the range 0x80000000 .. 0xff800000 representing [1.0 .. 2.0)
        result = estimate<8:0> : Zeros(N-9);
    return result;
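Both estimate functions above end by packing a 9-bit estimate (256..511, representing [1.0 .. 2.0)) into the top of a 32-bit result. A small Python check (not Arm source) of that packing step, matching the ranges quoted in the comments:

    def pack_estimate(estimate):
        """Place a 9-bit estimate (256..511) into the top bits of a 32-bit
        result, as in `result = estimate<8:0> : Zeros(N-9)` with N == 32."""
        assert 256 <= estimate <= 511
        return (estimate << 23) & 0xFFFFFFFF

    # estimate = 256 (1.0) -> 0x80000000; estimate = 511 (~2.0) -> 0xFF800000.
    assert pack_estimate(256) == 0x80000000
    assert pack_estimate(511) == 0xFF800000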

Library pseudocode for shared/functions/vector/UnsignedSatQ

// UnsignedSatQ()
// ==============

(bits(N), boolean) UnsignedSatQ(integer i, integer N)
    if i > 2^N - 1 then
        result = 2^N - 1;  saturated = TRUE;
    elsif i < 0 then
        result = 0;  saturated = TRUE;
    else
        result = i;  saturated = FALSE;
    return (result<N-1:0>, saturated);
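A minimal Python sketch (not Arm source) of the SignedSatQ and UnsignedSatQ helpers above; the values here are plain integers, where the pseudocode returns the two's-complement bit pattern result<N-1:0>.

    def signed_sat_q(i, n):
        """Saturate i to the n-bit signed range; returns (value, saturated)."""
        hi, lo = (1 << (n - 1)) - 1, -(1 << (n - 1))
        if i > hi: return hi, True
        if i < lo: return lo, True
        return i, False

    def unsigned_sat_q(i, n):
        """Saturate i to the n-bit unsigned range; returns (value, saturated)."""
        hi = (1 << n) - 1
        if i > hi: return hi, True
        if i < 0:  return 0, True
        return i, False

    # Examples for N = 8:
    assert signed_sat_q(200, 8) == (127, True)
    assert unsigned_sat_q(-3, 8) == (0, True)
    assert signed_sat_q(-5, 8) == (-5, False)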

Library pseudocode for shared/trace/selfhosted/SelfHostedTraceEnabled

// SelfHostedTraceEnabled()
// ========================
// Returns TRUE if Self-hosted Trace is enabled.

boolean SelfHostedTraceEnabled()
    if !HaveTraceExt() || !HaveSelfHostedTrace() then return FALSE;
    if HaveEL(EL3) then
        secure_trace_enable = (if ELUsingAArch32(EL3) then SDCR.STE else MDCR_EL3.STE);
        niden = (secure_trace_enable == '0' || ExternalSecureNoninvasiveDebugEnabled());
    else
        // If no EL3, IsSecure() returns the Effective value of (SCR_EL3.NS == '0')
        niden = (!IsSecure() || ExternalSecureNoninvasiveDebugEnabled());
    return (EDSCR.TFO == '0' || !niden);

Library pseudocode for shared/trace/selfhosted/TraceAllowed

// TraceAllowed()
// ==============
// Returns TRUE if Self-hosted Trace is allowed in the current Security state and Exception level

boolean TraceAllowed()
    if !HaveTraceExt() then return FALSE;
    if SelfHostedTraceEnabled() then
        if IsSecure() && HaveEL(EL3) then
            secure_trace_enable = (if ELUsingAArch32(EL3) then SDCR.STE else MDCR_EL3.STE);
            if secure_trace_enable == '0' then return FALSE;
        TGE_bit = if EL2Enabled() then HCR_EL2.TGE else '0';
        case PSTATE.EL of
            when EL3  TRE_bit = if HighestELUsingAArch32() then TRFCR.E1TRE else '0';
            when EL2  TRE_bit = TRFCR_EL2.E2TRE;
            when EL1  TRE_bit = TRFCR_EL1.E1TRE;
            when EL0  TRE_bit = if TGE_bit == '1' then TRFCR_EL2.E0HTRE else TRFCR_EL1.E0TRE;
        return TRE_bit == '1';
    else
        return (!IsSecure() || ExternalSecureNoninvasiveDebugEnabled());

Library pseudocode for shared/trace/selfhosted/TraceContextIDR2

// TraceContextIDR2()
// ==================

boolean TraceContextIDR2()
    if !TraceAllowed() || !HaveEL(EL2) then return FALSE;
    return (!SelfHostedTraceEnabled() || TRFCR_EL2.CX == '1');

Library pseudocode for shared/trace/selfhosted/TraceSynchronizationBarrier

// Memory barrier instruction that preserves the relative order of memory accesses to System
// registers due to trace operations and other memory accesses to the same registers
TraceSynchronizationBarrier();

Library pseudocode for shared/trace/selfhosted/TraceTimeStamp

// TraceTimeStamp()
// ================

TimeStamp TraceTimeStamp()
    if SelfHostedTraceEnabled() then
        if HaveEL(EL2) then
            TS_el2 = TRFCR_EL2.TS;
            if TS_el2 == '10' then
                // Reserved value
                (-, TS_el2) = ConstrainUnpredictableBits(Unpredictable_EL2TIMESTAMP);
            case TS_el2 of
                when '00'  // Falls through to check TRFCR_EL1.TS
                when '01'  return TimeStamp_Virtual;
                when '10'  assert HaveECVExt(); return TimeStamp_OffsetPhysical;
                when '11'  return TimeStamp_Physical;
                otherwise  Unreachable();  // ConstrainUnpredictableBits removes this case
        TS_el1 = TRFCR_EL1.TS;
        if TS_el1 == 'x0' then
            // Reserved value
            (-, TS_el1) = ConstrainUnpredictableBits(Unpredictable_EL1TIMESTAMP);
        case TS_el1 of
            when '01'  return TimeStamp_Virtual;
            when '10'  assert HaveECVExt(); return TimeStamp_OffsetPhysical;
            when '11'  return TimeStamp_Physical;
            otherwise  Unreachable();      // ConstrainUnpredictableBits removes this case
    else
        return TimeStamp_CoreSight;

Library pseudocode for shared/translation/attrs/CombineS1S2AttrHints

// CombineS1S2AttrHints()
// ======================
// Combines cacheability attributes and allocation hints from stage 1 and stage 2

MemAttrHints CombineS1S2AttrHints(MemAttrHints s1desc, MemAttrHints s2desc, AccType s2acctype)
    MemAttrHints result;

    apply_force_writeback = HaveStage2MemAttrControl() && HCR_EL2.FWB == '1';

    if apply_force_writeback then
        if S2CacheDisabled(s2acctype) then
            result.attrs = MemAttr_NC;        // force Non-cacheable
        elsif s2desc.attrs == '11' then
            result.attrs = s1desc.attrs;
        elsif s2desc.attrs == '10' then
            result.attrs = MemAttr_WB;        // force Write-back
        else
            result.attrs = MemAttr_NC;
    else
        if s2desc.attrs == '01' || s1desc.attrs == '01' then
            result.attrs = bits(2) UNKNOWN;   // Reserved
        elsif s2desc.attrs == MemAttr_NC || s1desc.attrs == MemAttr_NC then
            result.attrs = MemAttr_NC;        // Non-cacheable
        elsif s2desc.attrs == MemAttr_WT || s1desc.attrs == MemAttr_WT then
            result.attrs = MemAttr_WT;        // Write-through
        else
            result.attrs = MemAttr_WB;        // Write-back

    if result.attrs == MemAttr_NC then
        result.hints = MemHint_No;
    elsif apply_force_writeback then
        if s1desc.attrs != MemAttr_NC then
            result.hints = s1desc.hints;
        else
            result.hints = MemHint_RWA;
    else
        result.hints = s1desc.hints;

    result.transient = s1desc.transient;
    return result;
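Without FWB, the else arm above implements "weakest attribute wins" down the chain Non-cacheable, Write-through, Write-back, with the '01' encoding reserved. A minimal Python sketch (not Arm source) of that rule, using string tags in place of the 2-bit encodings:

    def combine_attrs_no_fwb(s1, s2):
        """Stage 1/stage 2 cacheability combine without HCR_EL2.FWB.
        'Reserved' stands for the '01' encoding in the pseudocode."""
        if "Reserved" in (s1, s2):
            return "UNKNOWN"
        for weakest in ("NC", "WT"):
            if weakest in (s1, s2):
                return weakest
        return "WB"

    # Write-back at one stage is downgraded by Write-through at the other.
    assert combine_attrs_no_fwb("WB", "WT") == "WT"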

Library pseudocode for shared/translation/attrs/CombineS1S2Device

// CombineS1S2Device()
// ===================
// Combines device types from stage 1 and stage 2

DeviceType CombineS1S2Device(DeviceType s1device, DeviceType s2device)
    if s2device == DeviceType_nGnRnE || s1device == DeviceType_nGnRnE then
        result = DeviceType_nGnRnE;
    elsif s2device == DeviceType_nGnRE || s1device == DeviceType_nGnRE then
        result = DeviceType_nGnRE;
    elsif s2device == DeviceType_nGRE || s1device == DeviceType_nGRE then
        result = DeviceType_nGRE;
    else
        result = DeviceType_GRE;
    return result;
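The chain above always selects the more restrictive of the two device types. A minimal Python sketch (not Arm source) of the same rule via an ordered severity list; the string tags mirror the DeviceType names:

    # Ordered from most to least restrictive, as in the elsif chain above.
    DEVICE_SEVERITY = ["nGnRnE", "nGnRE", "nGRE", "GRE"]

    def combine_s1_s2_device(s1device, s2device):
        """Return the more restrictive of the two device memory types."""
        return min(s1device, s2device, key=DEVICE_SEVERITY.index)

    assert combine_s1_s2_device("GRE", "nGnRE") == "nGnRE"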

Library pseudocode for shared/translation/attrs/LongConvertAttrsHints

// LongConvertAttrsHints()
// =======================
// Convert the long attribute fields for Normal memory as used in the MAIR fields
// to orthogonal attributes and hints

MemAttrHints LongConvertAttrsHints(bits(4) attrfield, AccType acctype)
    assert !IsZero(attrfield);
    MemAttrHints result;
    if S1CacheDisabled(acctype) then         // Force Non-cacheable
        result.attrs = MemAttr_NC;
        result.hints = MemHint_No;
    else
        if attrfield<3:2> == '00' then       // Write-through transient
            result.attrs = MemAttr_WT;
            result.hints = attrfield<1:0>;
            result.transient = TRUE;
        elsif attrfield<3:0> == '0100' then  // Non-cacheable (no allocate)
            result.attrs = MemAttr_NC;
            result.hints = MemHint_No;
            result.transient = FALSE;
        elsif attrfield<3:2> == '01' then    // Write-back transient
            result.attrs = MemAttr_WB;
            result.hints = attrfield<1:0>;
            result.transient = TRUE;
        else                                 // Write-through/Write-back non-transient
            result.attrs = attrfield<3:2>;
            result.hints = attrfield<1:0>;
            result.transient = FALSE;
    return result;

Library pseudocode for shared/translation/attrs/MemAttrDefaults

// MemAttrDefaults()
// =================
// Supply default values for memory attributes, including overriding the shareability attributes
// for Device and Non-cacheable memory types.

MemoryAttributes MemAttrDefaults(MemoryAttributes memattrs)
    if memattrs.memtype == MemType_Device then
        memattrs.inner = MemAttrHints UNKNOWN;
        memattrs.outer = MemAttrHints UNKNOWN;
        memattrs.shareable = TRUE;
        memattrs.outershareable = TRUE;
    else
        memattrs.device = DeviceType UNKNOWN;
        if memattrs.inner.attrs == MemAttr_NC && memattrs.outer.attrs == MemAttr_NC then
            memattrs.shareable = TRUE;
            memattrs.outershareable = TRUE;
    return memattrs;

Library pseudocode for shared/translation/attrs/S1CacheDisabled

// S1CacheDisabled()
// =================

boolean S1CacheDisabled(AccType acctype)
    if ELUsingAArch32(S1TranslationRegime()) then
        if PSTATE.EL == EL2 then
            enable = if acctype == AccType_IFETCH then HSCTLR.I else HSCTLR.C;
        else
            enable = if acctype == AccType_IFETCH then SCTLR.I else SCTLR.C;
    else
        enable = if acctype == AccType_IFETCH then SCTLR[].I else SCTLR[].C;
    return enable == '0';

Library pseudocode for shared/translation/attrs/S2AttrDecode

// S2AttrDecode()
// ==============
// Converts the Stage 2 attribute fields into orthogonal attributes and hints

MemoryAttributes S2AttrDecode(bits(2) SH, bits(4) attr, AccType acctype)
    MemoryAttributes memattrs;

    apply_force_writeback = HaveStage2MemAttrControl() && HCR_EL2.FWB == '1';

    // Device memory
    if (apply_force_writeback && attr<2> == '0') || attr<3:2> == '00' then
        memattrs.memtype = MemType_Device;
        case attr<1:0> of
            when '00'  memattrs.device = DeviceType_nGnRnE;
            when '01'  memattrs.device = DeviceType_nGnRE;
            when '10'  memattrs.device = DeviceType_nGRE;
            when '11'  memattrs.device = DeviceType_GRE;
    // Normal memory
    elsif apply_force_writeback then
        if attr<2> == '1' then
            memattrs.memtype = MemType_Normal;
            memattrs.inner.attrs = attr<1:0>;
            memattrs.outer.attrs = attr<1:0>;
            memattrs.shareable = SH<1> == '1';
            memattrs.outershareable = SH == '10';
    elsif attr<1:0> != '00' then
        memattrs.memtype = MemType_Normal;
        memattrs.outer = S2ConvertAttrsHints(attr<3:2>, acctype);
        memattrs.inner = S2ConvertAttrsHints(attr<1:0>, acctype);
        memattrs.shareable = SH<1> == '1';
        memattrs.outershareable = SH == '10';
    else
        memattrs = MemoryAttributes UNKNOWN;   // Reserved

    return MemAttrDefaults(memattrs);

Library pseudocode for shared/translation/attrs/S2CacheDisabled

// S2CacheDisabled()
// =================

boolean S2CacheDisabled(AccType acctype)
    if ELUsingAArch32(EL2) then
        disable = if acctype == AccType_IFETCH then HCR2.ID else HCR2.CD;
    else
        disable = if acctype == AccType_IFETCH then HCR_EL2.ID else HCR_EL2.CD;
    return disable == '1';

Library pseudocode for shared/translation/attrs/S2ConvertAttrsHints

// S2ConvertAttrsHints()
// =====================
// Converts the attribute fields for Normal memory as used in stage 2
// descriptors to orthogonal attributes and hints

MemAttrHints S2ConvertAttrsHints(bits(2) attr, AccType acctype)
    assert attr != '00';
    MemAttrHints result;
    if S2CacheDisabled(acctype) then    // Force Non-cacheable
        result.attrs = MemAttr_NC;
        result.hints = MemHint_No;
    else
        case attr of
            when '01'                   // Non-cacheable (no allocate)
                result.attrs = MemAttr_NC;
                result.hints = MemHint_No;
            when '10'                   // Write-through
                result.attrs = MemAttr_WT;
                result.hints = MemHint_RWA;
            when '11'                   // Write-back
                result.attrs = MemAttr_WB;
                result.hints = MemHint_RWA;
    result.transient = FALSE;
    return result;

Library pseudocode for shared/translation/attrs/ShortConvertAttrsHints

// ShortConvertAttrsHints()
// ========================
// Converts the short attribute fields for Normal memory as used in the TTBR and
// TEX fields to orthogonal attributes and hints

MemAttrHints ShortConvertAttrsHints(bits(2) RGN, AccType acctype, boolean secondstage)
    MemAttrHints result;
    if (!secondstage && S1CacheDisabled(acctype)) || (secondstage && S2CacheDisabled(acctype)) then
        // Force Non-cacheable
        result.attrs = MemAttr_NC;
        result.hints = MemHint_No;
    else
        case RGN of
            when '00'                   // Non-cacheable (no allocate)
                result.attrs = MemAttr_NC;
                result.hints = MemHint_No;
            when '01'                   // Write-back, Read and Write allocate
                result.attrs = MemAttr_WB;
                result.hints = MemHint_RWA;
            when '10'                   // Write-through, Read allocate
                result.attrs = MemAttr_WT;
                result.hints = MemHint_RA;
            when '11'                   // Write-back, Read allocate
                result.attrs = MemAttr_WB;
                result.hints = MemHint_RA;
    result.transient = FALSE;
    return result;
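With caching enabled, the RGN field maps directly to an (attrs, hints) pair. A minimal Python sketch (not Arm source) of the table above, with string tags standing in for the MemAttr/MemHint enumerations and a flag standing in for the relevant stage's cache-disable check:

    # RGN encoding -> (cacheability, allocation hints), as in the case statement above.
    RGN_TABLE = {
        0b00: ("NC", "No"),    # Non-cacheable, no allocate
        0b01: ("WB", "RWA"),   # Write-back, Read and Write allocate
        0b10: ("WT", "RA"),    # Write-through, Read allocate
        0b11: ("WB", "RA"),    # Write-back, Read allocate
    }

    def short_convert_attrs_hints(rgn, cache_disabled=False):
        """Forced Non-cacheable when the stage's cache enable is off,
        otherwise the RGN table lookup."""
        attrs, hints = ("NC", "No") if cache_disabled else RGN_TABLE[rgn]
        return {"attrs": attrs, "hints": hints, "transient": False}

    assert short_convert_attrs_hints(0b01) == {"attrs": "WB", "hints": "RWA", "transient": False}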

Library pseudocode for shared/translation/attrs/WalkAttrDecode

// WalkAttrDecode()
// ================

MemoryAttributes WalkAttrDecode(bits(2) SH, bits(2) ORGN, bits(2) IRGN, boolean secondstage)
    MemoryAttributes memattrs;

    AccType acctype = AccType_NORMAL;
    memattrs.memtype = MemType_Normal;
    memattrs.inner = ShortConvertAttrsHints(IRGN, acctype, secondstage);
    memattrs.outer = ShortConvertAttrsHints(ORGN, acctype, secondstage);
    memattrs.shareable = SH<1> == '1';
    memattrs.outershareable = SH == '10';
    memattrs.tagged = FALSE;

    return MemAttrDefaults(memattrs);

Library pseudocode for shared/translation/translation/HasS2Translation

// HasS2Translation()
// ==================
// Returns TRUE if stage 2 translation is present for the current translation regime

boolean HasS2Translation()
    return (EL2Enabled() && !IsInHost() && PSTATE.EL IN {EL0,EL1});

Library pseudocode for shared/translation/translation/Have16bitVMID

// Have16bitVMID()
// ===============
// Returns TRUE if EL2 and support for a 16-bit VMID are implemented.

boolean Have16bitVMID()
    return HaveEL(EL2) && boolean IMPLEMENTATION_DEFINED "Has 16-bit VMID";

Library pseudocode for shared/translation/translation/PAMax

// PAMax()
// =======
// Returns the IMPLEMENTATION DEFINED upper limit on the physical address
// size for this processor, as log2().

integer PAMax()
    return integer IMPLEMENTATION_DEFINED "Maximum Physical Address Size";

Library pseudocode for shared/translation/translation/S1TranslationRegime

// S1TranslationRegime()
// =====================
// Stage 1 translation regime for the given Exception level

bits(2) S1TranslationRegime(bits(2) el)
    if el != EL0 then
        return el;
    elsif HaveEL(EL3) && ELUsingAArch32(EL3) && SCR.NS == '0' then
        return EL3;
    elsif HaveVirtHostExt() && ELIsInHost(el) then
        return EL2;
    else
        return EL1;

// S1TranslationRegime()
// =====================
// Returns the Exception level controlling the current Stage 1 translation regime. For the most
// part this is unused in code because the system register accessors (SCTLR[], etc.) implicitly
// return the correct value.

bits(2) S1TranslationRegime()
    return S1TranslationRegime(PSTATE.EL);

Library pseudocode for shared/translation/translation/VAMax

// VAMax()
// =======
// Returns the IMPLEMENTATION DEFINED upper limit on the virtual address
// size for this processor, as log2().

integer VAMax()
    return integer IMPLEMENTATION_DEFINED "Maximum Virtual Address Size";


Internal version only: isa v01_19, pseudocode v2020-09_xml, sve v2020-09_rc3; Build timestamp: 2020-09-30

Copyright © 2010-2020 Arm Limited or its affiliates. All rights reserved. This document is Non-Confidential.
