AArch64 Functions.Memory Pseudocode
Library pseudocode for aarch64/functions/memory/AArch64.AccessIsTagChecked
// AArch64.AccessIsTagChecked()
// ============================
// TRUE if a given access is tag-checked, FALSE otherwise.

boolean AArch64.AccessIsTagChecked(bits(64) vaddr, AccType acctype)
    if PSTATE.M<4> == '1' then return FALSE;

    if EffectiveTBI(vaddr, FALSE, PSTATE.EL) == '0' then return FALSE;

    if EffectiveTCMA(vaddr, PSTATE.EL) == '1' && (vaddr<59:55> == '00000' || vaddr<59:55> == '11111') then
        return FALSE;

    if !AArch64.AllocationTagAccessIsEnabled(acctype) then return FALSE;

    if acctype IN {AccType_IFETCH, AccType_TTW} then return FALSE;

    if acctype == AccType_NV2REGISTER then return FALSE;

    if acctype IN {AccType_CSR_NORMAL, AccType_CSR_PRIV} then return FALSE;

    if PSTATE.TCO == '1' then return FALSE;

    if !IsTagCheckedInstruction() then return FALSE;

    return TRUE;
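A note on the TCMA escape above: it keys on vaddr<59:55>, that is, the 4-bit Logical Address Tag together with address bit 55, so only tag 0b0000 in the lower half of the address space and tag 0b1111 in the upper half are exempted. A minimal C sketch of just that test (the helper name is ours, not the architecture's):

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helper: TRUE when the TCMA check exempts this address.
 * vaddr<59:55> must be all-zeros (tag 0b0000, bit 55 = 0) or all-ones
 * (tag 0b1111, bit 55 = 1), i.e. the match-all tags in the low and
 * high halves of the address space. */
static bool tcma_exempts(uint64_t vaddr)
{
    unsigned bits_59_55 = (unsigned)((vaddr >> 55) & 0x1F);
    return bits_59_55 == 0x00 || bits_59_55 == 0x1F;
}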
Library pseudocode for aarch64/functions/memory/AArch64.AddressWithAllocationTag
// AArch64.AddressWithAllocationTag()
// ==================================
// Generate a 64-bit value containing a Logical Address Tag from a 64-bit
// virtual address and an Allocation Tag.
// If the extension is disabled, treats the Allocation Tag as '0000'.

bits(64) AArch64.AddressWithAllocationTag(bits(64) address, AccType acctype, bits(4) allocation_tag)
    bits(64) result = address;
    bits(4) tag;
    if AArch64.AllocationTagAccessIsEnabled(acctype) then
        tag = allocation_tag;
    else
        tag = '0000';
    result<59:56> = tag;
    return result;
Library pseudocode for aarch64/functions/memory/AArch64.AllocationTagFromAddress
// AArch64.AllocationTagFromAddress()
// ==================================
// Generate an Allocation Tag from a 64-bit value containing a Logical Address Tag.

bits(4) AArch64.AllocationTagFromAddress(bits(64) tagged_address)
    return tagged_address<59:56>;
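AArch64.AddressWithAllocationTag() and AArch64.AllocationTagFromAddress() are inverses over bits 59:56. A C sketch of the pair, with the enable check reduced to a boolean parameter (names are illustrative, not architectural):

#include <stdint.h>

/* Place a 4-bit Logical Address Tag into bits 59:56 of a virtual address;
 * 'enabled' stands in for AArch64.AllocationTagAccessIsEnabled(). */
static uint64_t address_with_allocation_tag(uint64_t address, int enabled, uint8_t tag)
{
    uint64_t t = enabled ? (uint64_t)(tag & 0xF) : 0;
    return (address & ~(0xFULL << 56)) | (t << 56);
}

/* Recover the tag from bits 59:56. */
static uint8_t allocation_tag_from_address(uint64_t tagged_address)
{
    return (uint8_t)((tagged_address >> 56) & 0xF);
}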
Library pseudocode for aarch64/functions/memory/AArch64.CheckAlignment
// AArch64.CheckAlignment()
// ========================

boolean AArch64.CheckAlignment(bits(64) address, integer alignment, AccType acctype,
                               boolean iswrite)

    aligned = (address == Align(address, alignment));
    atomic  = acctype IN { AccType_ATOMIC, AccType_ATOMICRW, AccType_ORDEREDATOMIC,
                           AccType_ORDEREDATOMICRW, AccType_ATOMICLS64 };
    ordered = acctype IN { AccType_ORDERED, AccType_ORDEREDRW, AccType_LIMITEDORDERED,
                           AccType_ORDEREDATOMIC, AccType_ORDEREDATOMICRW };
    vector  = acctype == AccType_VEC;

    if SCTLR[].A == '1' then check = TRUE;
    elsif HaveLSE2Ext() then
        check = (UInt(address<0+:4>) + alignment > 16) && ((ordered && SCTLR[].nAA == '0') || atomic);
    else check = atomic || ordered;

    if check && !aligned then
        secondstage = FALSE;
        AArch64.Abort(address, AArch64.AlignmentFault(acctype, iswrite, secondstage));

    return aligned;
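Two of the tests above reduce to simple bit arithmetic for power-of-two sizes; a C sketch (helper names ours):

#include <stdbool.h>
#include <stdint.h>

/* address == Align(address, alignment), for power-of-two 'alignment',
 * is just a low-bits test. */
static bool is_aligned(uint64_t address, uint64_t alignment)
{
    return (address & (alignment - 1)) == 0;
}

/* The FEAT_LSE2 condition UInt(address<0+:4>) + alignment > 16 asks
 * whether the access crosses a 16-byte boundary; only then is the
 * alignment check enforced for atomic/ordered accesses. */
static bool crosses_16b_boundary(uint64_t address, unsigned size)
{
    return (unsigned)(address & 0xF) + size > 16;
}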
Library pseudocode for aarch64/functions/memory/AArch64.CheckTag
// AArch64.CheckTag()
// ==================
// Performs a Tag Check operation for a memory access and returns
// whether the check passed.

boolean AArch64.CheckTag(AddressDescriptor memaddrdesc, bits(4) ptag, boolean write)
    if memaddrdesc.memattrs.tagged then
        return ptag == _MemTag[memaddrdesc];
    else
        return TRUE;
Library pseudocode for aarch64/functions/memory/AArch64.MemSingle
// AArch64.MemSingle[] - non-assignment (read) form
// ================================================
// Perform an atomic, little-endian read of 'size' bytes.

bits(size*8) AArch64.MemSingle[bits(64) address, integer size, AccType acctype, boolean wasaligned]
    assert size IN {1, 2, 4, 8, 16};
    assert address == Align(address, size);

    AddressDescriptor memaddrdesc;
    bits(size*8) value;
    iswrite = FALSE;

    memaddrdesc = AArch64.TranslateAddress(address, acctype, iswrite, wasaligned, size);
    // Check for aborts or debug exceptions
    if IsFault(memaddrdesc) then
        AArch64.Abort(address, memaddrdesc.fault);

    // Memory array access
    if HaveTME() then
        transactional = TSTATE.depth > 0 && !(acctype IN {AccType_IFETCH, AccType_TTW});
        accdesc = CreateAccessDescriptor(acctype, transactional);
    else
        accdesc = CreateAccessDescriptor(acctype);
    if HaveMTEExt() then
        if AArch64.AccessIsTagChecked(ZeroExtend(address, 64), acctype) then
            bits(4) ptag = AArch64.PhysicalTag(ZeroExtend(address, 64));
            if !AArch64.CheckTag(memaddrdesc, ptag, iswrite) then
                AArch64.TagCheckFault(ZeroExtend(address, 64), acctype, iswrite);
    value = _Mem[memaddrdesc, size, accdesc, FALSE];

    return value;

// AArch64.MemSingle[] - assignment (write) form
// =============================================
// Perform an atomic, little-endian write of 'size' bytes.

AArch64.MemSingle[bits(64) address, integer size, AccType acctype, boolean wasaligned] = bits(size*8) value
    assert size IN {1, 2, 4, 8, 16};
    assert address == Align(address, size);

    AddressDescriptor memaddrdesc;
    iswrite = TRUE;

    memaddrdesc = AArch64.TranslateAddress(address, acctype, iswrite, wasaligned, size);
    // Check for aborts or debug exceptions
    if IsFault(memaddrdesc) then
        AArch64.Abort(address, memaddrdesc.fault);

    // Effect on exclusives
    if memaddrdesc.memattrs.shareable then
        ClearExclusiveByAddress(memaddrdesc.paddress, ProcessorID(), size);

    // Memory array access
    if HaveTME() then
        transactional = TSTATE.depth > 0;
        accdesc = CreateAccessDescriptor(acctype, transactional);
    else
        accdesc = CreateAccessDescriptor(acctype);
    if HaveMTEExt() then
        if AArch64.AccessIsTagChecked(ZeroExtend(address, 64), acctype) then
            bits(4) ptag = AArch64.PhysicalTag(ZeroExtend(address, 64));
            if !AArch64.CheckTag(memaddrdesc, ptag, iswrite) then
                AArch64.TagCheckFault(ZeroExtend(address, 64), acctype, iswrite);
    _Mem[memaddrdesc, size, accdesc] = value;
    return;
Library pseudocode for aarch64/functions/memory/AArch64.MemTag
// AArch64.MemTag[] - non-assignment (read) form
// =============================================
// Load an Allocation Tag from memory.

bits(4) AArch64.MemTag[bits(64) address, AccType acctype]
    assert acctype == AccType_NORMAL;
    AddressDescriptor memaddrdesc;
    bits(4) value;
    iswrite = FALSE;

    memaddrdesc = AArch64.TranslateAddress(address, acctype, iswrite, TRUE, TAG_GRANULE);

    // Check for aborts or debug exceptions
    if IsFault(memaddrdesc) then
        AArch64.Abort(address, memaddrdesc.fault);

    // Return the granule tag if tagging is enabled...
    if AArch64.AllocationTagAccessIsEnabled(acctype) && memaddrdesc.memattrs.tagged then
        return _MemTag[memaddrdesc];
    else
        // ...otherwise read tag as zero.
        return '0000';

// AArch64.MemTag[] - assignment (write) form
// ==========================================
// Store an Allocation Tag to memory.

AArch64.MemTag[bits(64) address, AccType acctype] = bits(4) value
    assert acctype == AccType_NORMAL;
    AddressDescriptor memaddrdesc;
    iswrite = TRUE;

    // Stores of allocation tags must be aligned
    if address != Align(address, TAG_GRANULE) then
        boolean secondstage = FALSE;
        AArch64.Abort(address, AArch64.AlignmentFault(acctype, iswrite, secondstage));

    wasaligned = TRUE;
    memaddrdesc = AArch64.TranslateAddress(address, acctype, iswrite, wasaligned, TAG_GRANULE);

    // It is CONSTRAINED UNPREDICTABLE whether tags stored to memory locations marked as Device
    // generate an Alignment Fault or store the data to those locations.
    if memaddrdesc.memattrs.memtype == MemType_Device then
        c = ConstrainUnpredictable(Unpredictable_DEVICETAGSTORE);
        assert c IN {Constraint_NONE, Constraint_FAULT};
        if c == Constraint_FAULT then
            boolean secondstage = FALSE;
            AArch64.Abort(address, AArch64.AlignmentFault(acctype, iswrite, secondstage));

    // Check for aborts or debug exceptions
    if IsFault(memaddrdesc) then
        AArch64.Abort(address, memaddrdesc.fault);

    // Memory array access
    if AArch64.AllocationTagAccessIsEnabled(acctype) && memaddrdesc.memattrs.tagged then
        _MemTag[memaddrdesc] = value;
Library pseudocode for aarch64/functions/memory/AArch64.PhysicalTag
// AArch64.PhysicalTag()
// =====================
// Generate a Physical Tag from a Logical Tag in an address.

bits(4) AArch64.PhysicalTag(bits(64) vaddr)
    return vaddr<59:56>;
Library pseudocode for aarch64/functions/memory/AArch64.TranslateAddressForAtomicAccess
// AArch64.TranslateAddressForAtomicAccess()
// =========================================
// Performs an alignment check for atomic memory operations.
// Also translates a 64-bit Virtual Address into a Physical Address.

AddressDescriptor AArch64.TranslateAddressForAtomicAccess(bits(64) address, integer sizeinbits)
    boolean iswrite = FALSE;
    size = sizeinbits DIV 8;

    assert size IN {1, 2, 4, 8, 16};

    aligned = AArch64.CheckAlignment(address, size, AccType_ATOMICRW, iswrite);

    // MMU or MPU lookup
    memaddrdesc = AArch64.TranslateAddress(address, AccType_ATOMICRW, iswrite, aligned, size);

    // Check for aborts or debug exceptions
    if IsFault(memaddrdesc) then
        AArch64.Abort(address, memaddrdesc.fault);

    // Effect on exclusives
    if memaddrdesc.memattrs.shareable then
        ClearExclusiveByAddress(memaddrdesc.paddress, ProcessorID(), size);

    if HaveMTEExt() && AArch64.AccessIsTagChecked(address, AccType_ATOMICRW) then
        bits(4) ptag = AArch64.PhysicalTag(address);
        if !AArch64.CheckTag(memaddrdesc, ptag, iswrite) then
            AArch64.TagCheckFault(address, AccType_ATOMICRW, iswrite);

    return memaddrdesc;
Library pseudocode for aarch64/functions/memory/AddressSupportsLS64
// Returns TRUE if the 64-byte block following the given address supports the
// LD64B and ST64B instructions, and FALSE otherwise.
boolean AddressSupportsLS64(bits(64) address);
Library pseudocode for aarch64/functions/memory/CheckSPAlignment
// CheckSPAlignment()
// ==================
// Check correct stack pointer alignment for AArch64 state.

CheckSPAlignment()
    bits(64) sp = SP[];

    if PSTATE.EL == EL0 then
        stack_align_check = (SCTLR[].SA0 != '0');
    else
        stack_align_check = (SCTLR[].SA != '0');

    if stack_align_check && sp != Align(sp, 16) then
        AArch64.SPAlignmentFault();

    return;
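For reference, the 16-byte check above reduces to a low-bits test; a C sketch (helper name ours):

#include <stdbool.h>
#include <stdint.h>

/* A stack pointer is 16-byte aligned when its low four bits are zero. */
static bool sp_is_16_byte_aligned(uint64_t sp)
{
    return (sp & 0xF) == 0;
}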
Library pseudocode for aarch64/functions/memory/IsBlockDescriptorNTBitValid
// If the implementation supports changing the block size without a break-before-make
// approach, then for implementations that have level 1 or 2 support, the nT bit in
// the block descriptor is valid.
boolean IsBlockDescriptorNTBitValid();
Library pseudocode for aarch64/functions/memory/IsTagCheckedInstruction
// Returns TRUE if the current instruction uses tag-checked memory access,
// FALSE otherwise.
boolean IsTagCheckedInstruction();
Library pseudocode for aarch64/functions/memory/Mem
// Mem[] - non-assignment (read) form
// ==================================
// Perform a read of 'size' bytes. The access byte order is reversed for a big-endian access.
// Instruction fetches would call AArch64.MemSingle directly.

bits(size*8) Mem[bits(64) address, integer size, AccType acctype]
    assert size IN {1, 2, 4, 8, 16};
    bits(size*8) value;
    boolean iswrite = FALSE;

    aligned = AArch64.CheckAlignment(address, size, acctype, iswrite);
    if size != 16 || !(acctype IN {AccType_VEC, AccType_VECSTREAM}) then
        atomic = aligned;
    else
        // 128-bit SIMD&FP loads are treated as a pair of 64-bit single-copy atomic accesses
        // 64-bit aligned.
        atomic = address == Align(address, 8);

    if !atomic then
        assert size > 1;
        value<7:0> = AArch64.MemSingle[address, 1, acctype, aligned];

        // For subsequent bytes it is CONSTRAINED UNPREDICTABLE whether an unaligned Device memory
        // access will generate an Alignment Fault, as to get this far means the first byte did
        // not, so we must be changing to a new translation page.
        if !aligned then
            c = ConstrainUnpredictable(Unpredictable_DEVPAGE2);
            assert c IN {Constraint_FAULT, Constraint_NONE};
            if c == Constraint_NONE then aligned = TRUE;

        for i = 1 to size-1
            value<8*i+7:8*i> = AArch64.MemSingle[address+i, 1, acctype, aligned];
    elsif size == 16 && acctype IN {AccType_VEC, AccType_VECSTREAM} then
        value<63:0>   = AArch64.MemSingle[address,   8, acctype, aligned];
        value<127:64> = AArch64.MemSingle[address+8, 8, acctype, aligned];
    else
        value = AArch64.MemSingle[address, size, acctype, aligned];

    if BigEndian(acctype) then
        value = BigEndianReverse(value);
    return value;

// Mem[] - assignment (write) form
// ===============================
// Perform a write of 'size' bytes. The byte order is reversed for a big-endian access.

Mem[bits(64) address, integer size, AccType acctype] = bits(size*8) value
    boolean iswrite = TRUE;

    if BigEndian(acctype) then
        value = BigEndianReverse(value);

    aligned = AArch64.CheckAlignment(address, size, acctype, iswrite);
    if size != 16 || !(acctype IN {AccType_VEC, AccType_VECSTREAM}) then
        atomic = aligned;
    else
        // 128-bit SIMD&FP stores are treated as a pair of 64-bit single-copy atomic accesses
        // 64-bit aligned.
        atomic = address == Align(address, 8);

    if !atomic then
        assert size > 1;
        AArch64.MemSingle[address, 1, acctype, aligned] = value<7:0>;

        // For subsequent bytes it is CONSTRAINED UNPREDICTABLE whether an unaligned Device memory
        // access will generate an Alignment Fault, as to get this far means the first byte did
        // not, so we must be changing to a new translation page.
        if !aligned then
            c = ConstrainUnpredictable(Unpredictable_DEVPAGE2);
            assert c IN {Constraint_FAULT, Constraint_NONE};
            if c == Constraint_NONE then aligned = TRUE;

        for i = 1 to size-1
            AArch64.MemSingle[address+i, 1, acctype, aligned] = value<8*i+7:8*i>;
    elsif size == 16 && acctype IN {AccType_VEC, AccType_VECSTREAM} then
        AArch64.MemSingle[address,   8, acctype, aligned] = value<63:0>;
        AArch64.MemSingle[address+8, 8, acctype, aligned] = value<127:64>;
    else
        AArch64.MemSingle[address, size, acctype, aligned] = value;
    return;
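BigEndianReverse() above swaps the byte order of the whole access. A C sketch over a byte buffer (the function name is ours):

#include <stddef.h>
#include <stdint.h>

/* Reverse the bytes of a 'size'-byte value held in a buffer, as
 * BigEndianReverse does for the bits(size*8) value. */
static void big_endian_reverse(uint8_t *value, size_t size)
{
    for (size_t i = 0; i < size / 2; i++) {
        uint8_t tmp = value[i];
        value[i] = value[size - 1 - i];
        value[size - 1 - i] = tmp;
    }
}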
Library pseudocode for aarch64/functions/memory/MemAtomic
// MemAtomic()
// ===========
// Performs load and store memory operations for a given virtual address.

bits(size) MemAtomic(bits(64) address, MemAtomicOp op, bits(size) value, AccType ldacctype, AccType stacctype)
    bits(size) newvalue;
    memaddrdesc = AArch64.TranslateAddressForAtomicAccess(address, size);
    ldaccdesc = CreateAccessDescriptor(ldacctype);
    staccdesc = CreateAccessDescriptor(stacctype);

    // All observers in the shareability domain observe the
    // following load and store atomically.
    oldvalue = _Mem[memaddrdesc, size DIV 8, ldaccdesc, FALSE];
    if BigEndian(ldacctype) then
        oldvalue = BigEndianReverse(oldvalue);

    case op of
        when MemAtomicOp_ADD  newvalue = oldvalue + value;
        when MemAtomicOp_BIC  newvalue = oldvalue AND NOT(value);
        when MemAtomicOp_EOR  newvalue = oldvalue EOR value;
        when MemAtomicOp_ORR  newvalue = oldvalue OR value;
        when MemAtomicOp_SMAX newvalue = if SInt(oldvalue) > SInt(value) then oldvalue else value;
        when MemAtomicOp_SMIN newvalue = if SInt(oldvalue) > SInt(value) then value else oldvalue;
        when MemAtomicOp_UMAX newvalue = if UInt(oldvalue) > UInt(value) then oldvalue else value;
        when MemAtomicOp_UMIN newvalue = if UInt(oldvalue) > UInt(value) then value else oldvalue;
        when MemAtomicOp_SWP  newvalue = value;

    if BigEndian(stacctype) then
        newvalue = BigEndianReverse(newvalue);
    _Mem[memaddrdesc, size DIV 8, staccdesc] = newvalue;

    // Load operations return the old (pre-operation) value
    return oldvalue;
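The case statement above selects the read-modify-write operation. A direct C transcription of that selection for a 64-bit access (the enum and function are ours; signed comparisons use int64_t casts):

#include <stdint.h>

typedef enum { OP_ADD, OP_BIC, OP_EOR, OP_ORR,
               OP_SMAX, OP_SMIN, OP_UMAX, OP_UMIN, OP_SWP } mem_atomic_op;

/* Compute the value to be stored back from the old value and the operand. */
static uint64_t compute_new_value(mem_atomic_op op, uint64_t old, uint64_t val)
{
    switch (op) {
    case OP_ADD:  return old + val;
    case OP_BIC:  return old & ~val;                                  /* AND NOT */
    case OP_EOR:  return old ^ val;
    case OP_ORR:  return old | val;
    case OP_SMAX: return ((int64_t)old > (int64_t)val) ? old : val;
    case OP_SMIN: return ((int64_t)old > (int64_t)val) ? val : old;
    case OP_UMAX: return (old > val) ? old : val;
    case OP_UMIN: return (old > val) ? val : old;
    case OP_SWP:  return val;
    }
    return old;  /* unreachable for valid ops */
}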
Library pseudocode for aarch64/functions/memory/MemAtomicCompareAndSwap
// MemAtomicCompareAndSwap()
// =========================
// Compares the value stored at the passed-in memory address against the passed-in expected
// value. If the comparison is successful, the value at the passed-in memory address is swapped
// with the passed-in newvalue.

bits(size) MemAtomicCompareAndSwap(bits(64) address, bits(size) expectedvalue,
                                   bits(size) newvalue, AccType ldacctype, AccType stacctype)
    memaddrdesc = AArch64.TranslateAddressForAtomicAccess(address, size);
    ldaccdesc = CreateAccessDescriptor(ldacctype);
    staccdesc = CreateAccessDescriptor(stacctype);

    // All observers in the shareability domain observe the
    // following load and store atomically.
    oldvalue = _Mem[memaddrdesc, size DIV 8, ldaccdesc, FALSE];
    if BigEndian(ldacctype) then
        oldvalue = BigEndianReverse(oldvalue);

    if oldvalue == expectedvalue then
        if BigEndian(stacctype) then
            newvalue = BigEndianReverse(newvalue);
        _Mem[memaddrdesc, size DIV 8, staccdesc] = newvalue;
    return oldvalue;
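The pseudocode implements CAS as an atomically-observed load and conditional store, returning the old value either way. C11's strong compare-exchange exposes the same result shape; a 64-bit sketch (the wrapper name is ours, and the mapping is loose, since the pseudocode models the hardware primitive rather than the C memory model):

#include <stdatomic.h>
#include <stdint.h>

/* Returns the old value at *addr; the swap happens only when the old
 * value equals 'expected'. On failure, C11 writes the observed value
 * into the expected slot, which is exactly what we return. */
static uint64_t cas64(_Atomic uint64_t *addr, uint64_t expected, uint64_t desired)
{
    uint64_t old = expected;
    atomic_compare_exchange_strong(addr, &old, desired);
    return old;
}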
Library pseudocode for aarch64/functions/memory/MemLoad64B
// MemLoad64B()
// ============
// Performs an atomic 64-byte read from a given virtual address.

bits(512) MemLoad64B(bits(64) address, AccType acctype)
    assert address == Align(address, 64);

    bits(512) data;
    boolean iswrite = FALSE;
    boolean aligned = TRUE;

    if !AddressSupportsLS64(address) then
        c = ConstrainUnpredictable(Unpredictable_LS64UNSUPPORTED);
        assert c IN {Constraint_LIMITED_ATOMICITY, Constraint_FAULT};
        if c == Constraint_FAULT then
            // Generate a stage 1 Data Abort reported using the DFSC code of 110101.
            fault = AArch64.ExclusiveFault(acctype, iswrite);
            AArch64.Abort(address, fault);
        else
            // Accesses are not single-copy atomic above the byte level.
            for i = 0 to 63
                data<7+8*i : 8*i> = AArch64.MemSingle[address+i, 1, acctype, aligned];
            return data;

    AddressDescriptor memaddrdesc;
    memaddrdesc = AArch64.TranslateAddress(address, acctype, iswrite, aligned, 64);

    // Check for aborts or debug exceptions
    if IsFault(memaddrdesc) then
        AArch64.Abort(address, memaddrdesc.fault);

    // Effect on exclusives
    if memaddrdesc.memattrs.shareable then
        ClearExclusiveByAddress(memaddrdesc.paddress, ProcessorID(), 64);

    // Memory array access
    if HaveTME() then
        transactional = TSTATE.depth > 0;
        accdesc = CreateAccessDescriptor(acctype, transactional);
    else
        accdesc = CreateAccessDescriptor(acctype);
    if HaveMTEExt() then
        if AArch64.AccessIsTagChecked(ZeroExtend(address, 64), acctype) then
            bits(4) ptag = AArch64.PhysicalTag(ZeroExtend(address, 64));
            if !AArch64.CheckTag(memaddrdesc, ptag, iswrite) then
                AArch64.TagCheckFault(address, acctype, iswrite);
    data = _Mem[memaddrdesc, 64, accdesc, iswrite];

    return data;
Library pseudocode for aarch64/functions/memory/MemStore64B
// MemStore64B()
// =============
// Performs an atomic 64-byte store to a given virtual address. Function does
// not return the status of the store.

MemStore64B(bits(64) address, bits(512) value, AccType acctype)
    assert address == Align(address, 64);

    if !AddressSupportsLS64(address) then
        c = ConstrainUnpredictable(Unpredictable_LS64UNSUPPORTED);
        assert c IN {Constraint_LIMITED_ATOMICITY, Constraint_FAULT};
        if c == Constraint_FAULT then
            // Generate a Data Abort reported using the DFSC code of 110101.
            iswrite = TRUE;
            fault = AArch64.ExclusiveFault(acctype, iswrite);
            AArch64.Abort(address, fault);
        else
            // Accesses are not single-copy atomic above the byte level.
            aligned = TRUE;
            for i = 0 to 63
                AArch64.MemSingle[address+i, 1, acctype, aligned] = value<7+8*i : 8*i>;
    else
        - = MemStore64BWithRet(address, value, acctype);    // Return status is ignored by ST64B
    return;
Library pseudocode for aarch64/functions/memory/MemStore64BWithRet
// MemStore64BWithRet()
// ====================
// Performs an atomic 64-byte store to a given virtual address returning
// the status value of the operation.

bits(64) MemStore64BWithRet(bits(64) address, bits(512) value, AccType acctype)
    assert address == Align(address, 64);

    AddressDescriptor memaddrdesc;
    boolean iswrite = TRUE;
    boolean aligned = TRUE;

    memaddrdesc = AArch64.TranslateAddress(address, acctype, iswrite, aligned, 64);

    // Check for aborts or debug exceptions
    if IsFault(memaddrdesc) then
        AArch64.Abort(address, memaddrdesc.fault);
        return ZeroExtend('1');

    // Effect on exclusives
    if memaddrdesc.memattrs.shareable then
        ClearExclusiveByAddress(memaddrdesc.paddress, ProcessorID(), 64);

    // Memory array access
    if HaveTME() then
        transactional = TSTATE.depth > 0;
        accdesc = CreateAccessDescriptor(acctype, transactional);
    else
        accdesc = CreateAccessDescriptor(acctype);

    if HaveMTEExt() then
        if AArch64.AccessIsTagChecked(ZeroExtend(address, 64), acctype) then
            bits(4) ptag = AArch64.PhysicalTag(ZeroExtend(address, 64));
            if !AArch64.CheckTag(memaddrdesc, ptag, iswrite) then
                AArch64.TagCheckFault(address, acctype, iswrite);
                return ZeroExtend('1');

    _Mem[memaddrdesc, 64, accdesc] = value;
    status = MemStore64BWithRetStatus();
    return status;
Library pseudocode for aarch64/functions/memory/MemStore64BWithRetStatus
// Generates the return status of memory write with ST64BV or ST64BV0
// instructions. The status indicates if the operation succeeded, failed,
// or was not supported at this memory location.
bits(64) MemStore64BWithRetStatus();
Library pseudocode for aarch64/functions/memory/NVMem
// NVMem[] - non-assignment form
// =============================
// This function is the load memory access for the transformed System register read access
// when Enhanced Nested Virtualisation is enabled with HCR_EL2.NV2 = 1.
// The address for the load memory access is calculated using
// the formula SignExtend(VNCR_EL2.BADDR : Offset<11:0>, 64) where,
// * VNCR_EL2.BADDR holds the base address of the memory location, and
// * Offset is the unique offset value defined architecturally for each System register that
//   supports transformation of register access to memory access.

bits(64) NVMem[integer offset]
    assert offset > 0;
    bits(64) address = SignExtend(VNCR_EL2.BADDR:offset<11:0>, 64);
    return Mem[address, 8, AccType_NV2REGISTER];

// NVMem[] - assignment form
// =========================
// This function is the store memory access for the transformed System register write access
// when Enhanced Nested Virtualisation is enabled with HCR_EL2.NV2 = 1.
// The address for the store memory access is calculated using
// the formula SignExtend(VNCR_EL2.BADDR : Offset<11:0>, 64) where,
// * VNCR_EL2.BADDR holds the base address of the memory location, and
// * Offset is the unique offset value defined architecturally for each System register that
//   supports transformation of register access to memory access.

NVMem[integer offset] = bits(64) value
    assert offset > 0;
    bits(64) address = SignExtend(VNCR_EL2.BADDR:offset<11:0>, 64);
    Mem[address, 8, AccType_NV2REGISTER] = value;
    return;
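A sketch of the address formation in C, assuming VNCR_EL2.BADDR supplies virtual-address bits 56:12 right-aligned (that width, like the helper name, is an assumption of this sketch), so the sign bit of the 57-bit concatenation is bit 56:

#include <stdint.h>

/* Form SignExtend(VNCR_EL2.BADDR : offset<11:0>, 64). */
static uint64_t nvmem_address(uint64_t baddr, uint32_t offset)
{
    uint64_t va57 = (baddr << 12) | (offset & 0xFFF);   /* 57-bit VA */
    return (uint64_t)((int64_t)(va57 << 7) >> 7);       /* sign-extend bit 56 */
}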
Library pseudocode for aarch64/functions/memory/SetTagCheckedInstruction
// Flag the current instruction as using/not using memory tag checking.
SetTagCheckedInstruction(boolean checked);
Library pseudocode for aarch64/functions/memory/_MemTag
// This _MemTag[] accessor is the hardware operation which performs a single-copy atomic,
// Allocation Tag granule aligned, memory access from the tag in PA space.
//
// The function addresses the array using desc.paddress which supplies:
// * A 52-bit physical address
// * A single NS bit to select between Secure and Non-secure parts of the array.
//
// The accdesc descriptor describes the access type: normal, exclusive, ordered, streaming,
// etc., and other parameters required to access the physical memory or for setting the
// syndrome register in the event of an external abort.
bits(4) _MemTag[AddressDescriptor desc, AccessDescriptor accdesc];

// This _MemTag[] accessor is the hardware operation which performs a single-copy atomic,
// Allocation Tag granule aligned, memory access to the tag in PA space.
//
// The function addresses the array using desc.paddress which supplies:
// * A 52-bit physical address
// * A single NS bit to select between Secure and Non-secure parts of the array.
//
// The accdesc descriptor describes the access type: normal, exclusive, ordered, streaming,
// etc., and other parameters required to access the physical memory or for setting the
// syndrome register in the event of an external abort.
_MemTag[AddressDescriptor desc, AccessDescriptor accdesc] = bits(4) value;
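A toy model of the tag PA space can make the granularity concrete: one 4-bit tag per 16-byte TAG_GRANULE, stored here in a hypothetical flat array indexed by granule number (illustrative only; the architecture does not prescribe where tags are stored):

#include <stdint.h>

#define LOG2_TAG_GRANULE 4                 /* TAG_GRANULE = 16 bytes */
static uint8_t tag_store[1 << 20];         /* tags for a 16 MiB toy PA space */

/* Read the Allocation Tag covering the granule that contains 'paddr'. */
static uint8_t mem_tag_read(uint64_t paddr)
{
    return tag_store[paddr >> LOG2_TAG_GRANULE] & 0xF;
}

/* Write the Allocation Tag for the granule that contains 'paddr'. */
static void mem_tag_write(uint64_t paddr, uint8_t tag)
{
    tag_store[paddr >> LOG2_TAG_GRANULE] = tag & 0xF;
}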