Initial cleanup

2026-05-08 19:03:33 +08:00
parent a9af16962c
commit a605fb2bea
352 changed files with 0 additions and 7278 deletions

80
third_party/asmjit/arm/a64archtraits_p.h vendored Normal file

@@ -0,0 +1,80 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_A64ARCHTRAITS_P_H_INCLUDED
#define ASMJIT_ARM_A64ARCHTRAITS_P_H_INCLUDED
#include "../core/archtraits.h"
#include "../core/misc_p.h"
#include "../core/type.h"
#include "../arm/a64operand.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
//! \cond INTERNAL
//! \addtogroup asmjit_a64
//! \{
static const constexpr ArchTraits a64ArchTraits = {
// SP/FP/LR/PC.
Gp::kIdSp, Gp::kIdFp, Gp::kIdLr, 0xFF,
// Reserved.
{ 0, 0, 0 },
// HW stack alignment (AArch64 requires stack aligned to 16 bytes).
16,
// Min/max stack offset - byte addressing is the worst, VecQ addressing the best.
4095, 65520,
// Instruction hints [Gp, Vec, ExtraVirt2, ExtraVirt3].
{{
InstHints::kPushPop,
InstHints::kPushPop,
InstHints::kNoHints,
InstHints::kNoHints
}},
// RegInfo.
#define V(index) OperandSignature{arm::RegTraits<RegType(index)>::kSignature}
{{ ASMJIT_LOOKUP_TABLE_32(V, 0) }},
#undef V
// RegTypeToTypeId.
#define V(index) TypeId(arm::RegTraits<RegType(index)>::kTypeId)
{{ ASMJIT_LOOKUP_TABLE_32(V, 0) }},
#undef V
// TypeIdToRegType.
#define V(index) (index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kInt8) ? RegType::kARM_GpW : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kUInt8) ? RegType::kARM_GpW : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kInt16) ? RegType::kARM_GpW : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kUInt16) ? RegType::kARM_GpW : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kInt32) ? RegType::kARM_GpW : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kUInt32) ? RegType::kARM_GpW : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kInt64) ? RegType::kARM_GpX : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kUInt64) ? RegType::kARM_GpX : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kIntPtr) ? RegType::kARM_GpX : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kUIntPtr) ? RegType::kARM_GpX : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kFloat32) ? RegType::kARM_VecS : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kFloat64) ? RegType::kARM_VecD : RegType::kNone)
{{ ASMJIT_LOOKUP_TABLE_32(V, 0) }},
#undef V
// Word names of 8-bit, 16-bit, 32-bit, and 64-bit quantities.
{
ArchTypeNameId::kByte,
ArchTypeNameId::kHWord,
ArchTypeNameId::kWord,
ArchTypeNameId::kXWord
}
};
//! \}
//! \endcond
ASMJIT_END_SUB_NAMESPACE
#endif // ASMJIT_ARM_A64ARCHTRAITS_P_H_INCLUDED
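For reference, a minimal sketch of how this table is consumed. It assumes the generic ArchTraits::byArch() accessor from core/archtraits.h (also used by a64emithelper.cpp later in this commit); names outside this diff are assumptions, not part of the change:

#include <asmjit/a64.h>
using namespace asmjit;

// Query the AArch64 traits table defined above through the generic accessor.
static TypeId typeIdOfGpx() {
  const ArchTraits& traits = ArchTraits::byArch(Arch::kAArch64);
  // Backed by the RegTypeToTypeId lookup table above (RegTraits<kARM_GpX>::kTypeId),
  // i.e. a 64-bit integer TypeId.
  return traits.regTypeToTypeId(RegType::kARM_GpX);
}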

5115
third_party/asmjit/arm/a64assembler.cpp vendored Normal file

File diff suppressed because it is too large.

72
third_party/asmjit/arm/a64assembler.h vendored Normal file

@@ -0,0 +1,72 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_A64ASSEMBLER_H_INCLUDED
#define ASMJIT_ARM_A64ASSEMBLER_H_INCLUDED
#include "../core/assembler.h"
#include "../arm/a64emitter.h"
#include "../arm/a64operand.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
//! \addtogroup asmjit_a64
//! \{
//! AArch64 assembler implementation.
class ASMJIT_VIRTAPI Assembler
: public BaseAssembler,
public EmitterExplicitT<Assembler> {
public:
typedef BaseAssembler Base;
//! \name Construction / Destruction
//! \{
ASMJIT_API Assembler(CodeHolder* code = nullptr) noexcept;
ASMJIT_API virtual ~Assembler() noexcept;
//! \}
//! \name Accessors
//! \{
//! Gets whether the current ARM mode is THUMB (alternative to 32-bit ARM encoding).
inline bool isInThumbMode() const noexcept { return _environment.isArchThumb(); }
//! Gets the current code alignment of the current mode (ARM vs THUMB).
inline uint32_t codeAlignment() const noexcept { return isInThumbMode() ? 2 : 4; }
//! \}
//! \name Emit
//! \{
ASMJIT_API Error _emit(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) override;
//! \}
//! \name Align
//! \{
ASMJIT_API Error align(AlignMode alignMode, uint32_t alignment) override;
//! \}
//! \name Events
//! \{
ASMJIT_API Error onAttach(CodeHolder* code) noexcept override;
ASMJIT_API Error onDetach(CodeHolder* code) noexcept override;
//! \}
};
//! \}
ASMJIT_END_SUB_NAMESPACE
#endif // ASMJIT_ARM_A64ASSEMBLER_H_INCLUDED
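A minimal usage sketch of the assembler declared above. It assumes the usual JitRuntime/CodeHolder setup from the core library (not part of this commit) and an AArch64 host:

#include <asmjit/a64.h>
#include <cstdio>
using namespace asmjit;

typedef int (*Func)(void);

int main() {
  JitRuntime rt;                    // Owns and manages executable memory.
  CodeHolder code;
  code.init(rt.environment());      // Must match the target (AArch64 host assumed).

  a64::Assembler a(&code);
  a.mov(a64::w0, 17);               // AAPCS64 returns 32-bit ints in w0.
  a.ret(a64::x30);                  // Return through the link register.

  Func fn;
  if (rt.add(&fn, &code) != kErrorOk)
    return 1;
  printf("%d\n", fn());             // Prints 17.
  rt.release(fn);
  return 0;
}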

51
third_party/asmjit/arm/a64builder.cpp vendored Normal file

@@ -0,0 +1,51 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#if !defined(ASMJIT_NO_AARCH64) && !defined(ASMJIT_NO_BUILDER)
#include "../arm/a64assembler.h"
#include "../arm/a64builder.h"
#include "../arm/a64emithelper_p.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
// a64::Builder - Construction & Destruction
// =========================================
Builder::Builder(CodeHolder* code) noexcept : BaseBuilder() {
_archMask = uint64_t(1) << uint32_t(Arch::kAArch64);
assignEmitterFuncs(this);
if (code)
code->attach(this);
}
Builder::~Builder() noexcept {}
// a64::Builder - Events
// =====================
Error Builder::onAttach(CodeHolder* code) noexcept {
return Base::onAttach(code);
}
Error Builder::onDetach(CodeHolder* code) noexcept {
return Base::onDetach(code);
}
// a64::Builder - Finalize
// =======================
Error Builder::finalize() {
ASMJIT_PROPAGATE(runPasses());
Assembler a(_code);
a.addEncodingOptions(encodingOptions());
a.addDiagnosticOptions(diagnosticOptions());
return serializeTo(&a);
}
ASMJIT_END_SUB_NAMESPACE
#endif // !ASMJIT_NO_AARCH64 && !ASMJIT_NO_BUILDER

57
third_party/asmjit/arm/a64builder.h vendored Normal file

@@ -0,0 +1,57 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_A64BUILDER_H_INCLUDED
#define ASMJIT_ARM_A64BUILDER_H_INCLUDED
#include "../core/api-config.h"
#ifndef ASMJIT_NO_BUILDER
#include "../core/builder.h"
#include "../arm/a64emitter.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
//! \addtogroup asmjit_a64
//! \{
//! AArch64 builder implementation.
class ASMJIT_VIRTAPI Builder
: public BaseBuilder,
public EmitterExplicitT<Builder> {
public:
ASMJIT_NONCOPYABLE(Builder)
typedef BaseBuilder Base;
//! \name Construction & Destruction
//! \{
ASMJIT_API explicit Builder(CodeHolder* code = nullptr) noexcept;
ASMJIT_API virtual ~Builder() noexcept;
//! \}
//! \name Events
//! \{
ASMJIT_API Error onAttach(CodeHolder* code) noexcept override;
ASMJIT_API Error onDetach(CodeHolder* code) noexcept override;
//! \}
//! \name Finalize
//! \{
ASMJIT_API Error finalize() override;
//! \}
};
//! \}
ASMJIT_END_SUB_NAMESPACE
#endif // !ASMJIT_NO_BUILDER
#endif // ASMJIT_ARM_A64BUILDER_H_INCLUDED
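A short sketch of the Builder workflow: instructions are recorded as nodes first and only encoded when finalize() serializes them through a temporary a64::Assembler, as a64builder.cpp above shows. The CodeHolder is assumed to be initialized for AArch64:

#include <asmjit/a64.h>
using namespace asmjit;

static Error buildWithNodes(CodeHolder& code) {
  a64::Builder cb(&code);
  cb.mov(a64::w0, 42);        // Recorded as a node; nothing is encoded yet.
  cb.ret(a64::x30);
  // Nodes can still be inspected or patched here (cb.firstNode(), ...).
  return cb.finalize();       // Runs passes, then serializes via a temporary a64::Assembler.
}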

60
third_party/asmjit/arm/a64compiler.cpp vendored Normal file

@@ -0,0 +1,60 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#if !defined(ASMJIT_NO_AARCH64) && !defined(ASMJIT_NO_COMPILER)
#include "../arm/a64assembler.h"
#include "../arm/a64compiler.h"
#include "../arm/a64emithelper_p.h"
#include "../arm/a64rapass_p.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
// a64::Compiler - Construction & Destruction
// ==========================================
Compiler::Compiler(CodeHolder* code) noexcept : BaseCompiler() {
_archMask = uint64_t(1) << uint32_t(Arch::kAArch64);
assignEmitterFuncs(this);
if (code)
code->attach(this);
}
Compiler::~Compiler() noexcept {}
// a64::Compiler - Events
// ======================
Error Compiler::onAttach(CodeHolder* code) noexcept {
ASMJIT_PROPAGATE(Base::onAttach(code));
Error err = addPassT<ARMRAPass>();
if (ASMJIT_UNLIKELY(err)) {
onDetach(code);
return err;
}
return kErrorOk;
}
Error Compiler::onDetach(CodeHolder* code) noexcept {
return Base::onDetach(code);
}
// a64::Compiler - Finalize
// ========================
Error Compiler::finalize() {
ASMJIT_PROPAGATE(runPasses());
Assembler a(_code);
a.addEncodingOptions(encodingOptions());
a.addDiagnosticOptions(diagnosticOptions());
return serializeTo(&a);
}
ASMJIT_END_SUB_NAMESPACE
#endif // !ASMJIT_NO_AARCH64 && !ASMJIT_NO_COMPILER

246
third_party/asmjit/arm/a64compiler.h vendored Normal file

@@ -0,0 +1,246 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_ARMCOMPILER_H_INCLUDED
#define ASMJIT_ARM_ARMCOMPILER_H_INCLUDED
#include "../core/api-config.h"
#ifndef ASMJIT_NO_COMPILER
#include "../core/compiler.h"
#include "../core/type.h"
#include "../arm/a64emitter.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
//! \addtogroup asmjit_a64
//! \{
//! AArch64 compiler implementation.
class ASMJIT_VIRTAPI Compiler
: public BaseCompiler,
public EmitterExplicitT<Compiler> {
public:
ASMJIT_NONCOPYABLE(Compiler)
typedef BaseCompiler Base;
//! \name Construction & Destruction
//! \{
ASMJIT_API explicit Compiler(CodeHolder* code = nullptr) noexcept;
ASMJIT_API virtual ~Compiler() noexcept;
//! \}
//! \name Virtual Registers
//! \{
//! \cond INTERNAL
template<typename RegT, typename Type>
inline RegT _newRegInternal(const Type& type) {
RegT reg(Globals::NoInit);
_newReg(&reg, type, nullptr);
return reg;
}
template<typename RegT, typename Type, typename... Args>
inline RegT _newRegInternal(const Type& type, const char* s, Args&&... args) {
#ifndef ASMJIT_NO_LOGGING
RegT reg(Globals::NoInit);
if (sizeof...(Args) == 0)
_newReg(&reg, type, s);
else
_newRegFmt(&reg, type, s, std::forward<Args>(args)...);
return reg;
#else
DebugUtils::unused(std::forward<Args>(args)...);
RegT reg(Globals::NoInit);
_newReg(&reg, type, nullptr);
return reg;
#endif
}
//! \endcond
template<typename RegT, typename... Args>
inline RegT newSimilarReg(const RegT& ref, Args&&... args) {
return _newRegInternal<RegT>(ref, std::forward<Args>(args)...);
}
template<typename... Args>
inline Reg newReg(TypeId typeId, Args&&... args) { return _newRegInternal<Reg>(typeId, std::forward<Args>(args)...); }
template<typename... Args>
inline Gp newGp(TypeId typeId, Args&&... args) { return _newRegInternal<Gp>(typeId, std::forward<Args>(args)...); }
template<typename... Args>
inline Vec newVec(TypeId typeId, Args&&... args) { return _newRegInternal<Vec>(typeId, std::forward<Args>(args)...); }
template<typename... Args>
inline Gp newInt32(Args&&... args) { return _newRegInternal<Gp>(TypeId::kInt32, std::forward<Args>(args)...); }
template<typename... Args>
inline Gp newUInt32(Args&&... args) { return _newRegInternal<Gp>(TypeId::kUInt32, std::forward<Args>(args)...); }
template<typename... Args>
inline Gp newInt64(Args&&... args) { return _newRegInternal<Gp>(TypeId::kInt64, std::forward<Args>(args)...); }
template<typename... Args>
inline Gp newUInt64(Args&&... args) { return _newRegInternal<Gp>(TypeId::kUInt64, std::forward<Args>(args)...); }
template<typename... Args>
inline Gp newIntPtr(Args&&... args) { return _newRegInternal<Gp>(TypeId::kIntPtr, std::forward<Args>(args)...); }
template<typename... Args>
inline Gp newUIntPtr(Args&&... args) { return _newRegInternal<Gp>(TypeId::kUIntPtr, std::forward<Args>(args)...); }
template<typename... Args>
inline Gp newGpw(Args&&... args) { return _newRegInternal<Gp>(TypeId::kUInt32, std::forward<Args>(args)...); }
template<typename... Args>
inline Gp newGpx(Args&&... args) { return _newRegInternal<Gp>(TypeId::kUInt64, std::forward<Args>(args)...); }
template<typename... Args>
inline Gp newGpz(Args&&... args) { return _newRegInternal<Gp>(TypeId::kUIntPtr, std::forward<Args>(args)...); }
template<typename... Args>
inline Vec newVecS(Args&&... args) { return _newRegInternal<Vec>(TypeId::kFloat32, std::forward<Args>(args)...); }
template<typename... Args>
inline Vec newVecD(Args&&... args) { return _newRegInternal<Vec>(TypeId::kFloat64, std::forward<Args>(args)...); }
template<typename... Args>
inline Vec newVecQ(Args&&... args) { return _newRegInternal<Vec>(TypeId::kUInt8x16, std::forward<Args>(args)...); }
//! \}
//! \name Stack
//! \{
//! Creates a new memory chunk allocated on the current function's stack.
inline Mem newStack(uint32_t size, uint32_t alignment, const char* name = nullptr) {
Mem m(Globals::NoInit);
_newStack(&m, size, alignment, name);
return m;
}
//! \}
//! \name Constants
//! \{
//! Put data to a constant-pool and get a memory reference to it.
inline Mem newConst(ConstPoolScope scope, const void* data, size_t size) {
Mem m(Globals::NoInit);
_newConst(&m, scope, data, size);
return m;
}
//! Put a BYTE `val` to a constant-pool (8 bits).
inline Mem newByteConst(ConstPoolScope scope, uint8_t val) noexcept { return newConst(scope, &val, 1); }
//! Put a HWORD `val` to a constant-pool (16 bits).
inline Mem newHWordConst(ConstPoolScope scope, uint16_t val) noexcept { return newConst(scope, &val, 2); }
//! Put a WORD `val` to a constant-pool (32 bits).
inline Mem newWordConst(ConstPoolScope scope, uint32_t val) noexcept { return newConst(scope, &val, 4); }
//! Put a DWORD `val` to a constant-pool (64 bits).
inline Mem newDWordConst(ConstPoolScope scope, uint64_t val) noexcept { return newConst(scope, &val, 8); }
//! Put a 16-bit signed integer `val` to a constant-pool.
inline Mem newInt16Const(ConstPoolScope scope, int16_t val) noexcept { return newConst(scope, &val, 2); }
//! Put a 16-bit unsigned integer `val` to a constant-pool.
inline Mem newUInt16Const(ConstPoolScope scope, uint16_t val) noexcept { return newConst(scope, &val, 2); }
//! Put a 32-bit signed integer `val` to a constant-pool.
inline Mem newInt32Const(ConstPoolScope scope, int32_t val) noexcept { return newConst(scope, &val, 4); }
//! Put a 32-bit unsigned integer `val` to a constant-pool.
inline Mem newUInt32Const(ConstPoolScope scope, uint32_t val) noexcept { return newConst(scope, &val, 4); }
//! Put a 64-bit signed integer `val` to a constant-pool.
inline Mem newInt64Const(ConstPoolScope scope, int64_t val) noexcept { return newConst(scope, &val, 8); }
//! Put a 64-bit unsigned integer `val` to a constant-pool.
inline Mem newUInt64Const(ConstPoolScope scope, uint64_t val) noexcept { return newConst(scope, &val, 8); }
//! Put a SP-FP `val` to a constant-pool.
inline Mem newFloatConst(ConstPoolScope scope, float val) noexcept { return newConst(scope, &val, 4); }
//! Put a DP-FP `val` to a constant-pool.
inline Mem newDoubleConst(ConstPoolScope scope, double val) noexcept { return newConst(scope, &val, 8); }
//! \}
//! \name Instruction Options
//! \{
//! Force the compiler to not follow the conditional or unconditional jump.
inline Compiler& unfollow() noexcept { _instOptions |= InstOptions::kUnfollow; return *this; }
//! \}
//! \name Compiler specific
//! \{
//! Special pseudo-instruction that can be used to load a memory address into `o0` GP register.
//!
//! \note At the moment this instruction is only useful to load a stack allocated address into a GP register
//! for further use. It makes very little sense to use it for anything else. The semantics of this instruction
//! is the same as X86 `LEA` (load effective address) instruction.
inline Error loadAddressOf(const Gp& o0, const Mem& o1) { return _emitter()->_emitI(Inst::kIdAdr, o0, o1); }
//! \}
//! \name Function Call & Ret Intrinsics
//! \{
//! Invoke a function call without `target` type enforcement.
inline Error invoke_(InvokeNode** out, const Operand_& target, const FuncSignature& signature) {
return addInvokeNode(out, Inst::kIdBlr, target, signature);
}
//! Invoke a function call of the given `target` and `signature` and store the added node to `out`.
//!
//! Creates a new \ref InvokeNode, initializes all the necessary members to match the given function `signature`,
//! adds the node to the compiler, and stores its pointer to `out`. The operation is atomic, if anything fails
//! nullptr is stored in `out` and error code is returned.
inline Error invoke(InvokeNode** out, const Gp& target, const FuncSignature& signature) { return invoke_(out, target, signature); }
//! \overload
inline Error invoke(InvokeNode** out, const Mem& target, const FuncSignature& signature) { return invoke_(out, target, signature); }
//! \overload
inline Error invoke(InvokeNode** out, const Label& target, const FuncSignature& signature) { return invoke_(out, target, signature); }
//! \overload
inline Error invoke(InvokeNode** out, const Imm& target, const FuncSignature& signature) { return invoke_(out, target, signature); }
//! \overload
inline Error invoke(InvokeNode** out, uint64_t target, const FuncSignature& signature) { return invoke_(out, Imm(int64_t(target)), signature); }
//! Return.
inline Error ret() { return addRet(Operand(), Operand()); }
//! \overload
inline Error ret(const BaseReg& o0) { return addRet(o0, Operand()); }
//! \overload
inline Error ret(const BaseReg& o0, const BaseReg& o1) { return addRet(o0, o1); }
//! \}
//! \name Jump Tables Support
//! \{
using EmitterExplicitT<Compiler>::br;
//! Adds a jump to the given `target` with the provided jump `annotation`.
inline Error br(const BaseReg& target, JumpAnnotation* annotation) { return emitAnnotatedJump(Inst::kIdBr, target, annotation); }
//! \}
//! \name Events
//! \{
ASMJIT_API Error onAttach(CodeHolder* code) noexcept override;
ASMJIT_API Error onDetach(CodeHolder* code) noexcept override;
//! \}
//! \name Finalize
//! \{
ASMJIT_API Error finalize() override;
//! \}
};
//! \}
ASMJIT_END_SUB_NAMESPACE
#endif // !ASMJIT_NO_COMPILER
#endif // ASMJIT_ARM_ARMCOMPILER_H_INCLUDED
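A sketch of the Compiler workflow built on the helpers above (newGpw, ret, finalize). JitRuntime and FuncSignatureT come from the core library and are assumed here:

#include <asmjit/a64.h>
using namespace asmjit;

typedef int (*AddFunc)(int, int);

static Error buildAdd(JitRuntime& rt, AddFunc* out) {
  CodeHolder code;
  ASMJIT_PROPAGATE(code.init(rt.environment()));

  a64::Compiler cc(&code);
  FuncNode* f = cc.addFunc(FuncSignatureT<int, int, int>());  // int fn(int, int)

  a64::Gp x = cc.newGpw("x");        // Virtual registers; the RA pass assigns
  a64::Gp y = cc.newGpw("y");        // physical registers during finalize().
  f->setArg(0, x);
  f->setArg(1, y);
  cc.add(x, x, y);
  cc.ret(x);

  cc.endFunc();
  ASMJIT_PROPAGATE(cc.finalize());   // Runs the RA pass and serializes to a64::Assembler.
  return rt.add(out, &code);
}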

464
third_party/asmjit/arm/a64emithelper.cpp vendored Normal file

@@ -0,0 +1,464 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#if !defined(ASMJIT_NO_AARCH64)
#include "../core/formatter.h"
#include "../core/funcargscontext_p.h"
#include "../core/string.h"
#include "../core/support.h"
#include "../core/type.h"
#include "../arm/a64emithelper_p.h"
#include "../arm/a64formatter_p.h"
#include "../arm/a64instapi_p.h"
#include "../arm/a64operand.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
// a64::EmitHelper - Emit Operations
// =================================
ASMJIT_FAVOR_SIZE Error EmitHelper::emitRegMove(
const Operand_& dst_,
const Operand_& src_, TypeId typeId, const char* comment) {
Emitter* emitter = _emitter->as<Emitter>();
// Invalid or abstract TypeIds are not allowed.
ASMJIT_ASSERT(TypeUtils::isValid(typeId) && !TypeUtils::isAbstract(typeId));
emitter->setInlineComment(comment);
if (dst_.isReg() && src_.isMem()) {
Reg dst(dst_.as<Reg>());
Mem src(src_.as<Mem>());
switch (typeId) {
case TypeId::kInt8:
case TypeId::kUInt8:
return emitter->ldrb(dst.as<Gp>(), src);
case TypeId::kInt16:
case TypeId::kUInt16:
return emitter->ldrh(dst.as<Gp>(), src);
case TypeId::kInt32:
case TypeId::kUInt32:
return emitter->ldr(dst.as<Gp>().w(), src);
case TypeId::kInt64:
case TypeId::kUInt64:
return emitter->ldr(dst.as<Gp>().x(), src);
default: {
if (TypeUtils::isFloat32(typeId) || TypeUtils::isVec32(typeId))
return emitter->ldr(dst.as<Vec>().s(), src);
if (TypeUtils::isFloat64(typeId) || TypeUtils::isVec64(typeId))
return emitter->ldr(dst.as<Vec>().d(), src);
if (TypeUtils::isVec128(typeId))
return emitter->ldr(dst.as<Vec>().q(), src);
break;
}
}
}
if (dst_.isMem() && src_.isReg()) {
Mem dst(dst_.as<Mem>());
Reg src(src_.as<Reg>());
switch (typeId) {
case TypeId::kInt8:
case TypeId::kUInt8:
return emitter->strb(src.as<Gp>(), dst);
case TypeId::kInt16:
case TypeId::kUInt16:
return emitter->strh(src.as<Gp>(), dst);
case TypeId::kInt32:
case TypeId::kUInt32:
return emitter->str(src.as<Gp>().w(), dst);
case TypeId::kInt64:
case TypeId::kUInt64:
return emitter->str(src.as<Gp>().x(), dst);
default: {
if (TypeUtils::isFloat32(typeId) || TypeUtils::isVec32(typeId))
return emitter->str(src.as<Vec>().s(), dst);
if (TypeUtils::isFloat64(typeId) || TypeUtils::isVec64(typeId))
return emitter->str(src.as<Vec>().d(), dst);
if (TypeUtils::isVec128(typeId))
return emitter->str(src.as<Vec>().q(), dst);
break;
}
}
}
if (dst_.isReg() && src_.isReg()) {
Reg dst(dst_.as<Reg>());
Reg src(src_.as<Reg>());
switch (typeId) {
case TypeId::kInt8:
case TypeId::kUInt8:
case TypeId::kInt16:
case TypeId::kUInt16:
case TypeId::kInt32:
case TypeId::kUInt32:
case TypeId::kInt64:
case TypeId::kUInt64:
return emitter->mov(dst.as<Gp>().x(), src.as<Gp>().x());
default: {
if (TypeUtils::isFloat32(typeId) || TypeUtils::isVec32(typeId))
return emitter->fmov(dst.as<Vec>().s(), src.as<Vec>().s());
if (TypeUtils::isFloat64(typeId) || TypeUtils::isVec64(typeId))
return emitter->mov(dst.as<Vec>().b8(), src.as<Vec>().b8());
if (TypeUtils::isVec128(typeId))
return emitter->mov(dst.as<Vec>().b16(), src.as<Vec>().b16());
break;
}
}
}
emitter->setInlineComment(nullptr);
return DebugUtils::errored(kErrorInvalidState);
}
Error EmitHelper::emitRegSwap(
const BaseReg& a,
const BaseReg& b, const char* comment) {
DebugUtils::unused(a, b, comment);
return DebugUtils::errored(kErrorInvalidState);
}
// TODO: [ARM] EmitArgMove is unfinished.
Error EmitHelper::emitArgMove(
const BaseReg& dst_, TypeId dstTypeId,
const Operand_& src_, TypeId srcTypeId, const char* comment) {
// Deduce optional `dstTypeId`, which may be `TypeId::kVoid` in some cases.
if (dstTypeId == TypeId::kVoid) {
const ArchTraits& archTraits = ArchTraits::byArch(_emitter->arch());
dstTypeId = archTraits.regTypeToTypeId(dst_.type());
}
// Invalid or abstract TypeIds are not allowed.
ASMJIT_ASSERT(TypeUtils::isValid(dstTypeId) && !TypeUtils::isAbstract(dstTypeId));
ASMJIT_ASSERT(TypeUtils::isValid(srcTypeId) && !TypeUtils::isAbstract(srcTypeId));
Reg dst(dst_.as<Reg>());
Operand src(src_);
uint32_t dstSize = TypeUtils::sizeOf(dstTypeId);
uint32_t srcSize = TypeUtils::sizeOf(srcTypeId);
if (TypeUtils::isInt(dstTypeId)) {
if (TypeUtils::isInt(srcTypeId)) {
uint32_t x = dstSize == 8;
dst.setSignature(OperandSignature{x ? uint32_t(GpX::kSignature) : uint32_t(GpW::kSignature)});
_emitter->setInlineComment(comment);
if (src.isReg()) {
src.setSignature(dst.signature());
return _emitter->emit(Inst::kIdMov, dst, src);
}
else if (src.isMem()) {
InstId instId = Inst::kIdNone;
switch (srcTypeId) {
case TypeId::kInt8: instId = Inst::kIdLdrsb; break;
case TypeId::kUInt8: instId = Inst::kIdLdrb; break;
case TypeId::kInt16: instId = Inst::kIdLdrsh; break;
case TypeId::kUInt16: instId = Inst::kIdLdrh; break;
case TypeId::kInt32: instId = x ? Inst::kIdLdrsw : Inst::kIdLdr; break;
case TypeId::kUInt32: instId = Inst::kIdLdr; x = 0; break;
case TypeId::kInt64: instId = Inst::kIdLdr; break;
case TypeId::kUInt64: instId = Inst::kIdLdr; break;
default:
return DebugUtils::errored(kErrorInvalidState);
}
return _emitter->emit(instId, dst, src);
}
}
}
if (TypeUtils::isFloat(dstTypeId) || TypeUtils::isVec(dstTypeId)) {
if (TypeUtils::isFloat(srcTypeId) || TypeUtils::isVec(srcTypeId)) {
switch (srcSize) {
case 2: dst.as<Vec>().setSignature(OperandSignature{VecH::kSignature}); break;
case 4: dst.as<Vec>().setSignature(OperandSignature{VecS::kSignature}); break;
case 8: dst.as<Vec>().setSignature(OperandSignature{VecD::kSignature}); break;
case 16: dst.as<Vec>().setSignature(OperandSignature{VecV::kSignature}); break;
default:
return DebugUtils::errored(kErrorInvalidState);
}
_emitter->setInlineComment(comment);
if (src.isReg()) {
InstId instId = srcSize <= 4 ? Inst::kIdFmov_v : Inst::kIdMov_v;
src.setSignature(dst.signature());
return _emitter->emit(instId, dst, src);
}
else if (src.isMem()) {
return _emitter->emit(Inst::kIdLdr_v, dst, src);
}
}
}
return DebugUtils::errored(kErrorInvalidState);
}
// a64::EmitHelper - Emit Prolog & Epilog
// ======================================
struct LoadStoreInstructions {
InstId singleInstId;
InstId pairInstId;
};
struct PrologEpilogInfo {
struct RegPair {
uint8_t ids[2];
uint16_t offset;
};
struct GroupData {
RegPair pairs[16];
uint32_t pairCount;
};
Support::Array<GroupData, 2> groups;
uint32_t sizeTotal;
Error init(const FuncFrame& frame) noexcept {
uint32_t offset = 0;
for (RegGroup group : Support::EnumValues<RegGroup, RegGroup::kGp, RegGroup::kVec>{}) {
GroupData& data = groups[group];
uint32_t n = 0;
uint32_t pairCount = 0;
RegPair* pairs = data.pairs;
uint32_t slotSize = frame.saveRestoreRegSize(group);
uint32_t savedRegs = frame.savedRegs(group);
if (group == RegGroup::kGp && frame.hasPreservedFP()) {
// Must be at the beginning of the push/pop sequence.
ASMJIT_ASSERT(pairCount == 0);
pairs[0].offset = uint16_t(offset);
pairs[0].ids[0] = Gp::kIdFp;
pairs[0].ids[1] = Gp::kIdLr;
offset += slotSize * 2;
pairCount++;
savedRegs &= ~Support::bitMask(Gp::kIdFp, Gp::kIdLr);
}
Support::BitWordIterator<uint32_t> it(savedRegs);
while (it.hasNext()) {
pairs[pairCount].ids[n] = uint8_t(it.next());
if (++n == 2) {
pairs[pairCount].offset = uint16_t(offset);
offset += slotSize * 2;
n = 0;
pairCount++;
}
}
if (n == 1) {
pairs[pairCount].ids[1] = uint8_t(BaseReg::kIdBad);
pairs[pairCount].offset = uint16_t(offset);
offset += slotSize * 2;
pairCount++;
}
data.pairCount = pairCount;
}
sizeTotal = offset;
return kErrorOk;
}
};
ASMJIT_FAVOR_SIZE Error EmitHelper::emitProlog(const FuncFrame& frame) {
Emitter* emitter = _emitter->as<Emitter>();
PrologEpilogInfo pei;
ASMJIT_PROPAGATE(pei.init(frame));
static const Support::Array<Reg, 2> groupRegs = {{ x0, d0 }};
static const Support::Array<LoadStoreInstructions, 2> groupInsts = {{
{ Inst::kIdStr , Inst::kIdStp },
{ Inst::kIdStr_v, Inst::kIdStp_v }
}};
uint32_t adjustInitialOffset = pei.sizeTotal;
for (RegGroup group : Support::EnumValues<RegGroup, RegGroup::kGp, RegGroup::kVec>{}) {
const PrologEpilogInfo::GroupData& data = pei.groups[group];
uint32_t pairCount = data.pairCount;
Reg regs[2] = { groupRegs[group], groupRegs[group] };
Mem mem = ptr(sp);
const LoadStoreInstructions& insts = groupInsts[group];
for (uint32_t i = 0; i < pairCount; i++) {
const PrologEpilogInfo::RegPair& pair = data.pairs[i];
regs[0].setId(pair.ids[0]);
regs[1].setId(pair.ids[1]);
mem.setOffsetLo32(pair.offset);
if (pair.offset == 0 && adjustInitialOffset) {
mem.setOffset(-int(adjustInitialOffset));
mem.makePreIndex();
}
if (pair.ids[1] == BaseReg::kIdBad)
ASMJIT_PROPAGATE(emitter->emit(insts.singleInstId, regs[0], mem));
else
ASMJIT_PROPAGATE(emitter->emit(insts.pairInstId, regs[0], regs[1], mem));
mem.resetToFixedOffset();
if (i == 0 && frame.hasPreservedFP()) {
ASMJIT_PROPAGATE(emitter->mov(x29, sp));
}
}
}
if (frame.hasStackAdjustment()) {
uint32_t adj = frame.stackAdjustment();
if (adj <= 0xFFFu) {
ASMJIT_PROPAGATE(emitter->sub(sp, sp, adj));
}
else if (adj <= 0xFFFFFFu) {
// TODO: [ARM] Prolog - we must touch the pages otherwise it's undefined.
ASMJIT_PROPAGATE(emitter->sub(sp, sp, adj & 0x000FFFu));
ASMJIT_PROPAGATE(emitter->sub(sp, sp, adj & 0xFFF000u));
}
else {
return DebugUtils::errored(kErrorInvalidState);
}
}
return kErrorOk;
}
// TODO: [ARM] Emit epilog.
ASMJIT_FAVOR_SIZE Error EmitHelper::emitEpilog(const FuncFrame& frame) {
Emitter* emitter = _emitter->as<Emitter>();
PrologEpilogInfo pei;
ASMJIT_PROPAGATE(pei.init(frame));
static const Support::Array<Reg, 2> groupRegs = {{ x0, d0 }};
static const Support::Array<LoadStoreInstructions, 2> groupInsts = {{
{ Inst::kIdLdr , Inst::kIdLdp },
{ Inst::kIdLdr_v, Inst::kIdLdp_v }
}};
uint32_t adjustInitialOffset = pei.sizeTotal;
if (frame.hasStackAdjustment()) {
uint32_t adj = frame.stackAdjustment();
if (adj <= 0xFFFu) {
ASMJIT_PROPAGATE(emitter->add(sp, sp, adj));
}
else if (adj <= 0xFFFFFFu) {
ASMJIT_PROPAGATE(emitter->add(sp, sp, adj & 0x000FFFu));
ASMJIT_PROPAGATE(emitter->add(sp, sp, adj & 0xFFF000u));
}
else {
return DebugUtils::errored(kErrorInvalidState);
}
}
for (int g = 1; g >= 0; g--) {
RegGroup group = RegGroup(g);
const PrologEpilogInfo::GroupData& data = pei.groups[group];
uint32_t pairCount = data.pairCount;
Reg regs[2] = { groupRegs[group], groupRegs[group] };
Mem mem = ptr(sp);
const LoadStoreInstructions& insts = groupInsts[group];
for (int i = int(pairCount) - 1; i >= 0; i--) {
const PrologEpilogInfo::RegPair& pair = data.pairs[i];
regs[0].setId(pair.ids[0]);
regs[1].setId(pair.ids[1]);
mem.setOffsetLo32(pair.offset);
if (pair.offset == 0 && adjustInitialOffset) {
mem.setOffset(int(adjustInitialOffset));
mem.makePostIndex();
}
if (pair.ids[1] == BaseReg::kIdBad)
ASMJIT_PROPAGATE(emitter->emit(insts.singleInstId, regs[0], mem));
else
ASMJIT_PROPAGATE(emitter->emit(insts.pairInstId, regs[0], regs[1], mem));
mem.resetToFixedOffset();
}
}
ASMJIT_PROPAGATE(emitter->ret(x30));
return kErrorOk;
}
static Error ASMJIT_CDECL Emitter_emitProlog(BaseEmitter* emitter, const FuncFrame& frame) {
EmitHelper emitHelper(emitter);
return emitHelper.emitProlog(frame);
}
static Error ASMJIT_CDECL Emitter_emitEpilog(BaseEmitter* emitter, const FuncFrame& frame) {
EmitHelper emitHelper(emitter);
return emitHelper.emitEpilog(frame);
}
static Error ASMJIT_CDECL Emitter_emitArgsAssignment(BaseEmitter* emitter, const FuncFrame& frame, const FuncArgsAssignment& args) {
EmitHelper emitHelper(emitter);
return emitHelper.emitArgsAssignment(frame, args);
}
void assignEmitterFuncs(BaseEmitter* emitter) {
emitter->_funcs.emitProlog = Emitter_emitProlog;
emitter->_funcs.emitEpilog = Emitter_emitEpilog;
emitter->_funcs.emitArgsAssignment = Emitter_emitArgsAssignment;
#ifndef ASMJIT_NO_LOGGING
emitter->_funcs.formatInstruction = FormatterInternal::formatInstruction;
#endif
#ifndef ASMJIT_NO_VALIDATION
emitter->_funcs.validate = InstInternal::validate;
#endif
}
ASMJIT_END_SUB_NAMESPACE
#endif // !ASMJIT_NO_AARCH64
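A sketch of how these prolog/epilog writers are reached from user code: BaseEmitter::emitProlog()/emitEpilog() dispatch through the function pointers installed by assignEmitterFuncs() above. FuncDetail and FuncFrame are assumed from the core library:

#include <asmjit/a64.h>
using namespace asmjit;

static Error emitFrame(a64::Assembler& a, CodeHolder& code) {
  FuncDetail detail;
  ASMJIT_PROPAGATE(detail.init(FuncSignatureT<void>(), code.environment()));

  FuncFrame frame;
  ASMJIT_PROPAGATE(frame.init(detail));
  frame.addDirtyRegs(a64::x19, a64::x20);  // Callee-saved, so emitProlog() will spill them.
  ASMJIT_PROPAGATE(frame.finalize());

  ASMJIT_PROPAGATE(a.emitProlog(frame));   // stp/str pair sequence generated above.
  // ... function body ...
  return a.emitEpilog(frame);              // Matching ldp/ldr sequence + ret x30.
}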

50
third_party/asmjit/arm/a64emithelper_p.h vendored Normal file

@@ -0,0 +1,50 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_ARMEMITHELPER_P_H_INCLUDED
#define ASMJIT_ARM_ARMEMITHELPER_P_H_INCLUDED
#include "../core/api-config.h"
#include "../core/emithelper_p.h"
#include "../core/func.h"
#include "../arm/a64emitter.h"
#include "../arm/a64operand.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
//! \cond INTERNAL
//! \addtogroup asmjit_a64
//! \{
class EmitHelper : public BaseEmitHelper {
public:
inline explicit EmitHelper(BaseEmitter* emitter = nullptr) noexcept
: BaseEmitHelper(emitter) {}
Error emitRegMove(
const Operand_& dst_,
const Operand_& src_, TypeId typeId, const char* comment = nullptr) override;
Error emitRegSwap(
const BaseReg& a,
const BaseReg& b, const char* comment = nullptr) override;
Error emitArgMove(
const BaseReg& dst_, TypeId dstTypeId,
const Operand_& src_, TypeId srcTypeId, const char* comment = nullptr) override;
Error emitProlog(const FuncFrame& frame);
Error emitEpilog(const FuncFrame& frame);
};
void assignEmitterFuncs(BaseEmitter* emitter);
//! \}
//! \endcond
ASMJIT_END_SUB_NAMESPACE
#endif // ASMJIT_ARM_ARMEMITHELPER_P_H_INCLUDED

1228
third_party/asmjit/arm/a64emitter.h vendored Normal file

File diff suppressed because it is too large.

298
third_party/asmjit/arm/a64formatter.cpp vendored Normal file

@@ -0,0 +1,298 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#ifndef ASMJIT_NO_LOGGING
#include "../core/misc_p.h"
#include "../core/support.h"
#include "../arm/a64formatter_p.h"
#include "../arm/a64instapi_p.h"
#include "../arm/a64instdb_p.h"
#include "../arm/a64operand.h"
#ifndef ASMJIT_NO_COMPILER
#include "../core/compiler.h"
#endif
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
// a64::FormatterInternal - Format Register
// ========================================
ASMJIT_FAVOR_SIZE Error FormatterInternal::formatRegister(
String& sb,
FormatFlags flags,
const BaseEmitter* emitter,
Arch arch,
RegType regType,
uint32_t rId,
uint32_t elementType,
uint32_t elementIndex) noexcept {
DebugUtils::unused(flags);
DebugUtils::unused(arch);
static const char bhsdq[] = "bhsdq";
bool virtRegFormatted = false;
#ifndef ASMJIT_NO_COMPILER
if (Operand::isVirtId(rId)) {
if (emitter && emitter->isCompiler()) {
const BaseCompiler* cc = static_cast<const BaseCompiler*>(emitter);
if (cc->isVirtIdValid(rId)) {
VirtReg* vReg = cc->virtRegById(rId);
ASMJIT_ASSERT(vReg != nullptr);
const char* name = vReg->name();
if (name && name[0] != '\0')
ASMJIT_PROPAGATE(sb.append(name));
else
ASMJIT_PROPAGATE(sb.appendFormat("%%%u", unsigned(Operand::virtIdToIndex(rId))));
virtRegFormatted = true;
}
}
}
#else
DebugUtils::unused(emitter, flags);
#endif
if (!virtRegFormatted) {
char letter = '\0';
switch (regType) {
case RegType::kARM_GpW:
if (rId == Gp::kIdZr)
return sb.append("wzr");
if (rId == Gp::kIdSp)
return sb.append("wsp");
letter = 'w';
break;
case RegType::kARM_GpX:
if (rId == Gp::kIdZr)
return sb.append("xzr");
if (rId == Gp::kIdSp)
return sb.append("sp");
letter = 'x';
break;
case RegType::kARM_VecB:
case RegType::kARM_VecH:
case RegType::kARM_VecS:
case RegType::kARM_VecD:
case RegType::kARM_VecV:
letter = bhsdq[uint32_t(regType) - uint32_t(RegType::kARM_VecB)];
if (elementType)
letter = 'v';
break;
default:
ASMJIT_PROPAGATE(sb.appendFormat("<Reg-%u>?$u", uint32_t(regType), rId));
break;
}
if (letter)
ASMJIT_PROPAGATE(sb.appendFormat("%c%u", letter, rId));
}
if (elementType) {
char elementLetter = '\0';
uint32_t elementCount = 0;
switch (elementType) {
case Vec::kElementTypeB:
elementLetter = 'b';
elementCount = 16;
break;
case Vec::kElementTypeH:
elementLetter = 'h';
elementCount = 8;
break;
case Vec::kElementTypeS:
elementLetter = 's';
elementCount = 4;
break;
case Vec::kElementTypeD:
elementLetter = 'd';
elementCount = 2;
break;
default:
return sb.append(".<Unknown>");
}
if (elementLetter) {
if (elementIndex == 0xFFFFFFFFu) {
if (regType == RegType::kARM_VecD)
elementCount /= 2u;
ASMJIT_PROPAGATE(sb.appendFormat(".%u%c", elementCount, elementLetter));
}
else {
ASMJIT_PROPAGATE(sb.appendFormat(".%c[%u]", elementLetter, elementIndex));
}
}
}
return kErrorOk;
}
// a64::FormatterInternal - Format Operand
// =======================================
ASMJIT_FAVOR_SIZE Error FormatterInternal::formatOperand(
String& sb,
FormatFlags flags,
const BaseEmitter* emitter,
Arch arch,
const Operand_& op) noexcept {
if (op.isReg()) {
const BaseReg& reg = op.as<BaseReg>();
uint32_t elementType = op.as<Vec>().elementType();
uint32_t elementIndex = op.as<Vec>().elementIndex();
if (!op.as<Vec>().hasElementIndex())
elementIndex = 0xFFFFFFFFu;
return formatRegister(sb, flags, emitter, arch, reg.type(), reg.id(), elementType, elementIndex);
}
if (op.isMem()) {
const Mem& m = op.as<Mem>();
ASMJIT_PROPAGATE(sb.append('['));
if (m.hasBase()) {
if (m.hasBaseLabel()) {
ASMJIT_PROPAGATE(Formatter::formatLabel(sb, flags, emitter, m.baseId()));
}
else {
FormatFlags modifiedFlags = flags;
if (m.isRegHome()) {
ASMJIT_PROPAGATE(sb.append('&'));
modifiedFlags &= ~FormatFlags::kRegCasts;
}
ASMJIT_PROPAGATE(formatRegister(sb, modifiedFlags, emitter, arch, m.baseType(), m.baseId()));
}
}
else {
// ARM really requires a base register.
if (m.hasIndex() || m.hasOffset()) {
ASMJIT_PROPAGATE(sb.append("<None>"));
}
}
// The post index makes it look like there was another operand, but it's
// still the part of AsmJit's `arm::Mem` operand so it's consistent with
// other architectures.
if (m.isPostIndex())
ASMJIT_PROPAGATE(sb.append(']'));
if (m.hasIndex()) {
ASMJIT_PROPAGATE(sb.append(", "));
ASMJIT_PROPAGATE(formatRegister(sb, flags, emitter, arch, m.indexType(), m.indexId()));
}
if (m.hasOffset()) {
ASMJIT_PROPAGATE(sb.append(", "));
int64_t off = int64_t(m.offset());
uint32_t base = 10;
if (Support::test(flags, FormatFlags::kHexOffsets) && uint64_t(off) > 9)
base = 16;
if (base == 10) {
ASMJIT_PROPAGATE(sb.appendInt(off, base));
}
else {
ASMJIT_PROPAGATE(sb.append("0x"));
ASMJIT_PROPAGATE(sb.appendUInt(uint64_t(off), base));
}
}
if (m.hasShift()) {
ASMJIT_PROPAGATE(sb.append(' '));
if (!m.isPreOrPost())
ASMJIT_PROPAGATE(formatShiftOp(sb, (ShiftOp)m.predicate()));
ASMJIT_PROPAGATE(sb.appendFormat(" %u", m.shift()));
}
if (!m.isPostIndex())
ASMJIT_PROPAGATE(sb.append(']'));
if (m.isPreIndex())
ASMJIT_PROPAGATE(sb.append('!'));
return kErrorOk;
}
if (op.isImm()) {
const Imm& i = op.as<Imm>();
int64_t val = i.value();
if (Support::test(flags, FormatFlags::kHexImms) && uint64_t(val) > 9) {
ASMJIT_PROPAGATE(sb.append("0x"));
return sb.appendUInt(uint64_t(val), 16);
}
else {
return sb.appendInt(val, 10);
}
}
if (op.isLabel()) {
return Formatter::formatLabel(sb, flags, emitter, op.id());
}
return sb.append("<None>");
}
// a64::FormatterInternal - Format Instruction
// ===========================================
ASMJIT_FAVOR_SIZE Error FormatterInternal::formatInstruction(
String& sb,
FormatFlags flags,
const BaseEmitter* emitter,
Arch arch,
const BaseInst& inst, const Operand_* operands, size_t opCount) noexcept {
DebugUtils::unused(arch);
// Format instruction options and instruction mnemonic.
InstId instId = inst.realId();
if (instId < Inst::_kIdCount)
ASMJIT_PROPAGATE(InstInternal::instIdToString(arch, instId, sb));
else
ASMJIT_PROPAGATE(sb.appendFormat("[InstId=#%u]", unsigned(instId)));
CondCode cc = inst.armCondCode();
if (cc != CondCode::kAL) {
ASMJIT_PROPAGATE(sb.append('.'));
ASMJIT_PROPAGATE(formatCondCode(sb, cc));
}
for (uint32_t i = 0; i < opCount; i++) {
const Operand_& op = operands[i];
if (op.isNone())
break;
ASMJIT_PROPAGATE(sb.append(i == 0 ? " " : ", "));
ASMJIT_PROPAGATE(formatOperand(sb, flags, emitter, arch, op));
}
return kErrorOk;
}
ASMJIT_END_SUB_NAMESPACE
#endif // !ASMJIT_NO_LOGGING
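A sketch of where this formatter fires in practice: attaching a logger (FileLogger from the core library, assumed here) to the CodeHolder makes every emitted instruction go through the formatInstruction() callback installed by assignEmitterFuncs(). An AArch64 host is assumed:

#include <asmjit/a64.h>
#include <cstdio>
using namespace asmjit;

int main() {
  JitRuntime rt;                             // AArch64 host assumed.
  CodeHolder code;
  code.init(rt.environment());

  FileLogger logger(stdout);
  code.setLogger(&logger);                   // Routes output through formatInstruction() above.

  a64::Assembler a(&code);
  a.ldr(a64::x0, a64::ptr(a64::sp, 16));     // Logged e.g. as "ldr x0, [sp, 16]".
  a.ret(a64::x30);
  return 0;
}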

59
third_party/asmjit/arm/a64formatter_p.h vendored Normal file

@@ -0,0 +1,59 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_A64FORMATTER_P_H_INCLUDED
#define ASMJIT_ARM_A64FORMATTER_P_H_INCLUDED
#include "../core/api-config.h"
#ifndef ASMJIT_NO_LOGGING
#include "../core/formatter.h"
#include "../core/string.h"
#include "../arm/armformatter_p.h"
#include "../arm/a64globals.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
//! \cond INTERNAL
//! \addtogroup asmjit_a64
//! \{
namespace FormatterInternal {
using namespace arm::FormatterInternal;
Error ASMJIT_CDECL formatRegister(
String& sb,
FormatFlags flags,
const BaseEmitter* emitter,
Arch arch,
RegType regType,
uint32_t regId,
uint32_t elementType = 0,
uint32_t elementIndex = 0xFFFFFFFFu) noexcept;
Error ASMJIT_CDECL formatOperand(
String& sb,
FormatFlags flags,
const BaseEmitter* emitter,
Arch arch,
const Operand_& op) noexcept;
Error ASMJIT_CDECL formatInstruction(
String& sb,
FormatFlags flags,
const BaseEmitter* emitter,
Arch arch,
const BaseInst& inst, const Operand_* operands, size_t opCount) noexcept;
} // {FormatterInternal}
//! \}
//! \endcond
ASMJIT_END_SUB_NAMESPACE
#endif // !ASMJIT_NO_LOGGING
#endif // ASMJIT_ARM_A64FORMATTER_P_H_INCLUDED

189
third_party/asmjit/arm/a64func.cpp vendored Normal file

@@ -0,0 +1,189 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#if !defined(ASMJIT_NO_AARCH64)
#include "../arm/a64func_p.h"
#include "../arm/a64operand.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
namespace FuncInternal {
static inline bool shouldThreatAsCDecl(CallConvId ccId) noexcept {
return ccId == CallConvId::kCDecl ||
ccId == CallConvId::kStdCall ||
ccId == CallConvId::kFastCall ||
ccId == CallConvId::kVectorCall ||
ccId == CallConvId::kThisCall ||
ccId == CallConvId::kRegParm1 ||
ccId == CallConvId::kRegParm2 ||
ccId == CallConvId::kRegParm3;
}
static RegType regTypeFromFpOrVecTypeId(TypeId typeId) noexcept {
if (typeId == TypeId::kFloat32)
return RegType::kARM_VecS;
else if (typeId == TypeId::kFloat64)
return RegType::kARM_VecD;
else if (TypeUtils::isVec32(typeId))
return RegType::kARM_VecS;
else if (TypeUtils::isVec64(typeId))
return RegType::kARM_VecD;
else if (TypeUtils::isVec128(typeId))
return RegType::kARM_VecV;
else
return RegType::kNone;
}
ASMJIT_FAVOR_SIZE Error initCallConv(CallConv& cc, CallConvId ccId, const Environment& environment) noexcept {
cc.setArch(environment.arch());
cc.setSaveRestoreRegSize(RegGroup::kGp, 8);
cc.setSaveRestoreRegSize(RegGroup::kVec, 8);
cc.setSaveRestoreAlignment(RegGroup::kGp, 16);
cc.setSaveRestoreAlignment(RegGroup::kVec, 16);
cc.setSaveRestoreAlignment(RegGroup::kExtraVirt2, 1);
cc.setSaveRestoreAlignment(RegGroup::kExtraVirt3, 1);
cc.setPassedOrder(RegGroup::kGp, 0, 1, 2, 3, 4, 5, 6, 7);
cc.setPassedOrder(RegGroup::kVec, 0, 1, 2, 3, 4, 5, 6, 7);
cc.setNaturalStackAlignment(16);
if (shouldThreatAsCDecl(ccId)) {
// ARM doesn't have as many calling conventions as the X86 world, so treat most conventions as __cdecl.
cc.setId(CallConvId::kCDecl);
cc.setPreservedRegs(RegGroup::kGp, Support::bitMask(Gp::kIdOs, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30));
cc.setPreservedRegs(RegGroup::kVec, Support::bitMask(8, 9, 10, 11, 12, 13, 14, 15));
}
else {
cc.setId(ccId);
cc.setSaveRestoreRegSize(RegGroup::kVec, 16);
cc.setPreservedRegs(RegGroup::kGp, Support::bitMask(4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30));
cc.setPreservedRegs(RegGroup::kVec, Support::bitMask(4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31));
}
return kErrorOk;
}
ASMJIT_FAVOR_SIZE Error initFuncDetail(FuncDetail& func, const FuncSignature& signature, uint32_t registerSize) noexcept {
DebugUtils::unused(signature);
const CallConv& cc = func.callConv();
uint32_t stackOffset = 0;
uint32_t i;
uint32_t argCount = func.argCount();
if (func.hasRet()) {
for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) {
TypeId typeId = func._rets[valueIndex].typeId();
// Terminate at the first void type (end of the pack).
if (typeId == TypeId::kVoid)
break;
switch (typeId) {
case TypeId::kInt8:
case TypeId::kInt16:
case TypeId::kInt32: {
func._rets[valueIndex].initReg(RegType::kARM_GpW, valueIndex, TypeId::kInt32);
break;
}
case TypeId::kUInt8:
case TypeId::kUInt16:
case TypeId::kUInt32: {
func._rets[valueIndex].initReg(RegType::kARM_GpW, valueIndex, TypeId::kUInt32);
break;
}
case TypeId::kInt64:
case TypeId::kUInt64: {
func._rets[valueIndex].initReg(RegType::kARM_GpX, valueIndex, typeId);
break;
}
default: {
RegType regType = regTypeFromFpOrVecTypeId(typeId);
if (regType == RegType::kNone)
return DebugUtils::errored(kErrorInvalidRegType);
func._rets[valueIndex].initReg(regType, valueIndex, typeId);
break;
}
}
}
}
switch (cc.strategy()) {
case CallConvStrategy::kDefault: {
uint32_t gpzPos = 0;
uint32_t vecPos = 0;
for (i = 0; i < argCount; i++) {
FuncValue& arg = func._args[i][0];
TypeId typeId = arg.typeId();
if (TypeUtils::isInt(typeId)) {
uint32_t regId = BaseReg::kIdBad;
if (gpzPos < CallConv::kMaxRegArgsPerGroup)
regId = cc._passedOrder[RegGroup::kGp].id[gpzPos];
if (regId != BaseReg::kIdBad) {
RegType regType = typeId <= TypeId::kUInt32 ? RegType::kARM_GpW : RegType::kARM_GpX;
arg.assignRegData(regType, regId);
func.addUsedRegs(RegGroup::kGp, Support::bitMask(regId));
gpzPos++;
}
else {
uint32_t size = Support::max<uint32_t>(TypeUtils::sizeOf(typeId), registerSize);
arg.assignStackOffset(int32_t(stackOffset));
stackOffset += size;
}
continue;
}
if (TypeUtils::isFloat(typeId) || TypeUtils::isVec(typeId)) {
uint32_t regId = BaseReg::kIdBad;
if (vecPos < CallConv::kMaxRegArgsPerGroup)
regId = cc._passedOrder[RegGroup::kVec].id[vecPos];
if (regId != BaseReg::kIdBad) {
RegType regType = regTypeFromFpOrVecTypeId(typeId);
if (regType == RegType::kNone)
return DebugUtils::errored(kErrorInvalidRegType);
arg.initTypeId(typeId);
arg.assignRegData(regType, regId);
func.addUsedRegs(RegGroup::kVec, Support::bitMask(regId));
vecPos++;
}
else {
uint32_t size = TypeUtils::sizeOf(typeId);
arg.assignStackOffset(int32_t(stackOffset));
stackOffset += size;
}
continue;
}
}
break;
}
default:
return DebugUtils::errored(kErrorInvalidState);
}
func._argStackSize = stackOffset;
return kErrorOk;
}
} // {FuncInternal}
ASMJIT_END_SUB_NAMESPACE
#endif // !ASMJIT_NO_AARCH64
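A short sketch of how the initializers above are reached: FuncDetail::init() (core library, assumed) selects initCallConv()/initFuncDetail() for AArch64 targets and assigns arguments as described by the code above:

#include <asmjit/a64.h>
using namespace asmjit;

static Error describeArgs(const Environment& env) {
  FuncDetail detail;
  // double fn(int, double): per initFuncDetail() above the int lands in w0,
  // the double in d0, and the return value in d0; overflow would go to the stack.
  return detail.init(FuncSignatureT<double, int, double>(), env);
}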

33
third_party/asmjit/arm/a64func_p.h vendored Normal file

@@ -0,0 +1,33 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_A64FUNC_P_H_INCLUDED
#define ASMJIT_ARM_A64FUNC_P_H_INCLUDED
#include "../core/func.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
//! \cond INTERNAL
//! \addtogroup asmjit_a64
//! \{
//! AArch64-specific function API (calling conventions and other utilities).
namespace FuncInternal {
//! Initialize `CallConv` structure (AArch64 specific).
Error initCallConv(CallConv& cc, CallConvId ccId, const Environment& environment) noexcept;
//! Initialize `FuncDetail` (AArch64 specific).
Error initFuncDetail(FuncDetail& func, const FuncSignature& signature, uint32_t registerSize) noexcept;
} // {FuncInternal}
//! \}
//! \endcond
ASMJIT_END_SUB_NAMESPACE
#endif // ASMJIT_ARM_A64FUNC_P_H_INCLUDED

1894
third_party/asmjit/arm/a64globals.h vendored Normal file

File diff suppressed because it is too large.

278
third_party/asmjit/arm/a64instapi.cpp vendored Normal file

@@ -0,0 +1,278 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#if !defined(ASMJIT_NO_AARCH64)
#include "../core/cpuinfo.h"
#include "../core/misc_p.h"
#include "../core/support.h"
#include "../arm/a64instapi_p.h"
#include "../arm/a64instdb_p.h"
#include "../arm/a64operand.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
// a64::InstInternal - Text
// ========================
#ifndef ASMJIT_NO_TEXT
Error InstInternal::instIdToString(Arch arch, InstId instId, String& output) noexcept {
uint32_t realId = instId & uint32_t(InstIdParts::kRealId);
DebugUtils::unused(arch);
if (ASMJIT_UNLIKELY(!Inst::isDefinedId(realId)))
return DebugUtils::errored(kErrorInvalidInstruction);
const InstDB::InstInfo& info = InstDB::infoById(realId);
return output.append(InstDB::_nameData + info._nameDataIndex);
}
InstId InstInternal::stringToInstId(Arch arch, const char* s, size_t len) noexcept {
DebugUtils::unused(arch);
if (ASMJIT_UNLIKELY(!s))
return Inst::kIdNone;
if (len == SIZE_MAX)
len = strlen(s);
if (ASMJIT_UNLIKELY(len == 0 || len > InstDB::kMaxNameSize))
return Inst::kIdNone;
uint32_t prefix = uint32_t(s[0]) - 'a';
if (ASMJIT_UNLIKELY(prefix > 'z' - 'a'))
return Inst::kIdNone;
uint32_t index = InstDB::instNameIndex[prefix].start;
if (ASMJIT_UNLIKELY(!index))
return Inst::kIdNone;
const char* nameData = InstDB::_nameData;
const InstDB::InstInfo* table = InstDB::_instInfoTable;
const InstDB::InstInfo* base = table + index;
const InstDB::InstInfo* end = table + InstDB::instNameIndex[prefix].end;
for (size_t lim = (size_t)(end - base); lim != 0; lim >>= 1) {
const InstDB::InstInfo* cur = base + (lim >> 1);
int result = Support::cmpInstName(nameData + cur[0]._nameDataIndex, s, len);
if (result < 0) {
base = cur + 1;
lim--;
continue;
}
if (result > 0)
continue;
return uint32_t((size_t)(cur - table));
}
return Inst::kIdNone;
}
#endif // !ASMJIT_NO_TEXT
// a64::InstInternal - Validate
// ============================
#ifndef ASMJIT_NO_VALIDATION
ASMJIT_FAVOR_SIZE Error InstInternal::validate(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, ValidationFlags validationFlags) noexcept {
// TODO:
DebugUtils::unused(arch, inst, operands, opCount, validationFlags);
return kErrorOk;
}
#endif // !ASMJIT_NO_VALIDATION
// a64::InstInternal - QueryRWInfo
// ===============================
#ifndef ASMJIT_NO_INTROSPECTION
struct InstRWInfoData {
uint8_t rwx[Globals::kMaxOpCount];
};
static const InstRWInfoData instRWInfoData[] = {
#define R uint8_t(OpRWFlags::kRead)
#define W uint8_t(OpRWFlags::kWrite)
#define X uint8_t(OpRWFlags::kRW)
{{ R, R, R, R, R, R }}, // kRWI_R
{{ R, W, R, R, R, R }}, // kRWI_RW
{{ R, X, R, R, R, R }}, // kRWI_RX
{{ R, R, W, R, R, R }}, // kRWI_RRW
{{ R, W, X, R, R, R }}, // kRWI_RWX
{{ W, R, R, R, R, R }}, // kRWI_W
{{ W, R, W, R, R, R }}, // kRWI_WRW
{{ W, R, X, R, R, R }}, // kRWI_WRX
{{ W, R, R, W, R, R }}, // kRWI_WRRW
{{ W, R, R, X, R, R }}, // kRWI_WRRX
{{ W, W, R, R, R, R }}, // kRWI_WW
{{ X, R, R, R, R, R }}, // kRWI_X
{{ X, R, X, R, R, R }}, // kRWI_XRX
{{ X, X, R, R, X, R }}, // kRWI_XXRRX
{{ W, R, R, R, R, R }}, // kRWI_LDn
{{ R, W, R, R, R, R }}, // kRWI_STn
{{ R, R, R, R, R, R }} // kRWI_TODO
#undef R
#undef W
#undef X
};
static const uint8_t elementTypeSize[8] = { 0, 1, 2, 4, 8, 4, 4, 0 };
Error InstInternal::queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, InstRWInfo* out) noexcept {
// Unused in Release configuration as the assert is not compiled in.
DebugUtils::unused(arch);
// Only called when `arch` matches the ARM family.
ASMJIT_ASSERT(Environment::isFamilyARM(arch));
// Get the instruction data.
uint32_t realId = inst.id() & uint32_t(InstIdParts::kRealId);
if (ASMJIT_UNLIKELY(!Inst::isDefinedId(realId)))
return DebugUtils::errored(kErrorInvalidInstruction);
out->_instFlags = InstRWFlags::kNone;
out->_opCount = uint8_t(opCount);
out->_rmFeature = 0;
out->_extraReg.reset();
out->_readFlags = CpuRWFlags::kNone; // TODO: [ARM] Read PSTATUS.
out->_writeFlags = CpuRWFlags::kNone; // TODO: [ARM] Write PSTATUS
const InstDB::InstInfo& instInfo = InstDB::_instInfoTable[realId];
const InstRWInfoData& rwInfo = instRWInfoData[instInfo.rwInfoIndex()];
if (instInfo.hasFlag(InstDB::kInstFlagConsecutive) && opCount > 2) {
for (uint32_t i = 0; i < opCount; i++) {
OpRWInfo& op = out->_operands[i];
const Operand_& srcOp = operands[i];
if (!srcOp.isRegOrMem()) {
op.reset();
continue;
}
OpRWFlags rwFlags = i < opCount - 1 ? (OpRWFlags)rwInfo.rwx[0] : (OpRWFlags)rwInfo.rwx[1];
op._opFlags = rwFlags & ~(OpRWFlags::kZExt);
op._physId = BaseReg::kIdBad;
op._rmSize = 0;
op._resetReserved();
uint64_t rByteMask = op.isRead() ? 0xFFFFFFFFFFFFFFFFu : 0x0000000000000000u;
uint64_t wByteMask = op.isWrite() ? 0xFFFFFFFFFFFFFFFFu : 0x0000000000000000u;
op._readByteMask = rByteMask;
op._writeByteMask = wByteMask;
op._extendByteMask = 0;
op._consecutiveLeadCount = 0;
if (srcOp.isReg()) {
if (i == 0)
op._consecutiveLeadCount = uint8_t(opCount - 1);
else
op.addOpFlags(OpRWFlags::kConsecutive);
}
else {
const Mem& memOp = srcOp.as<Mem>();
if (memOp.hasBase()) {
op.addOpFlags(OpRWFlags::kMemBaseRead);
}
if (memOp.hasIndex()) {
op.addOpFlags(OpRWFlags::kMemIndexRead);
op.addOpFlags(memOp.isPreOrPost() ? OpRWFlags::kMemIndexWrite : OpRWFlags::kNone);
}
}
}
}
else {
for (uint32_t i = 0; i < opCount; i++) {
OpRWInfo& op = out->_operands[i];
const Operand_& srcOp = operands[i];
if (!srcOp.isRegOrMem()) {
op.reset();
continue;
}
OpRWFlags rwFlags = (OpRWFlags)rwInfo.rwx[i];
op._opFlags = rwFlags & ~(OpRWFlags::kZExt);
op._physId = BaseReg::kIdBad;
op._rmSize = 0;
op._resetReserved();
uint64_t rByteMask = op.isRead() ? 0xFFFFFFFFFFFFFFFFu : 0x0000000000000000u;
uint64_t wByteMask = op.isWrite() ? 0xFFFFFFFFFFFFFFFFu : 0x0000000000000000u;
op._readByteMask = rByteMask;
op._writeByteMask = wByteMask;
op._extendByteMask = 0;
op._consecutiveLeadCount = 0;
if (srcOp.isReg()) {
if (srcOp.as<Vec>().hasElementIndex()) {
// Only part of the vector is accessed if element index [] is used.
uint32_t elementType = srcOp.as<Vec>().elementType();
uint32_t elementIndex = srcOp.as<Vec>().elementIndex();
uint32_t elementSize = elementTypeSize[elementType];
uint64_t accessMask = uint64_t(Support::lsbMask<uint32_t>(elementSize)) << (elementIndex * elementSize);
op._readByteMask &= accessMask;
op._writeByteMask &= accessMask;
}
// TODO: [ARM] RW info is not finished.
}
else {
const Mem& memOp = srcOp.as<Mem>();
if (memOp.hasBase()) {
op.addOpFlags(OpRWFlags::kMemBaseRead);
}
if (memOp.hasIndex()) {
op.addOpFlags(OpRWFlags::kMemIndexRead);
op.addOpFlags(memOp.isPreOrPost() ? OpRWFlags::kMemIndexWrite : OpRWFlags::kNone);
}
}
}
}
return kErrorOk;
}
#endif // !ASMJIT_NO_INTROSPECTION
// a64::InstInternal - QueryFeatures
// =================================
#ifndef ASMJIT_NO_INTROSPECTION
Error InstInternal::queryFeatures(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, CpuFeatures* out) noexcept {
// TODO: [ARM] QueryFeatures not implemented yet.
DebugUtils::unused(arch, inst, operands, opCount, out);
return kErrorOk;
}
#endif // !ASMJIT_NO_INTROSPECTION
// a64::InstInternal - Unit
// ========================
#if defined(ASMJIT_TEST)
UNIT(arm_inst_api_text) {
// TODO:
}
#endif
ASMJIT_END_SUB_NAMESPACE
#endif // !ASMJIT_NO_AARCH64
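A sketch of reaching these entry points through the public InstAPI namespace (core library, assumed), which forwards to InstInternal for AArch64 targets:

#include <asmjit/a64.h>
#include <cstdio>
using namespace asmjit;

int main() {
  String name;
  if (InstAPI::instIdToString(Arch::kAArch64, a64::Inst::kIdAdd, name) == kErrorOk)
    printf("%s\n", name.data());   // Prints "add".
  return 0;
}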

41
third_party/asmjit/arm/a64instapi_p.h vendored Normal file

@@ -0,0 +1,41 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_A64INSTAPI_P_H_INCLUDED
#define ASMJIT_ARM_A64INSTAPI_P_H_INCLUDED
#include "../core/inst.h"
#include "../core/operand.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
//! \cond INTERNAL
//! \addtogroup asmjit_a64
//! \{
namespace InstInternal {
#ifndef ASMJIT_NO_TEXT
Error ASMJIT_CDECL instIdToString(Arch arch, InstId instId, String& output) noexcept;
InstId ASMJIT_CDECL stringToInstId(Arch arch, const char* s, size_t len) noexcept;
#endif // !ASMJIT_NO_TEXT
#ifndef ASMJIT_NO_VALIDATION
Error ASMJIT_CDECL validate(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, ValidationFlags validationFlags) noexcept;
#endif // !ASMJIT_NO_VALIDATION
#ifndef ASMJIT_NO_INTROSPECTION
Error ASMJIT_CDECL queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, InstRWInfo* out) noexcept;
Error ASMJIT_CDECL queryFeatures(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, CpuFeatures* out) noexcept;
#endif // !ASMJIT_NO_INTROSPECTION
} // {InstInternal}
//! \}
//! \endcond
ASMJIT_END_SUB_NAMESPACE
#endif // ASMJIT_ARM_A64INSTAPI_P_H_INCLUDED

1957
third_party/asmjit/arm/a64instdb.cpp vendored Normal file

File diff suppressed because it is too large.

74
third_party/asmjit/arm/a64instdb.h vendored Normal file

@@ -0,0 +1,74 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_A64INSTDB_H_INCLUDED
#define ASMJIT_ARM_A64INSTDB_H_INCLUDED
#include "../arm/a64globals.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
//! \addtogroup asmjit_a64
//! \{
//! Instruction database (AArch64).
namespace InstDB {
//! Instruction flags.
enum InstFlags : uint32_t {
//! The instruction provides conditional execution.
kInstFlagCond = 0x00000001u,
//! SIMD instruction that processes elements in pairs.
kInstFlagPair = 0x00000002u,
//! SIMD instruction that does widening (Long).
kInstFlagLong = 0x00000004u,
//! SIMD instruction that does narrowing (Narrow).
kInstFlagNarrow = 0x00000008u,
//! SIMD element access of half-words can only be used with v0..15.
kInstFlagVH0_15 = 0x00000010u,
//! Instruction may use consecutive registers if the number of operands is greater than 2.
kInstFlagConsecutive = 0x00000080u
};
//! Instruction information (AArch64).
struct InstInfo {
//! Instruction encoding type.
uint32_t _encoding : 8;
//! Index to data specific to each encoding type.
uint32_t _encodingDataIndex : 8;
uint32_t _reserved : 2;
//! Index to \ref _nameData.
uint32_t _nameDataIndex : 14;
uint16_t _rwInfoIndex;
uint16_t _flags;
//! \name Accessors
//! \{
inline uint32_t rwInfoIndex() const noexcept { return _rwInfoIndex; }
inline uint32_t flags() const noexcept { return _flags; }
inline bool hasFlag(uint32_t flag) const { return (_flags & flag) != 0; }
//! \}
};
ASMJIT_VARAPI const InstInfo _instInfoTable[];
static inline const InstInfo& infoById(InstId instId) noexcept {
instId &= uint32_t(InstIdParts::kRealId);
ASMJIT_ASSERT(Inst::isDefinedId(instId));
return _instInfoTable[instId];
}
} // {InstDB}
//! \}
ASMJIT_END_SUB_NAMESPACE
#endif // ASMJIT_ARM_A64INSTDB_H_INCLUDED
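A sketch of querying the table declared above for per-instruction flags:

#include <asmjit/a64.h>
using namespace asmjit;

static bool usesConsecutiveRegs(InstId instId) {
  const a64::InstDB::InstInfo& info = a64::InstDB::infoById(instId);
  return info.hasFlag(a64::InstDB::kInstFlagConsecutive);  // e.g. multi-register ld2/st2 forms.
}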

876
third_party/asmjit/arm/a64instdb_p.h vendored Normal file

@@ -0,0 +1,876 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_A64INSTDB_H_P_INCLUDED
#define ASMJIT_ARM_A64INSTDB_H_P_INCLUDED
#include "../core/codeholder.h"
#include "../arm/a64instdb.h"
#include "../arm/a64operand.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
//! \cond INTERNAL
//! \addtogroup asmjit_a64
//! \{
namespace InstDB {
// a64::InstDB - Constants Used by Instructions
// ============================================
// GP register types supported by base instructions.
static constexpr uint32_t kW = 0x1;
static constexpr uint32_t kX = 0x2;
static constexpr uint32_t kWX = 0x3;
// GP high register IDs supported by the instruction.
static constexpr uint32_t kZR = Gp::kIdZr;
static constexpr uint32_t kSP = Gp::kIdSp;
// a64::InstDB - RWInfo
// ====================
enum RWInfoType : uint32_t {
kRWI_R,
kRWI_RW,
kRWI_RX,
kRWI_RRW,
kRWI_RWX,
kRWI_W,
kRWI_WRW,
kRWI_WRX,
kRWI_WRRW,
kRWI_WRRX,
kRWI_WW,
kRWI_X,
kRWI_XRX,
kRWI_XXRRX,
kRWI_LDn,
kRWI_STn,
kRWI_SpecialStart = kRWI_LDn
};
// a64::InstDB - ElementType
// =========================
enum ElementType : uint8_t {
kET_None = Vec::kElementTypeNone,
kET_B = Vec::kElementTypeB,
kET_H = Vec::kElementTypeH,
kET_S = Vec::kElementTypeS,
kET_D = Vec::kElementTypeD,
kET_2H = Vec::kElementTypeH2,
kET_4B = Vec::kElementTypeB4
};
// a64::InstDB - GpType
// ====================
enum GpType : uint8_t {
kGp_W,
kGp_X,
kGp_X_SP
};
// a64::InstDB - OPSig
// ===================
enum kOpSignature : uint32_t {
kOp_GpW = GpW::kSignature,
kOp_GpX = GpX::kSignature,
kOp_B = VecB::kSignature,
kOp_H = VecH::kSignature,
kOp_S = VecS::kSignature,
kOp_D = VecD::kSignature,
kOp_Q = VecV::kSignature,
kOp_V8B = VecD::kSignature | Vec::kSignatureElementB,
kOp_V4H = VecD::kSignature | Vec::kSignatureElementH,
kOp_V2S = VecD::kSignature | Vec::kSignatureElementS,
kOp_V16B = VecV::kSignature | Vec::kSignatureElementB,
kOp_V8H = VecV::kSignature | Vec::kSignatureElementH,
kOp_V4S = VecV::kSignature | Vec::kSignatureElementS,
kOp_V2D = VecV::kSignature | Vec::kSignatureElementD
};
// a64::InstDB - HFConv
// ====================
enum kHFConv : uint32_t {
//! FP16 version of the instruction is not available.
kHF_N,
//! Doesn't change the opcode.
kHF_0,
kHF_A,
kHF_B,
kHF_C,
kHF_D,
kHF_Count
};
// a64::InstDB - VOType
// ====================
//! Vector operand type combinations used by FP&SIMD instructions.
enum VOType : uint32_t {
kVO_V_B,
kVO_V_BH,
kVO_V_BH_4S,
kVO_V_BHS,
kVO_V_BHS_D2,
kVO_V_HS,
kVO_V_S,
kVO_V_B8H4,
kVO_V_B8H4S2,
kVO_V_B8D1,
kVO_V_H4S2,
kVO_V_B16,
kVO_V_B16H8,
kVO_V_B16H8S4,
kVO_V_B16D2,
kVO_V_H8S4,
kVO_V_S4,
kVO_V_D2,
kVO_SV_BHS,
kVO_SV_B8H4S2,
kVO_SV_HS,
kVO_V_Any,
kVO_SV_Any,
kVO_Count
};
// a64::InstDB - EncodingId
// ========================
// ${EncodingId:Begin}
// ------------------- Automatically generated, do not edit -------------------
enum EncodingId : uint32_t {
kEncodingNone = 0,
kEncodingBaseAddSub,
kEncodingBaseAdr,
kEncodingBaseAtDcIcTlbi,
kEncodingBaseAtomicCasp,
kEncodingBaseAtomicOp,
kEncodingBaseAtomicSt,
kEncodingBaseBfc,
kEncodingBaseBfi,
kEncodingBaseBfm,
kEncodingBaseBfx,
kEncodingBaseBranchCmp,
kEncodingBaseBranchReg,
kEncodingBaseBranchRel,
kEncodingBaseBranchTst,
kEncodingBaseCCmp,
kEncodingBaseCInc,
kEncodingBaseCSel,
kEncodingBaseCSet,
kEncodingBaseCmpCmn,
kEncodingBaseExtend,
kEncodingBaseExtract,
kEncodingBaseLdSt,
kEncodingBaseLdpStp,
kEncodingBaseLdxp,
kEncodingBaseLogical,
kEncodingBaseMov,
kEncodingBaseMovKNZ,
kEncodingBaseMrs,
kEncodingBaseMsr,
kEncodingBaseMvnNeg,
kEncodingBaseOp,
kEncodingBaseOpImm,
kEncodingBaseR,
kEncodingBaseRM_NoImm,
kEncodingBaseRM_SImm10,
kEncodingBaseRM_SImm9,
kEncodingBaseRR,
kEncodingBaseRRII,
kEncodingBaseRRR,
kEncodingBaseRRRR,
kEncodingBaseRev,
kEncodingBaseShift,
kEncodingBaseStx,
kEncodingBaseStxp,
kEncodingBaseSys,
kEncodingBaseTst,
kEncodingFSimdPair,
kEncodingFSimdSV,
kEncodingFSimdVV,
kEncodingFSimdVVV,
kEncodingFSimdVVVV,
kEncodingFSimdVVVe,
kEncodingISimdPair,
kEncodingISimdSV,
kEncodingISimdVV,
kEncodingISimdVVV,
kEncodingISimdVVVI,
kEncodingISimdVVVV,
kEncodingISimdVVVVx,
kEncodingISimdVVVe,
kEncodingISimdVVVx,
kEncodingISimdVVx,
kEncodingISimdWWV,
kEncodingSimdBicOrr,
kEncodingSimdCmp,
kEncodingSimdDot,
kEncodingSimdDup,
kEncodingSimdFcadd,
kEncodingSimdFccmpFccmpe,
kEncodingSimdFcm,
kEncodingSimdFcmla,
kEncodingSimdFcmpFcmpe,
kEncodingSimdFcsel,
kEncodingSimdFcvt,
kEncodingSimdFcvtLN,
kEncodingSimdFcvtSV,
kEncodingSimdFmlal,
kEncodingSimdFmov,
kEncodingSimdIns,
kEncodingSimdLdNStN,
kEncodingSimdLdSt,
kEncodingSimdLdpStp,
kEncodingSimdLdurStur,
kEncodingSimdMov,
kEncodingSimdMoviMvni,
kEncodingSimdShift,
kEncodingSimdShiftES,
kEncodingSimdSm3tt,
kEncodingSimdSmovUmov,
kEncodingSimdSxtlUxtl,
kEncodingSimdTblTbx
};
// ----------------------------------------------------------------------------
// ${EncodingId:End}
// a64::InstDB::EncodingData
// =========================
namespace EncodingData {
#define M_OPCODE(field, bits) \
uint32_t _##field : bits; \
inline constexpr uint32_t field() const noexcept { return uint32_t(_##field) << (32 - bits); }
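// Illustrative note: M_OPCODE(opcode, 22) declares a 22-bit storage field `_opcode` together with an
// accessor `opcode()` that shifts the stored value back into the high bits of the 32-bit instruction
// word, i.e. `opcode() == _opcode << 10`; a stored value of 0x1AC02, for example, expands to 0x06B00800.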
struct BaseOp {
uint32_t opcode;
};
struct BaseOpImm {
uint32_t opcode;
uint16_t immBits;
uint16_t immOffset;
};
struct BaseR {
uint32_t opcode;
uint32_t rType : 8;
uint32_t rHiId : 8;
uint32_t rShift : 8;
};
struct BaseRR {
uint32_t opcode;
uint32_t aType : 2;
uint32_t aHiId : 6;
uint32_t aShift : 5;
uint32_t bType : 2;
uint32_t bHiId : 6;
uint32_t bShift : 5;
uint32_t uniform : 1;
};
struct BaseRRR {
M_OPCODE(opcode, 22)
uint32_t aType : 2;
uint32_t aHiId : 6;
uint32_t bType : 2;
uint32_t bHiId : 6;
uint32_t cType : 2;
uint32_t cHiId : 6;
uint32_t uniform : 1;
};
struct BaseRRRR {
M_OPCODE(opcode, 22)
uint32_t aType : 2;
uint32_t aHiId : 6;
uint32_t bType : 2;
uint32_t bHiId : 6;
uint32_t cType : 2;
uint32_t cHiId : 6;
uint32_t dType : 2;
uint32_t dHiId : 6;
uint32_t uniform : 1;
};
struct BaseRRII {
M_OPCODE(opcode, 22)
uint32_t aType : 2;
uint32_t aHiId : 6;
uint32_t bType : 2;
uint32_t bHiId : 6;
uint32_t aImmSize : 6;
uint32_t aImmDiscardLsb : 5;
uint32_t aImmOffset : 5;
uint32_t bImmSize : 6;
uint32_t bImmDiscardLsb : 5;
uint32_t bImmOffset : 5;
};
struct BaseAtDcIcTlbi {
uint32_t immVerifyMask : 14;
uint32_t immVerifyData : 14;
uint32_t mandatoryReg : 1;
};
struct BaseAdcSbc {
uint32_t opcode;
};
struct BaseAddSub {
uint32_t shiftedOp : 10; // sf|.......|Sh|.|Rm| Imm:6 |Rn|Rd|
uint32_t extendedOp : 10; // sf|.......|..|.|Rm|Opt|Imm3|Rn|Rd|
uint32_t immediateOp: 10; // sf|.......|Sh| Imm:12 |Rn|Rd|
};
struct BaseAdr {
M_OPCODE(opcode, 22)
OffsetType offsetType : 8;
};
struct BaseBfm {
uint32_t opcode; // sf|........|N|ImmR:6|ImmS:6|Rn|Rd|
};
struct BaseCmpCmn {
uint32_t shiftedOp : 10; // sf|.......|Sh|.|Rm| Imm:6 |Rn|11111|
uint32_t extendedOp : 10; // sf|.......|..|.|Rm|Opt|Imm3|Rn|11111|
uint32_t immediateOp: 10; // sf|.......|Sh| Imm:12 |Rn|11111|
};
struct BaseExtend {
M_OPCODE(opcode, 22) // sf|........|N|......|......|Rn|Rd|
uint32_t rType : 2;
uint32_t u : 1;
};
struct BaseLogical {
uint32_t shiftedOp : 10; // sf|.......|Sh|.|Rm| Imm:6 |Rn|Rd|
uint32_t immediateOp: 10; // sf|........|N|ImmR:6|ImmS:6|Rn|Rd|
uint32_t negateImm : 1; // True if this is an operation that must negate IMM.
};
struct BaseMvnNeg {
uint32_t opcode;
};
struct BaseShift {
M_OPCODE(registerOp, 22)
M_OPCODE(immediateOp, 22)
uint32_t ror : 2;
};
struct BaseTst {
uint32_t shiftedOp : 10; // sf|.......|Sh|.|Rm| Imm:6 |Rn|11111|
uint32_t immediateOp: 10; // sf|........|N|ImmR:6|ImmS:6|Rn|11111|
};
struct BaseRM_NoImm {
M_OPCODE(opcode, 22)
uint32_t rType : 2;
uint32_t rHiId : 6;
uint32_t xOffset : 5;
};
struct BaseRM_SImm9 {
M_OPCODE(offsetOp, 22)
M_OPCODE(prePostOp, 22)
uint32_t rType : 2;
uint32_t rHiId : 6;
uint32_t xOffset : 5;
uint32_t immShift : 4;
};
struct BaseRM_SImm10 {
M_OPCODE(opcode, 22)
uint32_t rType : 2;
uint32_t rHiId : 6;
uint32_t xOffset : 5;
uint32_t immShift : 4;
};
struct BaseLdSt {
uint32_t uOffsetOp : 10;
uint32_t prePostOp : 11;
uint32_t registerOp : 11;
uint32_t literalOp : 8;
uint32_t rType : 2;
uint32_t xOffset : 5;
uint32_t uOffsetShift : 3;
uint32_t uAltInstId : 14;
};
struct BaseLdpStp {
uint32_t offsetOp : 10;
uint32_t prePostOp : 10;
uint32_t rType : 2;
uint32_t xOffset : 5;
uint32_t offsetShift : 3;
};
struct BaseStx {
M_OPCODE(opcode, 22)
uint32_t rType : 2;
uint32_t xOffset : 5;
};
struct BaseLdxp {
M_OPCODE(opcode, 22)
uint32_t rType : 2;
uint32_t xOffset : 5;
};
struct BaseStxp {
M_OPCODE(opcode, 22)
uint32_t rType : 2;
uint32_t xOffset : 5;
};
struct BaseAtomicOp {
M_OPCODE(opcode, 22)
uint32_t rType : 2;
uint32_t xOffset : 5;
uint32_t zr : 1;
};
struct BaseAtomicSt {
M_OPCODE(opcode, 22)
uint32_t rType : 2;
uint32_t xOffset : 5;
};
struct BaseAtomicCasp {
M_OPCODE(opcode, 22)
uint32_t rType : 2;
uint32_t xOffset : 5;
};
typedef BaseOp BaseBranchReg;
typedef BaseOp BaseBranchRel;
typedef BaseOp BaseBranchCmp;
typedef BaseOp BaseBranchTst;
typedef BaseOp BaseExtract;
typedef BaseOp BaseBfc;
typedef BaseOp BaseBfi;
typedef BaseOp BaseBfx;
typedef BaseOp BaseCCmp;
typedef BaseOp BaseCInc;
typedef BaseOp BaseCSet;
typedef BaseOp BaseCSel;
typedef BaseOp BaseMovKNZ;
typedef BaseOp BaseMull;
struct FSimdGeneric {
uint32_t _scalarOp : 28;
uint32_t _scalarHf : 4;
uint32_t _vectorOp : 28;
uint32_t _vectorHf : 4;
constexpr uint32_t scalarOp() const noexcept { return uint32_t(_scalarOp) << 10; }
constexpr uint32_t vectorOp() const noexcept { return uint32_t(_vectorOp) << 10; }
constexpr uint32_t scalarHf() const noexcept { return uint32_t(_scalarHf); }
constexpr uint32_t vectorHf() const noexcept { return uint32_t(_vectorHf); }
};
typedef FSimdGeneric FSimdVV;
typedef FSimdGeneric FSimdVVV;
typedef FSimdGeneric FSimdVVVV;
struct FSimdSV {
uint32_t opcode;
};
struct FSimdVVVe {
uint32_t _scalarOp : 28;
uint32_t _scalarHf : 4;
uint32_t _vectorOp;
uint32_t _elementOp;
constexpr uint32_t scalarOp() const noexcept { return uint32_t(_scalarOp) << 10; }
constexpr uint32_t scalarHf() const noexcept { return uint32_t(_scalarHf); }
constexpr uint32_t vectorOp() const noexcept { return uint32_t(_vectorOp) << 10; }
constexpr uint32_t vectorHf() const noexcept { return kHF_C; }
constexpr uint32_t elementScalarOp() const noexcept { return (uint32_t(_elementOp) << 10) | (0x5u << 28); }
constexpr uint32_t elementVectorOp() const noexcept { return (uint32_t(_elementOp) << 10); }
};
struct SimdFcadd {
uint32_t _opcode;
constexpr uint32_t opcode() const noexcept { return _opcode << 10; }
};
struct SimdFcmla {
uint32_t _regularOp;
uint32_t _elementOp;
constexpr uint32_t regularOp() const noexcept { return uint32_t(_regularOp) << 10; }
constexpr uint32_t elementOp() const noexcept { return (uint32_t(_elementOp) << 10); }
};
struct SimdFccmpFccmpe {
uint32_t _opcode;
constexpr uint32_t opcode() const noexcept { return _opcode; }
};
struct SimdFcm {
uint32_t _registerOp : 28;
uint32_t _registerHf : 4;
uint32_t _zeroOp : 28;
constexpr bool hasRegisterOp() const noexcept { return _registerOp != 0; }
constexpr bool hasZeroOp() const noexcept { return _zeroOp != 0; }
constexpr uint32_t registerScalarOp() const noexcept { return (uint32_t(_registerOp) << 10) | (0x5u << 28); }
constexpr uint32_t registerVectorOp() const noexcept { return uint32_t(_registerOp) << 10; }
constexpr uint32_t registerScalarHf() const noexcept { return uint32_t(_registerHf); }
constexpr uint32_t registerVectorHf() const noexcept { return uint32_t(_registerHf); }
constexpr uint32_t zeroScalarOp() const noexcept { return (uint32_t(_zeroOp) << 10) | (0x5u << 28); }
constexpr uint32_t zeroVectorOp() const noexcept { return (uint32_t(_zeroOp) << 10); }
};
struct SimdFcmpFcmpe {
uint32_t _opcode;
constexpr uint32_t opcode() const noexcept { return _opcode; }
};
struct SimdFcvtLN {
uint32_t _opcode : 22;
uint32_t _isCvtxn : 1;
uint32_t _hasScalar : 1;
constexpr uint32_t scalarOp() const noexcept { return (uint32_t(_opcode) << 10) | (0x5u << 28); }
constexpr uint32_t vectorOp() const noexcept { return (uint32_t(_opcode) << 10); }
constexpr uint32_t isCvtxn() const noexcept { return _isCvtxn; }
constexpr uint32_t hasScalar() const noexcept { return _hasScalar; }
};
struct SimdFcvtSV {
uint32_t _vectorIntOp;
uint32_t _vectorFpOp;
uint32_t _generalOp : 31;
uint32_t _isFloatToInt : 1;
constexpr uint32_t scalarIntOp() const noexcept { return (uint32_t(_vectorIntOp) << 10) | (0x5u << 28); }
constexpr uint32_t vectorIntOp() const noexcept { return uint32_t(_vectorIntOp) << 10; }
constexpr uint32_t scalarFpOp() const noexcept { return (uint32_t(_vectorFpOp) << 10) | (0x5u << 28); }
constexpr uint32_t vectorFpOp() const noexcept { return uint32_t(_vectorFpOp) << 10; }
constexpr uint32_t generalOp() const noexcept { return (uint32_t(_generalOp) << 10); }
constexpr uint32_t isFloatToInt() const noexcept { return _isFloatToInt; }
constexpr uint32_t isFixedPoint() const noexcept { return _vectorFpOp != 0; }
};
struct SimdFmlal {
uint32_t _vectorOp;
uint32_t _elementOp;
uint8_t _optionalQ;
uint8_t tA;
uint8_t tB;
uint8_t tElement;
constexpr uint32_t vectorOp() const noexcept { return uint32_t(_vectorOp) << 10; }
constexpr uint32_t elementOp() const noexcept { return uint32_t(_elementOp) << 10; }
constexpr uint32_t optionalQ() const noexcept { return _optionalQ; }
};
struct FSimdPair {
uint32_t _scalarOp;
uint32_t _vectorOp;
constexpr uint32_t scalarOp() const noexcept { return uint32_t(_scalarOp) << 10; }
constexpr uint32_t vectorOp() const noexcept { return uint32_t(_vectorOp) << 10; }
};
struct ISimdVV {
M_OPCODE(opcode, 22)
uint32_t vecOpType : 6;
};
struct ISimdVVx {
M_OPCODE(opcode, 22)
uint32_t op0Signature;
uint32_t op1Signature;
};
struct ISimdSV {
M_OPCODE(opcode, 22)
uint32_t vecOpType : 6;
};
struct ISimdVVV {
M_OPCODE(opcode, 22)
uint32_t vecOpType : 6;
};
struct ISimdVVVx {
M_OPCODE(opcode, 22)
uint32_t op0Signature;
uint32_t op1Signature;
uint32_t op2Signature;
};
struct ISimdWWV {
M_OPCODE(opcode, 22)
uint32_t vecOpType : 6;
};
struct ISimdVVVe {
uint32_t regularOp : 26; // 22 bits used.
uint32_t regularVecType : 6;
uint32_t elementOp : 26; // 22 bits used.
uint32_t elementVecType : 6;
};
struct ISimdVVVI {
M_OPCODE(opcode, 22)
uint32_t vecOpType : 6;
uint32_t immSize : 4;
uint32_t immShift : 4;
uint32_t imm64HasOneBitLess : 1;
};
struct ISimdVVVV {
uint32_t opcode : 22;
uint32_t vecOpType : 6;
};
struct ISimdVVVVx {
uint32_t opcode;
uint32_t op0Signature;
uint32_t op1Signature;
uint32_t op2Signature;
uint32_t op3Signature;
};
struct SimdBicOrr {
uint32_t registerOp; // 22 bits used.
uint32_t immediateOp; // 22 bits used.
};
struct SimdCmp {
uint32_t regOp;
uint32_t zeroOp : 22;
uint32_t vecOpType : 6;
};
struct SimdDot {
uint32_t vectorOp; // 22 bits used.
uint32_t elementOp; // 22 bits used.
uint8_t tA; // Element-type of the first operand.
uint8_t tB; // Element-type of the second and third operands.
uint8_t tElement; // Element-type of the element index[] operand.
};
struct SimdMoviMvni {
uint32_t opcode : 31;
uint32_t inverted : 1;
};
struct SimdLdSt {
uint32_t uOffsetOp : 10;
uint32_t prePostOp : 11;
uint32_t registerOp : 11;
uint32_t literalOp : 8;
uint32_t uAltInstId : 16;
};
struct SimdLdNStN {
uint32_t singleOp;
uint32_t multipleOp : 22;
uint32_t n : 3;
uint32_t replicate : 1;
};
struct SimdLdpStp {
uint32_t offsetOp : 10;
uint32_t prePostOp : 10;
};
struct SimdLdurStur {
uint32_t opcode;
};
struct ISimdPair {
uint32_t opcode2; // 22 bits used.
uint32_t opcode3 : 26; // 22 bits used.
uint32_t opType3 : 6;
};
struct SimdShift {
uint32_t registerOp; // 22 bits used.
uint32_t immediateOp : 22; // 22 bits used.
uint32_t invertedImm : 1;
uint32_t vecOpType : 6;
};
struct SimdShiftES {
uint32_t opcode : 22;
uint32_t vecOpType : 6;
};
struct SimdSm3tt {
uint32_t opcode;
};
struct SimdSmovUmov {
uint32_t opcode : 22;
uint32_t vecOpType : 6;
uint32_t isSigned : 1;
};
struct SimdSxtlUxtl {
uint32_t opcode : 22;
uint32_t vecOpType : 6;
};
struct SimdTblTbx {
uint32_t opcode;
};
#undef M_OPCODE
// ${EncodingDataForward:Begin}
// ------------------- Automatically generated, do not edit -------------------
extern const BaseAddSub baseAddSub[4];
extern const BaseAdr baseAdr[2];
extern const BaseAtDcIcTlbi baseAtDcIcTlbi[4];
extern const BaseAtomicCasp baseAtomicCasp[4];
extern const BaseAtomicOp baseAtomicOp[123];
extern const BaseAtomicSt baseAtomicSt[48];
extern const BaseBfc baseBfc[1];
extern const BaseBfi baseBfi[3];
extern const BaseBfm baseBfm[3];
extern const BaseBfx baseBfx[3];
extern const BaseBranchCmp baseBranchCmp[2];
extern const BaseBranchReg baseBranchReg[3];
extern const BaseBranchRel baseBranchRel[2];
extern const BaseBranchTst baseBranchTst[2];
extern const BaseCCmp baseCCmp[2];
extern const BaseCInc baseCInc[3];
extern const BaseCSel baseCSel[4];
extern const BaseCSet baseCSet[2];
extern const BaseCmpCmn baseCmpCmn[2];
extern const BaseExtend baseExtend[5];
extern const BaseExtract baseExtract[1];
extern const BaseLdSt baseLdSt[9];
extern const BaseLdpStp baseLdpStp[6];
extern const BaseLdxp baseLdxp[2];
extern const BaseLogical baseLogical[8];
extern const BaseMovKNZ baseMovKNZ[3];
extern const BaseMvnNeg baseMvnNeg[3];
extern const BaseOp baseOp[23];
extern const BaseOpImm baseOpImm[14];
extern const BaseR baseR[10];
extern const BaseRM_NoImm baseRM_NoImm[21];
extern const BaseRM_SImm10 baseRM_SImm10[2];
extern const BaseRM_SImm9 baseRM_SImm9[23];
extern const BaseRR baseRR[15];
extern const BaseRRII baseRRII[2];
extern const BaseRRR baseRRR[26];
extern const BaseRRRR baseRRRR[6];
extern const BaseShift baseShift[8];
extern const BaseStx baseStx[3];
extern const BaseStxp baseStxp[2];
extern const BaseTst baseTst[1];
extern const FSimdPair fSimdPair[5];
extern const FSimdSV fSimdSV[4];
extern const FSimdVV fSimdVV[17];
extern const FSimdVVV fSimdVVV[13];
extern const FSimdVVVV fSimdVVVV[4];
extern const FSimdVVVe fSimdVVVe[4];
extern const ISimdPair iSimdPair[1];
extern const ISimdSV iSimdSV[7];
extern const ISimdVV iSimdVV[29];
extern const ISimdVVV iSimdVVV[65];
extern const ISimdVVVI iSimdVVVI[2];
extern const ISimdVVVV iSimdVVVV[2];
extern const ISimdVVVVx iSimdVVVVx[1];
extern const ISimdVVVe iSimdVVVe[25];
extern const ISimdVVVx iSimdVVVx[17];
extern const ISimdVVx iSimdVVx[13];
extern const ISimdWWV iSimdWWV[8];
extern const SimdBicOrr simdBicOrr[2];
extern const SimdCmp simdCmp[7];
extern const SimdDot simdDot[5];
extern const SimdFcadd simdFcadd[1];
extern const SimdFccmpFccmpe simdFccmpFccmpe[2];
extern const SimdFcm simdFcm[5];
extern const SimdFcmla simdFcmla[1];
extern const SimdFcmpFcmpe simdFcmpFcmpe[2];
extern const SimdFcvtLN simdFcvtLN[6];
extern const SimdFcvtSV simdFcvtSV[12];
extern const SimdFmlal simdFmlal[6];
extern const SimdLdNStN simdLdNStN[12];
extern const SimdLdSt simdLdSt[2];
extern const SimdLdpStp simdLdpStp[4];
extern const SimdLdurStur simdLdurStur[2];
extern const SimdMoviMvni simdMoviMvni[2];
extern const SimdShift simdShift[40];
extern const SimdShiftES simdShiftES[2];
extern const SimdSm3tt simdSm3tt[4];
extern const SimdSmovUmov simdSmovUmov[2];
extern const SimdSxtlUxtl simdSxtlUxtl[4];
extern const SimdTblTbx simdTblTbx[2];
// ----------------------------------------------------------------------------
// ${EncodingDataForward:End}
} // {EncodingData}
// a64::InstDB - InstNameIndex
// ===========================
// ${NameLimits:Begin}
// ------------------- Automatically generated, do not edit -------------------
enum : uint32_t { kMaxNameSize = 9 };
// ----------------------------------------------------------------------------
// ${NameLimits:End}
struct InstNameIndex {
uint16_t start;
uint16_t end;
};
// a64::InstDB - Tables
// ====================
#ifndef ASMJIT_NO_TEXT
extern const char _nameData[];
extern const InstNameIndex instNameIndex[26];
#endif // !ASMJIT_NO_TEXT
} // {InstDB}
//! \}
//! \endcond
ASMJIT_END_SUB_NAMESPACE
#endif // ASMJIT_ARM_A64INSTDB_H_P_INCLUDED

85
third_party/asmjit/arm/a64operand.cpp vendored Normal file
View File

@@ -0,0 +1,85 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#if !defined(ASMJIT_NO_AARCH64)
#include "../core/misc_p.h"
#include "../arm/a64operand.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
// a64::Operand - Tests
// ====================
#if defined(ASMJIT_TEST)
UNIT(a64_operand) {
INFO("Checking if a64::reg(...) matches built-in IDs");
EXPECT(w(5) == w5);
EXPECT(x(5) == x5);
INFO("Checking Gp register properties");
EXPECT(Gp().isReg() == true);
EXPECT(w0.isReg() == true);
EXPECT(x0.isReg() == true);
EXPECT(w0.id() == 0);
EXPECT(x0.id() == 0);
EXPECT(wzr.id() == Gp::kIdZr);
EXPECT(xzr.id() == Gp::kIdZr);
EXPECT(wsp.id() == Gp::kIdSp);
EXPECT(sp.id() == Gp::kIdSp);
EXPECT(w0.size() == 4);
EXPECT(x0.size() == 8);
EXPECT(w0.type() == RegType::kARM_GpW);
EXPECT(x0.type() == RegType::kARM_GpX);
EXPECT(w0.group() == RegGroup::kGp);
EXPECT(x0.group() == RegGroup::kGp);
INFO("Checking Vec register properties");
EXPECT(v0.type() == RegType::kARM_VecV);
EXPECT(d0.type() == RegType::kARM_VecD);
EXPECT(s0.type() == RegType::kARM_VecS);
EXPECT(h0.type() == RegType::kARM_VecH);
EXPECT(b0.type() == RegType::kARM_VecB);
EXPECT(v0.group() == RegGroup::kVec);
EXPECT(d0.group() == RegGroup::kVec);
EXPECT(s0.group() == RegGroup::kVec);
EXPECT(h0.group() == RegGroup::kVec);
EXPECT(b0.group() == RegGroup::kVec);
INFO("Checking Vec register element[] access");
Vec vd_1 = v15.d(1);
EXPECT(vd_1.type() == RegType::kARM_VecV);
EXPECT(vd_1.group() == RegGroup::kVec);
EXPECT(vd_1.id() == 15);
EXPECT(vd_1.isVecD2());
EXPECT(vd_1.elementType() == Vec::kElementTypeD);
EXPECT(vd_1.hasElementIndex());
EXPECT(vd_1.elementIndex() == 1);
Vec vs_3 = v15.s(3);
EXPECT(vs_3.type() == RegType::kARM_VecV);
EXPECT(vs_3.group() == RegGroup::kVec);
EXPECT(vs_3.id() == 15);
EXPECT(vs_3.isVecS4());
EXPECT(vs_3.elementType() == Vec::kElementTypeS);
EXPECT(vs_3.hasElementIndex());
EXPECT(vs_3.elementIndex() == 3);
Vec vb_4 = v15.b4(3);
EXPECT(vb_4.type() == RegType::kARM_VecV);
EXPECT(vb_4.group() == RegGroup::kVec);
EXPECT(vb_4.id() == 15);
EXPECT(vb_4.isVecB4x4());
EXPECT(vb_4.elementType() == Vec::kElementTypeB4);
EXPECT(vb_4.hasElementIndex());
EXPECT(vb_4.elementIndex() == 3);
}
#endif
ASMJIT_END_SUB_NAMESPACE
#endif // !ASMJIT_NO_AARCH64

312
third_party/asmjit/arm/a64operand.h vendored Normal file
View File

@@ -0,0 +1,312 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_A64OPERAND_H_INCLUDED
#define ASMJIT_ARM_A64OPERAND_H_INCLUDED
#include "../arm/armoperand.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
//! \addtogroup asmjit_a64
//! \{
using arm::Reg;
using arm::Mem;
using arm::Gp;
using arm::GpW;
using arm::GpX;
using arm::Vec;
using arm::VecB;
using arm::VecH;
using arm::VecS;
using arm::VecD;
using arm::VecV;
#ifndef _DOXYGEN
namespace regs {
#endif
using namespace ::asmjit::arm::regs;
static constexpr GpW w0 = GpW(0);
static constexpr GpW w1 = GpW(1);
static constexpr GpW w2 = GpW(2);
static constexpr GpW w3 = GpW(3);
static constexpr GpW w4 = GpW(4);
static constexpr GpW w5 = GpW(5);
static constexpr GpW w6 = GpW(6);
static constexpr GpW w7 = GpW(7);
static constexpr GpW w8 = GpW(8);
static constexpr GpW w9 = GpW(9);
static constexpr GpW w10 = GpW(10);
static constexpr GpW w11 = GpW(11);
static constexpr GpW w12 = GpW(12);
static constexpr GpW w13 = GpW(13);
static constexpr GpW w14 = GpW(14);
static constexpr GpW w15 = GpW(15);
static constexpr GpW w16 = GpW(16);
static constexpr GpW w17 = GpW(17);
static constexpr GpW w18 = GpW(18);
static constexpr GpW w19 = GpW(19);
static constexpr GpW w20 = GpW(20);
static constexpr GpW w21 = GpW(21);
static constexpr GpW w22 = GpW(22);
static constexpr GpW w23 = GpW(23);
static constexpr GpW w24 = GpW(24);
static constexpr GpW w25 = GpW(25);
static constexpr GpW w26 = GpW(26);
static constexpr GpW w27 = GpW(27);
static constexpr GpW w28 = GpW(28);
static constexpr GpW w29 = GpW(29);
static constexpr GpW w30 = GpW(30);
static constexpr GpW wzr = GpW(Gp::kIdZr);
static constexpr GpW wsp = GpW(Gp::kIdSp);
static constexpr GpX x0 = GpX(0);
static constexpr GpX x1 = GpX(1);
static constexpr GpX x2 = GpX(2);
static constexpr GpX x3 = GpX(3);
static constexpr GpX x4 = GpX(4);
static constexpr GpX x5 = GpX(5);
static constexpr GpX x6 = GpX(6);
static constexpr GpX x7 = GpX(7);
static constexpr GpX x8 = GpX(8);
static constexpr GpX x9 = GpX(9);
static constexpr GpX x10 = GpX(10);
static constexpr GpX x11 = GpX(11);
static constexpr GpX x12 = GpX(12);
static constexpr GpX x13 = GpX(13);
static constexpr GpX x14 = GpX(14);
static constexpr GpX x15 = GpX(15);
static constexpr GpX x16 = GpX(16);
static constexpr GpX x17 = GpX(17);
static constexpr GpX x18 = GpX(18);
static constexpr GpX x19 = GpX(19);
static constexpr GpX x20 = GpX(20);
static constexpr GpX x21 = GpX(21);
static constexpr GpX x22 = GpX(22);
static constexpr GpX x23 = GpX(23);
static constexpr GpX x24 = GpX(24);
static constexpr GpX x25 = GpX(25);
static constexpr GpX x26 = GpX(26);
static constexpr GpX x27 = GpX(27);
static constexpr GpX x28 = GpX(28);
static constexpr GpX x29 = GpX(29);
static constexpr GpX x30 = GpX(30);
static constexpr GpX xzr = GpX(Gp::kIdZr);
static constexpr GpX sp = GpX(Gp::kIdSp);
static constexpr VecB b0 = VecB(0);
static constexpr VecB b1 = VecB(1);
static constexpr VecB b2 = VecB(2);
static constexpr VecB b3 = VecB(3);
static constexpr VecB b4 = VecB(4);
static constexpr VecB b5 = VecB(5);
static constexpr VecB b6 = VecB(6);
static constexpr VecB b7 = VecB(7);
static constexpr VecB b8 = VecB(8);
static constexpr VecB b9 = VecB(9);
static constexpr VecB b10 = VecB(10);
static constexpr VecB b11 = VecB(11);
static constexpr VecB b12 = VecB(12);
static constexpr VecB b13 = VecB(13);
static constexpr VecB b14 = VecB(14);
static constexpr VecB b15 = VecB(15);
static constexpr VecB b16 = VecB(16);
static constexpr VecB b17 = VecB(17);
static constexpr VecB b18 = VecB(18);
static constexpr VecB b19 = VecB(19);
static constexpr VecB b20 = VecB(20);
static constexpr VecB b21 = VecB(21);
static constexpr VecB b22 = VecB(22);
static constexpr VecB b23 = VecB(23);
static constexpr VecB b24 = VecB(24);
static constexpr VecB b25 = VecB(25);
static constexpr VecB b26 = VecB(26);
static constexpr VecB b27 = VecB(27);
static constexpr VecB b28 = VecB(28);
static constexpr VecB b29 = VecB(29);
static constexpr VecB b30 = VecB(30);
static constexpr VecB b31 = VecB(31);
static constexpr VecH h0 = VecH(0);
static constexpr VecH h1 = VecH(1);
static constexpr VecH h2 = VecH(2);
static constexpr VecH h3 = VecH(3);
static constexpr VecH h4 = VecH(4);
static constexpr VecH h5 = VecH(5);
static constexpr VecH h6 = VecH(6);
static constexpr VecH h7 = VecH(7);
static constexpr VecH h8 = VecH(8);
static constexpr VecH h9 = VecH(9);
static constexpr VecH h10 = VecH(10);
static constexpr VecH h11 = VecH(11);
static constexpr VecH h12 = VecH(12);
static constexpr VecH h13 = VecH(13);
static constexpr VecH h14 = VecH(14);
static constexpr VecH h15 = VecH(15);
static constexpr VecH h16 = VecH(16);
static constexpr VecH h17 = VecH(17);
static constexpr VecH h18 = VecH(18);
static constexpr VecH h19 = VecH(19);
static constexpr VecH h20 = VecH(20);
static constexpr VecH h21 = VecH(21);
static constexpr VecH h22 = VecH(22);
static constexpr VecH h23 = VecH(23);
static constexpr VecH h24 = VecH(24);
static constexpr VecH h25 = VecH(25);
static constexpr VecH h26 = VecH(26);
static constexpr VecH h27 = VecH(27);
static constexpr VecH h28 = VecH(28);
static constexpr VecH h29 = VecH(29);
static constexpr VecH h30 = VecH(30);
static constexpr VecH h31 = VecH(31);
static constexpr VecS s0 = VecS(0);
static constexpr VecS s1 = VecS(1);
static constexpr VecS s2 = VecS(2);
static constexpr VecS s3 = VecS(3);
static constexpr VecS s4 = VecS(4);
static constexpr VecS s5 = VecS(5);
static constexpr VecS s6 = VecS(6);
static constexpr VecS s7 = VecS(7);
static constexpr VecS s8 = VecS(8);
static constexpr VecS s9 = VecS(9);
static constexpr VecS s10 = VecS(10);
static constexpr VecS s11 = VecS(11);
static constexpr VecS s12 = VecS(12);
static constexpr VecS s13 = VecS(13);
static constexpr VecS s14 = VecS(14);
static constexpr VecS s15 = VecS(15);
static constexpr VecS s16 = VecS(16);
static constexpr VecS s17 = VecS(17);
static constexpr VecS s18 = VecS(18);
static constexpr VecS s19 = VecS(19);
static constexpr VecS s20 = VecS(20);
static constexpr VecS s21 = VecS(21);
static constexpr VecS s22 = VecS(22);
static constexpr VecS s23 = VecS(23);
static constexpr VecS s24 = VecS(24);
static constexpr VecS s25 = VecS(25);
static constexpr VecS s26 = VecS(26);
static constexpr VecS s27 = VecS(27);
static constexpr VecS s28 = VecS(28);
static constexpr VecS s29 = VecS(29);
static constexpr VecS s30 = VecS(30);
static constexpr VecS s31 = VecS(31);
static constexpr VecD d0 = VecD(0);
static constexpr VecD d1 = VecD(1);
static constexpr VecD d2 = VecD(2);
static constexpr VecD d3 = VecD(3);
static constexpr VecD d4 = VecD(4);
static constexpr VecD d5 = VecD(5);
static constexpr VecD d6 = VecD(6);
static constexpr VecD d7 = VecD(7);
static constexpr VecD d8 = VecD(8);
static constexpr VecD d9 = VecD(9);
static constexpr VecD d10 = VecD(10);
static constexpr VecD d11 = VecD(11);
static constexpr VecD d12 = VecD(12);
static constexpr VecD d13 = VecD(13);
static constexpr VecD d14 = VecD(14);
static constexpr VecD d15 = VecD(15);
static constexpr VecD d16 = VecD(16);
static constexpr VecD d17 = VecD(17);
static constexpr VecD d18 = VecD(18);
static constexpr VecD d19 = VecD(19);
static constexpr VecD d20 = VecD(20);
static constexpr VecD d21 = VecD(21);
static constexpr VecD d22 = VecD(22);
static constexpr VecD d23 = VecD(23);
static constexpr VecD d24 = VecD(24);
static constexpr VecD d25 = VecD(25);
static constexpr VecD d26 = VecD(26);
static constexpr VecD d27 = VecD(27);
static constexpr VecD d28 = VecD(28);
static constexpr VecD d29 = VecD(29);
static constexpr VecD d30 = VecD(30);
static constexpr VecD d31 = VecD(31);
static constexpr VecV q0 = VecV(0);
static constexpr VecV q1 = VecV(1);
static constexpr VecV q2 = VecV(2);
static constexpr VecV q3 = VecV(3);
static constexpr VecV q4 = VecV(4);
static constexpr VecV q5 = VecV(5);
static constexpr VecV q6 = VecV(6);
static constexpr VecV q7 = VecV(7);
static constexpr VecV q8 = VecV(8);
static constexpr VecV q9 = VecV(9);
static constexpr VecV q10 = VecV(10);
static constexpr VecV q11 = VecV(11);
static constexpr VecV q12 = VecV(12);
static constexpr VecV q13 = VecV(13);
static constexpr VecV q14 = VecV(14);
static constexpr VecV q15 = VecV(15);
static constexpr VecV q16 = VecV(16);
static constexpr VecV q17 = VecV(17);
static constexpr VecV q18 = VecV(18);
static constexpr VecV q19 = VecV(19);
static constexpr VecV q20 = VecV(20);
static constexpr VecV q21 = VecV(21);
static constexpr VecV q22 = VecV(22);
static constexpr VecV q23 = VecV(23);
static constexpr VecV q24 = VecV(24);
static constexpr VecV q25 = VecV(25);
static constexpr VecV q26 = VecV(26);
static constexpr VecV q27 = VecV(27);
static constexpr VecV q28 = VecV(28);
static constexpr VecV q29 = VecV(29);
static constexpr VecV q30 = VecV(30);
static constexpr VecV q31 = VecV(31);
static constexpr VecV v0 = VecV(0);
static constexpr VecV v1 = VecV(1);
static constexpr VecV v2 = VecV(2);
static constexpr VecV v3 = VecV(3);
static constexpr VecV v4 = VecV(4);
static constexpr VecV v5 = VecV(5);
static constexpr VecV v6 = VecV(6);
static constexpr VecV v7 = VecV(7);
static constexpr VecV v8 = VecV(8);
static constexpr VecV v9 = VecV(9);
static constexpr VecV v10 = VecV(10);
static constexpr VecV v11 = VecV(11);
static constexpr VecV v12 = VecV(12);
static constexpr VecV v13 = VecV(13);
static constexpr VecV v14 = VecV(14);
static constexpr VecV v15 = VecV(15);
static constexpr VecV v16 = VecV(16);
static constexpr VecV v17 = VecV(17);
static constexpr VecV v18 = VecV(18);
static constexpr VecV v19 = VecV(19);
static constexpr VecV v20 = VecV(20);
static constexpr VecV v21 = VecV(21);
static constexpr VecV v22 = VecV(22);
static constexpr VecV v23 = VecV(23);
static constexpr VecV v24 = VecV(24);
static constexpr VecV v25 = VecV(25);
static constexpr VecV v26 = VecV(26);
static constexpr VecV v27 = VecV(27);
static constexpr VecV v28 = VecV(28);
static constexpr VecV v29 = VecV(29);
static constexpr VecV v30 = VecV(30);
static constexpr VecV v31 = VecV(31);
#ifndef _DOXYGEN
} // {regs}
// Make `a64::regs` accessible through `a64` namespace as well.
using namespace regs;
#endif
//! \}
ASMJIT_END_SUB_NAMESPACE
#endif // ASMJIT_ARM_A64OPERAND_H_INCLUDED
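To illustrate how these constants are meant to be consumed, here is a minimal sketch using the public a64::Assembler emitter; the CodeHolder setup is the usual AsmJit boilerplate and the emitter method names simply follow the instruction mnemonics, so treat the exact calls as assumed rather than taken from this header:

#include <asmjit/a64.h>

using namespace asmjit;

Error emitExample(CodeHolder& code) {
  a64::Assembler a(&code);

  a.add(a64::x0, a64::x0, a64::x1);                  // x0 += x1
  a.ldr(a64::w2, arm::ptr(a64::sp, 8));              // w2 = *(uint32_t*)(sp + 8)
  a.fadd(a64::v0.s4(), a64::v1.s4(), a64::v2.s4());  // 4 x f32 SIMD add
  a.ret(a64::x30);                                   // return via the link register

  return kErrorOk;
}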

852
third_party/asmjit/arm/a64rapass.cpp vendored Normal file
View File

@@ -0,0 +1,852 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#if !defined(ASMJIT_NO_AARCH64) && !defined(ASMJIT_NO_COMPILER)
#include "../core/cpuinfo.h"
#include "../core/support.h"
#include "../core/type.h"
#include "../arm/a64assembler.h"
#include "../arm/a64compiler.h"
#include "../arm/a64emithelper_p.h"
#include "../arm/a64instapi_p.h"
#include "../arm/a64instdb_p.h"
#include "../arm/a64rapass_p.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
// a64::ARMRAPass - Helpers
// ========================
// TODO: [ARM] These should be shared with all backends.
ASMJIT_MAYBE_UNUSED
static inline uint64_t raImmMaskFromSize(uint32_t size) noexcept {
ASMJIT_ASSERT(size > 0 && size < 256);
static const uint64_t masks[] = {
0x00000000000000FFu, // 1
0x000000000000FFFFu, // 2
0x00000000FFFFFFFFu, // 4
0xFFFFFFFFFFFFFFFFu, // 8
0x0000000000000000u, // 16
0x0000000000000000u, // 32
0x0000000000000000u, // 64
0x0000000000000000u, // 128
0x0000000000000000u // 256
};
return masks[Support::ctz(size)];
}
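// Example (illustrative): raImmMaskFromSize(1) == 0xFF, raImmMaskFromSize(4) == 0xFFFFFFFF, and
// raImmMaskFromSize(8) == 0xFFFFFFFFFFFFFFFF; power-of-two sizes from 16 to 128 bytes map to 0
// because their table entries above are zero.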
static const RegMask raConsecutiveLeadCountToRegMaskFilter[5] = {
0xFFFFFFFFu, // [0] No consecutive.
0x00000000u, // [1] Invalid, never used.
0x7FFFFFFFu, // [2] 2 consecutive registers.
0x3FFFFFFFu, // [3] 3 consecutive registers.
0x1FFFFFFFu // [4] 4 consecutive registers.
};
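// Example (illustrative): an instruction that needs 4 consecutive registers restricts its lead
// register to ids 0..28 (filter 0x1FFFFFFFu) so that lead+1..lead+3 still fit into the register file.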
static inline RATiedFlags raUseOutFlagsFromRWFlags(OpRWFlags rwFlags) noexcept {
static constexpr RATiedFlags map[] = {
RATiedFlags::kNone,
RATiedFlags::kRead | RATiedFlags::kUse, // kRead
RATiedFlags::kWrite | RATiedFlags::kOut, // kWrite
RATiedFlags::kRW | RATiedFlags::kUse, // kRW
};
return map[uint32_t(rwFlags & OpRWFlags::kRW)];
}
static inline RATiedFlags raRegRwFlags(OpRWFlags flags) noexcept {
return raUseOutFlagsFromRWFlags(flags);
}
static inline RATiedFlags raMemBaseRwFlags(OpRWFlags flags) noexcept {
constexpr uint32_t shift = Support::ConstCTZ<uint32_t(OpRWFlags::kMemBaseRW)>::value;
return raUseOutFlagsFromRWFlags(OpRWFlags(uint32_t(flags) >> shift) & OpRWFlags::kRW);
}
static inline RATiedFlags raMemIndexRwFlags(OpRWFlags flags) noexcept {
constexpr uint32_t shift = Support::ConstCTZ<uint32_t(OpRWFlags::kMemIndexRW)>::value;
return raUseOutFlagsFromRWFlags(OpRWFlags(uint32_t(flags) >> shift) & OpRWFlags::kRW);
}
// a64::RACFGBuilder
// =================
class RACFGBuilder : public RACFGBuilderT<RACFGBuilder> {
public:
Arch _arch;
inline RACFGBuilder(ARMRAPass* pass) noexcept
: RACFGBuilderT<RACFGBuilder>(pass),
_arch(pass->cc()->arch()) {}
inline Compiler* cc() const noexcept { return static_cast<Compiler*>(_cc); }
Error onInst(InstNode* inst, InstControlFlow& controlType, RAInstBuilder& ib) noexcept;
Error onBeforeInvoke(InvokeNode* invokeNode) noexcept;
Error onInvoke(InvokeNode* invokeNode, RAInstBuilder& ib) noexcept;
Error moveImmToRegArg(InvokeNode* invokeNode, const FuncValue& arg, const Imm& imm_, BaseReg* out) noexcept;
Error moveImmToStackArg(InvokeNode* invokeNode, const FuncValue& arg, const Imm& imm_) noexcept;
Error moveRegToStackArg(InvokeNode* invokeNode, const FuncValue& arg, const BaseReg& reg) noexcept;
Error onBeforeRet(FuncRetNode* funcRet) noexcept;
Error onRet(FuncRetNode* funcRet, RAInstBuilder& ib) noexcept;
};
// a64::RACFGBuilder - OnInst
// ==========================
// TODO: [ARM] This is just a workaround...
static InstControlFlow getControlFlowType(InstId instId) noexcept {
switch (BaseInst::extractRealId(instId)) {
case Inst::kIdB:
case Inst::kIdBr:
if (BaseInst::extractARMCondCode(instId) == CondCode::kAL)
return InstControlFlow::kJump;
else
return InstControlFlow::kBranch;
case Inst::kIdBl:
case Inst::kIdBlr:
return InstControlFlow::kCall;
case Inst::kIdCbz:
case Inst::kIdCbnz:
case Inst::kIdTbz:
case Inst::kIdTbnz:
return InstControlFlow::kBranch;
case Inst::kIdRet:
return InstControlFlow::kReturn;
default:
return InstControlFlow::kRegular;
}
}
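// Illustrative examples of the mapping above: a plain `b label` (condition code kAL) is kJump,
// `b.eq label` (condition encoded in the InstId) is kBranch, `bl target` and `blr x8` are kCall,
// `cbz x0, label` is kBranch, and `ret` is kReturn.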
Error RACFGBuilder::onInst(InstNode* inst, InstControlFlow& controlType, RAInstBuilder& ib) noexcept {
InstRWInfo rwInfo;
if (Inst::isDefinedId(inst->realId())) {
InstId instId = inst->id();
uint32_t opCount = inst->opCount();
const Operand* opArray = inst->operands();
ASMJIT_PROPAGATE(InstInternal::queryRWInfo(_arch, inst->baseInst(), opArray, opCount, &rwInfo));
const InstDB::InstInfo& instInfo = InstDB::infoById(instId);
uint32_t singleRegOps = 0;
ib.addInstRWFlags(rwInfo.instFlags());
if (opCount) {
uint32_t consecutiveOffset = 0xFFFFFFFFu;
uint32_t consecutiveParent = Globals::kInvalidId;
for (uint32_t i = 0; i < opCount; i++) {
const Operand& op = opArray[i];
const OpRWInfo& opRwInfo = rwInfo.operand(i);
if (op.isReg()) {
// Register Operand
// ----------------
const Reg& reg = op.as<Reg>();
RATiedFlags flags = raRegRwFlags(opRwInfo.opFlags());
uint32_t vIndex = Operand::virtIdToIndex(reg.id());
if (vIndex < Operand::kVirtIdCount) {
RAWorkReg* workReg;
ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(vIndex, &workReg));
// Use RW instead of Write if the whole register is not overwritten. This is important for
// liveness analysis as we cannot kill a register that will still be used.
if ((flags & RATiedFlags::kRW) == RATiedFlags::kWrite) {
if (workReg->regByteMask() & ~(opRwInfo.writeByteMask() | opRwInfo.extendByteMask())) {
// Not write-only operation.
flags = (flags & ~RATiedFlags::kOut) | (RATiedFlags::kRead | RATiedFlags::kUse);
}
}
RegGroup group = workReg->group();
RegMask useRegs = _pass->_availableRegs[group];
RegMask outRegs = useRegs;
uint32_t useId = BaseReg::kIdBad;
uint32_t outId = BaseReg::kIdBad;
uint32_t useRewriteMask = 0;
uint32_t outRewriteMask = 0;
if (opRwInfo.consecutiveLeadCount()) {
// There must be a single consecutive register lead, otherwise the RW data is invalid.
if (consecutiveOffset != 0xFFFFFFFFu)
return DebugUtils::errored(kErrorInvalidState);
// A consecutive lead register cannot be used as a consecutive +1/+2/+3 register; the registers must be distinct.
if (RATiedReg::consecutiveDataFromFlags(flags) != 0)
return DebugUtils::errored(kErrorNotConsecutiveRegs);
flags |= RATiedFlags::kLeadConsecutive | RATiedReg::consecutiveDataToFlags(opRwInfo.consecutiveLeadCount() - 1);
consecutiveOffset = 0;
RegMask filter = raConsecutiveLeadCountToRegMaskFilter[opRwInfo.consecutiveLeadCount()];
if (Support::test(flags, RATiedFlags::kUse)) {
flags |= RATiedFlags::kUseConsecutive;
useRegs &= filter;
}
else {
flags |= RATiedFlags::kOutConsecutive;
outRegs &= filter;
}
}
if (Support::test(flags, RATiedFlags::kUse)) {
useRewriteMask = Support::bitMask(inst->getRewriteIndex(&reg._baseId));
if (opRwInfo.hasOpFlag(OpRWFlags::kRegPhysId)) {
useId = opRwInfo.physId();
flags |= RATiedFlags::kUseFixed;
}
else if (opRwInfo.hasOpFlag(OpRWFlags::kConsecutive)) {
if (consecutiveOffset == 0xFFFFFFFFu)
return DebugUtils::errored(kErrorInvalidState);
flags |= RATiedFlags::kUseConsecutive | RATiedReg::consecutiveDataToFlags(++consecutiveOffset);
}
}
else {
outRewriteMask = Support::bitMask(inst->getRewriteIndex(&reg._baseId));
if (opRwInfo.hasOpFlag(OpRWFlags::kRegPhysId)) {
outId = opRwInfo.physId();
flags |= RATiedFlags::kOutFixed;
}
else if (opRwInfo.hasOpFlag(OpRWFlags::kConsecutive)) {
if (consecutiveOffset == 0xFFFFFFFFu)
return DebugUtils::errored(kErrorInvalidState);
flags |= RATiedFlags::kOutConsecutive | RATiedReg::consecutiveDataToFlags(++consecutiveOffset);
}
}
// Special cases regarding element access.
if (reg.as<Vec>().hasElementIndex()) {
// Only the first 0..15 registers can be used if the register uses an
// element accessor that accesses half-words (h[0..7] elements).
if (instInfo.hasFlag(InstDB::kInstFlagVH0_15) && reg.as<Vec>().elementType() == Vec::kElementTypeH) {
if (Support::test(flags, RATiedFlags::kUse))
useRegs &= 0x0000FFFFu;
else
outRegs &= 0x0000FFFFu;
}
}
ASMJIT_PROPAGATE(ib.add(workReg, flags, useRegs, useId, useRewriteMask, outRegs, outId, outRewriteMask, opRwInfo.rmSize(), consecutiveParent));
if (singleRegOps == i)
singleRegOps++;
if (Support::test(flags, RATiedFlags::kLeadConsecutive | RATiedFlags::kUseConsecutive | RATiedFlags::kOutConsecutive))
consecutiveParent = workReg->workId();
}
}
else if (op.isMem()) {
// Memory Operand
// --------------
const Mem& mem = op.as<Mem>();
if (mem.isRegHome()) {
RAWorkReg* workReg;
ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(Operand::virtIdToIndex(mem.baseId()), &workReg));
_pass->getOrCreateStackSlot(workReg);
}
else if (mem.hasBaseReg()) {
uint32_t vIndex = Operand::virtIdToIndex(mem.baseId());
if (vIndex < Operand::kVirtIdCount) {
RAWorkReg* workReg;
ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(vIndex, &workReg));
RATiedFlags flags = raMemBaseRwFlags(opRwInfo.opFlags());
RegGroup group = workReg->group();
RegMask allocable = _pass->_availableRegs[group];
// Base registers never have a fixed id on ARM.
const uint32_t useId = BaseReg::kIdBad;
const uint32_t outId = BaseReg::kIdBad;
uint32_t useRewriteMask = 0;
uint32_t outRewriteMask = 0;
if (Support::test(flags, RATiedFlags::kUse))
useRewriteMask = Support::bitMask(inst->getRewriteIndex(&mem._baseId));
else
outRewriteMask = Support::bitMask(inst->getRewriteIndex(&mem._baseId));
ASMJIT_PROPAGATE(ib.add(workReg, flags, allocable, useId, useRewriteMask, allocable, outId, outRewriteMask));
}
}
if (mem.hasIndexReg()) {
uint32_t vIndex = Operand::virtIdToIndex(mem.indexId());
if (vIndex < Operand::kVirtIdCount) {
RAWorkReg* workReg;
ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(vIndex, &workReg));
RATiedFlags flags = raMemIndexRwFlags(opRwInfo.opFlags());
RegGroup group = workReg->group();
RegMask allocable = _pass->_availableRegs[group];
// Index registers never have a fixed id on ARM.
const uint32_t useId = BaseReg::kIdBad;
const uint32_t outId = BaseReg::kIdBad;
uint32_t useRewriteMask = 0;
uint32_t outRewriteMask = 0;
if (Support::test(flags, RATiedFlags::kUse))
useRewriteMask = Support::bitMask(inst->getRewriteIndex(&mem._data[Operand::kDataMemIndexId]));
else
outRewriteMask = Support::bitMask(inst->getRewriteIndex(&mem._data[Operand::kDataMemIndexId]));
ASMJIT_PROPAGATE(ib.add(workReg, RATiedFlags::kUse | RATiedFlags::kRead, allocable, useId, useRewriteMask, allocable, outId, outRewriteMask));
}
}
}
}
}
controlType = getControlFlowType(instId);
}
return kErrorOk;
}
// a64::RACFGBuilder - OnInvoke
// ============================
Error RACFGBuilder::onBeforeInvoke(InvokeNode* invokeNode) noexcept {
const FuncDetail& fd = invokeNode->detail();
uint32_t argCount = invokeNode->argCount();
cc()->_setCursor(invokeNode->prev());
for (uint32_t argIndex = 0; argIndex < argCount; argIndex++) {
const FuncValuePack& argPack = fd.argPack(argIndex);
for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) {
if (!argPack[valueIndex])
break;
const FuncValue& arg = argPack[valueIndex];
const Operand& op = invokeNode->arg(argIndex, valueIndex);
if (op.isNone())
continue;
if (op.isReg()) {
const Reg& reg = op.as<Reg>();
RAWorkReg* workReg;
ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(Operand::virtIdToIndex(reg.id()), &workReg));
if (arg.isReg()) {
RegGroup regGroup = workReg->group();
RegGroup argGroup = Reg::groupOf(arg.regType());
if (regGroup != argGroup) {
// TODO: [ARM] Conversion is not supported.
return DebugUtils::errored(kErrorInvalidAssignment);
}
}
else {
ASMJIT_PROPAGATE(moveRegToStackArg(invokeNode, arg, reg));
}
}
else if (op.isImm()) {
if (arg.isReg()) {
BaseReg reg;
ASMJIT_PROPAGATE(moveImmToRegArg(invokeNode, arg, op.as<Imm>(), &reg));
invokeNode->_args[argIndex][valueIndex] = reg;
}
else {
ASMJIT_PROPAGATE(moveImmToStackArg(invokeNode, arg, op.as<Imm>()));
}
}
}
}
cc()->_setCursor(invokeNode);
if (fd.hasRet()) {
for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) {
const FuncValue& ret = fd.ret(valueIndex);
if (!ret)
break;
const Operand& op = invokeNode->ret(valueIndex);
if (op.isReg()) {
const Reg& reg = op.as<Reg>();
RAWorkReg* workReg;
ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(Operand::virtIdToIndex(reg.id()), &workReg));
if (ret.isReg()) {
RegGroup regGroup = workReg->group();
RegGroup retGroup = Reg::groupOf(ret.regType());
if (regGroup != retGroup) {
// TODO: [ARM] Conversion is not supported.
return DebugUtils::errored(kErrorInvalidAssignment);
}
}
}
}
}
// This block has function call(s).
_curBlock->addFlags(RABlockFlags::kHasFuncCalls);
_pass->func()->frame().addAttributes(FuncAttributes::kHasFuncCalls);
_pass->func()->frame().updateCallStackSize(fd.argStackSize());
return kErrorOk;
}
Error RACFGBuilder::onInvoke(InvokeNode* invokeNode, RAInstBuilder& ib) noexcept {
uint32_t argCount = invokeNode->argCount();
const FuncDetail& fd = invokeNode->detail();
for (uint32_t argIndex = 0; argIndex < argCount; argIndex++) {
const FuncValuePack& argPack = fd.argPack(argIndex);
for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) {
if (!argPack[valueIndex])
continue;
const FuncValue& arg = argPack[valueIndex];
const Operand& op = invokeNode->arg(argIndex, valueIndex);
if (op.isNone())
continue;
if (op.isReg()) {
const Reg& reg = op.as<Reg>();
RAWorkReg* workReg;
ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(Operand::virtIdToIndex(reg.id()), &workReg));
if (arg.isIndirect()) {
RegGroup regGroup = workReg->group();
if (regGroup != RegGroup::kGp)
return DebugUtils::errored(kErrorInvalidState);
ASMJIT_PROPAGATE(ib.addCallArg(workReg, arg.regId()));
}
else if (arg.isReg()) {
RegGroup regGroup = workReg->group();
RegGroup argGroup = Reg::groupOf(arg.regType());
if (regGroup == argGroup) {
ASMJIT_PROPAGATE(ib.addCallArg(workReg, arg.regId()));
}
}
}
}
}
for (uint32_t retIndex = 0; retIndex < Globals::kMaxValuePack; retIndex++) {
const FuncValue& ret = fd.ret(retIndex);
if (!ret)
break;
const Operand& op = invokeNode->ret(retIndex);
if (op.isReg()) {
const Reg& reg = op.as<Reg>();
RAWorkReg* workReg;
ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(Operand::virtIdToIndex(reg.id()), &workReg));
if (ret.isReg()) {
RegGroup regGroup = workReg->group();
RegGroup retGroup = Reg::groupOf(ret.regType());
if (regGroup == retGroup) {
ASMJIT_PROPAGATE(ib.addCallRet(workReg, ret.regId()));
}
}
else {
return DebugUtils::errored(kErrorInvalidAssignment);
}
}
}
// Setup clobbered registers.
ib._clobbered[0] = Support::lsbMask<RegMask>(_pass->_physRegCount[RegGroup(0)]) & ~fd.preservedRegs(RegGroup(0));
ib._clobbered[1] = Support::lsbMask<RegMask>(_pass->_physRegCount[RegGroup(1)]) & ~fd.preservedRegs(RegGroup(1));
ib._clobbered[2] = Support::lsbMask<RegMask>(_pass->_physRegCount[RegGroup(2)]) & ~fd.preservedRegs(RegGroup(2));
ib._clobbered[3] = Support::lsbMask<RegMask>(_pass->_physRegCount[RegGroup(3)]) & ~fd.preservedRegs(RegGroup(3));
return kErrorOk;
}
// a64::RACFGBuilder - MoveImmToRegArg
// ===================================
Error RACFGBuilder::moveImmToRegArg(InvokeNode* invokeNode, const FuncValue& arg, const Imm& imm_, BaseReg* out) noexcept {
DebugUtils::unused(invokeNode);
ASMJIT_ASSERT(arg.isReg());
Imm imm(imm_);
TypeId typeId = TypeId::kVoid;
switch (arg.typeId()) {
case TypeId::kInt8 : typeId = TypeId::kUInt64; imm.signExtend8Bits(); break;
case TypeId::kUInt8 : typeId = TypeId::kUInt64; imm.zeroExtend8Bits(); break;
case TypeId::kInt16 : typeId = TypeId::kUInt64; imm.signExtend16Bits(); break;
case TypeId::kUInt16: typeId = TypeId::kUInt64; imm.zeroExtend16Bits(); break;
case TypeId::kInt32 : typeId = TypeId::kUInt64; imm.signExtend32Bits(); break;
case TypeId::kUInt32: typeId = TypeId::kUInt64; imm.zeroExtend32Bits(); break;
case TypeId::kInt64 : typeId = TypeId::kUInt64; break;
case TypeId::kUInt64: typeId = TypeId::kUInt64; break;
default:
return DebugUtils::errored(kErrorInvalidAssignment);
}
ASMJIT_PROPAGATE(cc()->_newReg(out, typeId, nullptr));
cc()->virtRegById(out->id())->setWeight(BaseRAPass::kCallArgWeight);
return cc()->mov(out->as<Gp>(), imm);
}
// a64::RACFGBuilder - MoveImmToStackArg
// =====================================
Error RACFGBuilder::moveImmToStackArg(InvokeNode* invokeNode, const FuncValue& arg, const Imm& imm_) noexcept {
BaseReg reg;
ASMJIT_PROPAGATE(moveImmToRegArg(invokeNode, arg, imm_, &reg));
ASMJIT_PROPAGATE(moveRegToStackArg(invokeNode, arg, reg));
return kErrorOk;
}
// a64::RACFGBuilder - MoveRegToStackArg
// =====================================
Error RACFGBuilder::moveRegToStackArg(InvokeNode* invokeNode, const FuncValue& arg, const BaseReg& reg) noexcept {
DebugUtils::unused(invokeNode);
Mem stackPtr = ptr(_pass->_sp.as<Gp>(), arg.stackOffset());
if (reg.isGp())
return cc()->str(reg.as<Gp>(), stackPtr);
if (reg.isVec())
return cc()->str(reg.as<Vec>(), stackPtr);
return DebugUtils::errored(kErrorInvalidState);
}
// a64::RACFGBuilder - OnRet
// =========================
Error RACFGBuilder::onBeforeRet(FuncRetNode* funcRet) noexcept {
DebugUtils::unused(funcRet);
return kErrorOk;
}
Error RACFGBuilder::onRet(FuncRetNode* funcRet, RAInstBuilder& ib) noexcept {
const FuncDetail& funcDetail = _pass->func()->detail();
const Operand* opArray = funcRet->operands();
uint32_t opCount = funcRet->opCount();
for (uint32_t i = 0; i < opCount; i++) {
const Operand& op = opArray[i];
if (op.isNone()) continue;
const FuncValue& ret = funcDetail.ret(i);
if (ASMJIT_UNLIKELY(!ret.isReg()))
return DebugUtils::errored(kErrorInvalidAssignment);
if (op.isReg()) {
// Register return value.
const Reg& reg = op.as<Reg>();
uint32_t vIndex = Operand::virtIdToIndex(reg.id());
if (vIndex < Operand::kVirtIdCount) {
RAWorkReg* workReg;
ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(vIndex, &workReg));
RegGroup group = workReg->group();
RegMask allocable = _pass->_availableRegs[group];
ASMJIT_PROPAGATE(ib.add(workReg, RATiedFlags::kUse | RATiedFlags::kRead, allocable, ret.regId(), 0, 0, BaseReg::kIdBad, 0));
}
}
else {
return DebugUtils::errored(kErrorInvalidAssignment);
}
}
return kErrorOk;
}
// a64::ARMRAPass - Construction & Destruction
// ===========================================
ARMRAPass::ARMRAPass() noexcept
: BaseRAPass() { _iEmitHelper = &_emitHelper; }
ARMRAPass::~ARMRAPass() noexcept {}
// a64::ARMRAPass - OnInit / OnDone
// ================================
void ARMRAPass::onInit() noexcept {
Arch arch = cc()->arch();
_emitHelper._emitter = _cb;
_archTraits = &ArchTraits::byArch(arch);
_physRegCount.set(RegGroup::kGp, 32);
_physRegCount.set(RegGroup::kVec, 32);
_physRegCount.set(RegGroup::kExtraVirt2, 0);
_physRegCount.set(RegGroup::kExtraVirt3, 0);
_buildPhysIndex();
_availableRegCount = _physRegCount;
_availableRegs[RegGroup::kGp] = Support::lsbMask<uint32_t>(_physRegCount.get(RegGroup::kGp));
_availableRegs[RegGroup::kVec] = Support::lsbMask<uint32_t>(_physRegCount.get(RegGroup::kVec));
_availableRegs[RegGroup::kExtraVirt2] = Support::lsbMask<uint32_t>(_physRegCount.get(RegGroup::kExtraVirt2));
_availableRegs[RegGroup::kExtraVirt3] = Support::lsbMask<uint32_t>(_physRegCount.get(RegGroup::kExtraVirt3));
_scratchRegIndexes[0] = uint8_t(27);
_scratchRegIndexes[1] = uint8_t(28);
// The architecture-specific setup implicitly makes all registers available, so
// make unavailable all registers that are special and cannot be used in general.
bool hasFP = _func->frame().hasPreservedFP();
if (hasFP)
makeUnavailable(RegGroup::kGp, Gp::kIdFp);
makeUnavailable(RegGroup::kGp, Gp::kIdSp);
makeUnavailable(RegGroup::kGp, Gp::kIdOs); // OS-specific use, usually TLS.
_sp = sp;
_fp = x29;
}
void ARMRAPass::onDone() noexcept {}
// a64::ARMRAPass - BuildCFG
// =========================
Error ARMRAPass::buildCFG() noexcept {
return RACFGBuilder(this).run();
}
// a64::ARMRAPass - Rewrite
// ========================
ASMJIT_FAVOR_SPEED Error ARMRAPass::_rewrite(BaseNode* first, BaseNode* stop) noexcept {
uint32_t virtCount = cc()->_vRegArray.size();
BaseNode* node = first;
while (node != stop) {
BaseNode* next = node->next();
if (node->isInst()) {
InstNode* inst = node->as<InstNode>();
RAInst* raInst = node->passData<RAInst>();
Operand* operands = inst->operands();
uint32_t opCount = inst->opCount();
uint32_t i;
// Rewrite virtual registers into physical registers.
if (raInst) {
// If the instruction contains pass data (raInst) then it was a subject
// for register allocation and must be rewritten to use physical regs.
RATiedReg* tiedRegs = raInst->tiedRegs();
uint32_t tiedCount = raInst->tiedCount();
for (i = 0; i < tiedCount; i++) {
RATiedReg* tiedReg = &tiedRegs[i];
Support::BitWordIterator<uint32_t> useIt(tiedReg->useRewriteMask());
uint32_t useId = tiedReg->useId();
while (useIt.hasNext())
inst->rewriteIdAtIndex(useIt.next(), useId);
Support::BitWordIterator<uint32_t> outIt(tiedReg->outRewriteMask());
uint32_t outId = tiedReg->outId();
while (outIt.hasNext())
inst->rewriteIdAtIndex(outIt.next(), outId);
}
// This data is allocated by the Zone passed to `runOnFunction()`, which
// is reset after the RA pass finishes. So reset this data to
// avoid keeping a dangling pointer after the RA pass is complete.
node->resetPassData();
if (ASMJIT_UNLIKELY(node->type() != NodeType::kInst)) {
// FuncRet terminates the flow; it must either be removed if the exit
// label is next to it (optimization) or patched to an architecture-
// dependent jump instruction that jumps to the function's exit before
// the epilog.
if (node->type() == NodeType::kFuncRet) {
RABlock* block = raInst->block();
if (!isNextTo(node, _func->exitNode())) {
cc()->_setCursor(node->prev());
ASMJIT_PROPAGATE(emitJump(_func->exitNode()->label()));
}
BaseNode* prev = node->prev();
cc()->removeNode(node);
block->setLast(prev);
}
}
}
// Rewrite stack slot addresses.
for (i = 0; i < opCount; i++) {
Operand& op = operands[i];
if (op.isMem()) {
BaseMem& mem = op.as<BaseMem>();
if (mem.isRegHome()) {
uint32_t virtIndex = Operand::virtIdToIndex(mem.baseId());
if (ASMJIT_UNLIKELY(virtIndex >= virtCount))
return DebugUtils::errored(kErrorInvalidVirtId);
VirtReg* virtReg = cc()->virtRegByIndex(virtIndex);
RAWorkReg* workReg = virtReg->workReg();
ASMJIT_ASSERT(workReg != nullptr);
RAStackSlot* slot = workReg->stackSlot();
int32_t offset = slot->offset();
mem._setBase(_sp.type(), slot->baseRegId());
mem.clearRegHome();
mem.addOffsetLo32(offset);
}
}
}
// Rewrite `loadAddressOf()` construct.
if (inst->realId() == Inst::kIdAdr && inst->opCount() == 2 && inst->op(1).isMem()) {
BaseMem mem = inst->op(1).as<BaseMem>();
int64_t offset = mem.offset();
if (!mem.hasBaseOrIndex()) {
inst->setId(Inst::kIdMov);
inst->setOp(1, Imm(offset));
}
else {
if (mem.hasIndex())
return DebugUtils::errored(kErrorInvalidAddressIndex);
GpX dst(inst->op(0).as<Gp>().id());
GpX base(mem.baseId());
InstId arithInstId = offset < 0 ? Inst::kIdSub : Inst::kIdAdd;
uint64_t absOffset = offset < 0 ? Support::neg(uint64_t(offset)) : uint64_t(offset);
inst->setId(arithInstId);
inst->setOpCount(3);
inst->setOp(1, base);
inst->setOp(2, Imm(absOffset));
// Use two operations if the offset cannot be encoded with ADD/SUB.
if (absOffset > 0xFFFu && (absOffset & ~uint64_t(0xFFF000u)) != 0) {
if (absOffset <= 0xFFFFFFu) {
cc()->_setCursor(inst->prev());
ASMJIT_PROPAGATE(cc()->emit(arithInstId, dst, base, Imm(absOffset & 0xFFFu)));
inst->setOp(1, dst);
inst->setOp(2, Imm(absOffset & 0xFFF000u));
}
else {
cc()->_setCursor(inst->prev());
ASMJIT_PROPAGATE(cc()->emit(Inst::kIdMov, inst->op(0), Imm(absOffset)));
inst->setOp(1, base);
inst->setOp(2, dst);
}
}
}
}
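// Illustrative example of the rewrite above: `adr x0, [x1, 0x12345]` cannot encode its offset in a
// single ADD, so it becomes `add x0, x1, 0x345` emitted before the node plus the node itself patched
// to `add x0, x0, 0x12000`; a memory operand with no base register is patched to `mov x0, 0x12345`.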
}
node = next;
}
return kErrorOk;
}
// a64::ARMRAPass - Prolog & Epilog
// ================================
Error ARMRAPass::updateStackFrame() noexcept {
if (_func->frame().hasFuncCalls())
_func->frame().addDirtyRegs(RegGroup::kGp, Support::bitMask(Gp::kIdLr));
return BaseRAPass::updateStackFrame();
}
// a64::ARMRAPass - OnEmit
// =======================
Error ARMRAPass::emitMove(uint32_t workId, uint32_t dstPhysId, uint32_t srcPhysId) noexcept {
RAWorkReg* wReg = workRegById(workId);
BaseReg dst(wReg->signature(), dstPhysId);
BaseReg src(wReg->signature(), srcPhysId);
const char* comment = nullptr;
#ifndef ASMJIT_NO_LOGGING
if (hasDiagnosticOption(DiagnosticOptions::kRAAnnotate)) {
_tmpString.assignFormat("<MOVE> %s", workRegById(workId)->name());
comment = _tmpString.data();
}
#endif
return _emitHelper.emitRegMove(dst, src, wReg->typeId(), comment);
}
Error ARMRAPass::emitSwap(uint32_t aWorkId, uint32_t aPhysId, uint32_t bWorkId, uint32_t bPhysId) noexcept {
DebugUtils::unused(aWorkId, aPhysId, bWorkId, bPhysId);
return DebugUtils::errored(kErrorInvalidState);
}
Error ARMRAPass::emitLoad(uint32_t workId, uint32_t dstPhysId) noexcept {
RAWorkReg* wReg = workRegById(workId);
BaseReg dstReg(wReg->signature(), dstPhysId);
BaseMem srcMem(workRegAsMem(wReg));
const char* comment = nullptr;
#ifndef ASMJIT_NO_LOGGING
if (hasDiagnosticOption(DiagnosticOptions::kRAAnnotate)) {
_tmpString.assignFormat("<LOAD> %s", workRegById(workId)->name());
comment = _tmpString.data();
}
#endif
return _emitHelper.emitRegMove(dstReg, srcMem, wReg->typeId(), comment);
}
Error ARMRAPass::emitSave(uint32_t workId, uint32_t srcPhysId) noexcept {
RAWorkReg* wReg = workRegById(workId);
BaseMem dstMem(workRegAsMem(wReg));
BaseReg srcReg(wReg->signature(), srcPhysId);
const char* comment = nullptr;
#ifndef ASMJIT_NO_LOGGING
if (hasDiagnosticOption(DiagnosticOptions::kRAAnnotate)) {
_tmpString.assignFormat("<SAVE> %s", workRegById(workId)->name());
comment = _tmpString.data();
}
#endif
return _emitHelper.emitRegMove(dstMem, srcReg, wReg->typeId(), comment);
}
Error ARMRAPass::emitJump(const Label& label) noexcept {
return cc()->b(label);
}
Error ARMRAPass::emitPreCall(InvokeNode* invokeNode) noexcept {
DebugUtils::unused(invokeNode);
return kErrorOk;
}
ASMJIT_END_SUB_NAMESPACE
#endif // !ASMJIT_NO_AARCH64 && !ASMJIT_NO_COMPILER

105
third_party/asmjit/arm/a64rapass_p.h vendored Normal file
View File

@@ -0,0 +1,105 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_A64RAPASS_P_H_INCLUDED
#define ASMJIT_ARM_A64RAPASS_P_H_INCLUDED
#include "../core/api-config.h"
#ifndef ASMJIT_NO_COMPILER
#include "../core/compiler.h"
#include "../core/rabuilders_p.h"
#include "../core/rapass_p.h"
#include "../arm/a64assembler.h"
#include "../arm/a64compiler.h"
#include "../arm/a64emithelper_p.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
//! \cond INTERNAL
//! \addtogroup asmjit_a64
//! \{
//! ARM register allocation pass.
//!
//! Takes care of generating function prologs and epilogs, and also performs
//! register allocation.
class ARMRAPass : public BaseRAPass {
public:
ASMJIT_NONCOPYABLE(ARMRAPass)
typedef BaseRAPass Base;
EmitHelper _emitHelper;
//! \name Construction & Destruction
//! \{
ARMRAPass() noexcept;
virtual ~ARMRAPass() noexcept;
//! \}
//! \name Accessors
//! \{
//! Returns the compiler cast to `arm::Compiler`.
inline Compiler* cc() const noexcept { return static_cast<Compiler*>(_cb); }
//! Returns emit helper.
inline EmitHelper* emitHelper() noexcept { return &_emitHelper; }
//! \}
//! \name Events
//! \{
void onInit() noexcept override;
void onDone() noexcept override;
//! \}
//! \name CFG
//! \{
Error buildCFG() noexcept override;
//! \}
//! \name Rewrite
//! \{
Error _rewrite(BaseNode* first, BaseNode* stop) noexcept override;
//! \}
//! \name Prolog & Epilog
//! \{
Error updateStackFrame() noexcept override;
//! \}
//! \name Emit Helpers
//! \{
Error emitMove(uint32_t workId, uint32_t dstPhysId, uint32_t srcPhysId) noexcept override;
Error emitSwap(uint32_t aWorkId, uint32_t aPhysId, uint32_t bWorkId, uint32_t bPhysId) noexcept override;
Error emitLoad(uint32_t workId, uint32_t dstPhysId) noexcept override;
Error emitSave(uint32_t workId, uint32_t srcPhysId) noexcept override;
Error emitJump(const Label& label) noexcept override;
Error emitPreCall(InvokeNode* invokeNode) noexcept override;
//! \}
};
//! \}
//! \endcond
ASMJIT_END_SUB_NAMESPACE
#endif // !ASMJIT_NO_COMPILER
#endif // ASMJIT_ARM_A64RAPASS_P_H_INCLUDED

179
third_party/asmjit/arm/a64utils.h vendored Normal file
View File

@@ -0,0 +1,179 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_A64UTILS_H_INCLUDED
#define ASMJIT_ARM_A64UTILS_H_INCLUDED
#include "../arm/a64globals.h"
ASMJIT_BEGIN_SUB_NAMESPACE(a64)
//! \addtogroup asmjit_a64
//! \{
//! Public utilities and helpers for targeting the AArch64 architecture.
namespace Utils {
//! Decomposed fields of a logical immediate value (AArch64).
struct LogicalImm {
uint32_t n;
uint32_t s;
uint32_t r;
};
//! Encodes the given `imm` value of the given `width` to a logical immediate value represented as N, S, and R fields
//! and writes these fields to `out`.
//!
//! Encoding Table:
//!
//! ```
//! +---+--------+--------+------+
//! | N | ImmS | ImmR | Size |
//! +---+--------+--------+------+
//! | 1 | ssssss | rrrrrr | 64 |
//! | 0 | 0sssss | .rrrrr | 32 |
//! | 0 | 10ssss | ..rrrr | 16 |
//! | 0 | 110sss | ...rrr | 8 |
//! | 0 | 1110ss | ....rr | 4 |
//! | 0 | 11110s | .....r | 2 |
//! +---+--------+--------+------+
//! ```
ASMJIT_MAYBE_UNUSED
static bool encodeLogicalImm(uint64_t imm, uint32_t width, a64::Utils::LogicalImm* out) noexcept {
// Determine the element width, which must be 2, 4, 8, 16, 32, or 64 bits.
do {
width /= 2;
uint64_t mask = (uint64_t(1) << width) - 1u;
if ((imm & mask) != ((imm >> width) & mask)) {
width *= 2;
break;
}
} while (width > 2);
// Patterns of all zeros and all ones are not encodable.
uint64_t lsbMask = Support::lsbMask<uint64_t>(width);
imm &= lsbMask;
if (imm == 0 || imm == lsbMask)
return false;
// Inspect the pattern and get the most important bit indexes.
//
// oIndex <-+ +-> zIndex
// | |
// |..zeros..|oCount|zCount|..ones..|
// |000000000|111111|000000|11111111|
uint32_t zIndex = Support::ctz(~imm);
uint64_t zImm = imm ^ ((uint64_t(1) << zIndex) - 1);
uint32_t zCount = (zImm ? Support::ctz(zImm) : width) - zIndex;
uint32_t oIndex = zIndex + zCount;
uint64_t oImm = ~(zImm ^ Support::lsbMask<uint64_t>(oIndex));
uint32_t oCount = (oImm ? Support::ctz(oImm) : width) - (oIndex);
// Verify whether the bit-pattern is encodable.
uint64_t mustBeZero = oImm ^ ~Support::lsbMask<uint64_t>(oIndex + oCount);
if (mustBeZero != 0 || (zIndex > 0 && width - (oIndex + oCount) != 0))
return false;
out->n = width == 64;
out->s = (oCount + zIndex - 1) | (Support::neg(width * 2) & 0x3F);
out->r = width - oIndex;
return true;
}
//! Returns true if the given `imm` value is encodable as a logical immediate. The `width` argument describes the
//! width of the operation, and must be either 32 or 64. This function can be used to test whether an immediate
//! value can be used with the AND, ANDS, BIC, BICS, EON, EOR, ORN, and ORR instructions.
ASMJIT_MAYBE_UNUSED
static inline bool isLogicalImm(uint64_t imm, uint32_t width) noexcept {
LogicalImm dummy;
return encodeLogicalImm(imm, width, &dummy);
}
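// A minimal usage sketch of the helpers above (the concrete N/S/R values follow from the
// encoding table and are illustrative, not authoritative):
//
//   a64::Utils::LogicalImm li;
//   if (a64::Utils::encodeLogicalImm(0x00FF00FF00FF00FFu, 64, &li)) {
//     // The pattern repeats every 16 bits with 8 contiguous ones, so it should encode
//     // as N=0, S=0b100111, R=0 and is usable with AND/ORR/EOR and related instructions.
//   }
//   bool ok  = a64::Utils::isLogicalImm(0x00FF00FF00FF00FFu, 64); // true
//   bool bad = a64::Utils::isLogicalImm(0xDEADBEEFu, 32);         // false - not a repeating run of ones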
//! Returns true if the given `imm` value is a byte mask. A byte mask has each byte of the value set to either
//! 0x00 or 0xFF. Some ARM instructions accept immediates that form a byte mask, and this function can be used to
//! verify that the immediate is encodable before using the value.
template<typename T>
static inline bool isByteMaskImm8(const T& imm) noexcept {
constexpr T kMask = T(0x0101010101010101 & Support::allOnes<T>());
return imm == (imm & kMask) * T(255);
}
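// For example (a sketch of how the check above behaves):
//
//   a64::Utils::isByteMaskImm8<uint32_t>(0x00FF00FFu);         // true  - every byte is 0x00 or 0xFF
//   a64::Utils::isByteMaskImm8<uint32_t>(0x00F000FFu);         // false - the 0xF0 byte breaks the mask
//   a64::Utils::isByteMaskImm8<uint64_t>(0xFF00000000000000u); // true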
//! \cond
//! A generic implementation that checks whether a floating point value can be converted to ARM Imm8.
template<typename T, uint32_t kNumBBits, uint32_t kNumCDEFGHBits, uint32_t kNumZeroBits>
static inline bool isFPImm8Generic(T val) noexcept {
constexpr uint32_t kAllBsMask = Support::lsbMask<uint32_t>(kNumBBits);
constexpr uint32_t kB0Pattern = Support::bitMask(kNumBBits - 1);
constexpr uint32_t kB1Pattern = kAllBsMask ^ kB0Pattern;
T immZ = val & Support::lsbMask<T>(kNumZeroBits);
uint32_t immB = uint32_t(val >> (kNumZeroBits + kNumCDEFGHBits)) & kAllBsMask;
// ImmZ must be all zeros and ImmB must either be B0 or B1 pattern.
return immZ == 0 && (immB == kB0Pattern || immB == kB1Pattern);
}
//! \endcond
//! Returns true if the given half-precision floating point `val` can be encoded as an ARM IMM8 value, which represents
//! a limited set of floating point immediate values that can be used with the FMOV instruction.
//!
//! The floating point must have bits distributed in the following way:
//!
//! ```
//! [aBbbcdef|gh000000]
//! ```
static inline bool isFP16Imm8(uint32_t val) noexcept { return isFPImm8Generic<uint32_t, 3, 6, 6>(val); }
//! Returns true if the given single-precision floating point `val` can be encoded as an ARM IMM8 value, which represents
//! a limited set of floating point immediate values that can be used with the FMOV instruction.
//!
//! The floating point must have bits distributed in the following way:
//!
//! ```
//! [aBbbbbbc|defgh000|00000000|00000000]
//! ```
static inline bool isFP32Imm8(uint32_t val) noexcept { return isFPImm8Generic<uint32_t, 6, 6, 19>(val); }
//! \overload
static inline bool isFP32Imm8(float val) noexcept { return isFP32Imm8(Support::bitCast<uint32_t>(val)); }
//! Returns true if the given double-precision floating point `val` can be encoded as an ARM IMM8 value, which represents
//! a limited set of floating point immediate values that can be used with the FMOV instruction.
//!
//! The floating point must have bits distributed in the following way:
//!
//! ```
//! [aBbbbbbb|bbcdefgh|00000000|00000000|00000000|00000000|00000000|00000000]
//! ```
static inline bool isFP64Imm8(uint64_t val) noexcept { return isFPImm8Generic<uint64_t, 9, 6, 48>(val); }
//! \overload
static inline bool isFP64Imm8(double val) noexcept { return isFP64Imm8(Support::bitCast<uint64_t>(val)); }
//! \cond
template<typename T, uint32_t kNumBBits, uint32_t kNumCDEFGHBits, uint32_t kNumZeroBits>
static inline uint32_t encodeFPToImm8Generic(T val) noexcept {
uint32_t bits = uint32_t(val >> kNumZeroBits);
return ((bits >> (kNumBBits + kNumCDEFGHBits - 7)) & 0x80u) | (bits & 0x7F);
}
//! \endcond
//! Encodes a double precision floating point value into IMM8 format.
//!
//! \note This function expects that `isFP64Imm8(val) == true` so it doesn't perform any checks of the value and just
//! rearranges some bits into Imm8 order.
static inline uint32_t encodeFP64ToImm8(uint64_t val) noexcept { return encodeFPToImm8Generic<uint64_t, 9, 6, 48>(val); }
//! \overload
static inline uint32_t encodeFP64ToImm8(double val) noexcept { return encodeFP64ToImm8(Support::bitCast<uint64_t>(val)); }
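// A short usage sketch of the FP IMM8 helpers above:
//
//   a64::Utils::isFP64Imm8(2.0);     // true  - 2.0 is one of the FMOV-encodable constants
//   a64::Utils::isFP64Imm8(3.1415);  // false - the fraction doesn't fit the imm8 format
//   if (a64::Utils::isFP64Imm8(0.5))
//     uint32_t imm8 = a64::Utils::encodeFP64ToImm8(0.5); // only valid after the check above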
} // {Utils}
//! \}
ASMJIT_END_SUB_NAMESPACE
#endif // ASMJIT_ARM_A64UTILS_H_INCLUDED

143
third_party/asmjit/arm/armformatter.cpp vendored Normal file
View File

@@ -0,0 +1,143 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#ifndef ASMJIT_NO_LOGGING
#include "../core/misc_p.h"
#include "../core/support.h"
#include "../arm/armformatter_p.h"
#include "../arm/armoperand.h"
#include "../arm/a64instapi_p.h"
#include "../arm/a64instdb_p.h"
#ifndef ASMJIT_NO_COMPILER
#include "../core/compiler.h"
#endif
ASMJIT_BEGIN_SUB_NAMESPACE(arm)
// arm::FormatterInternal - Format Feature
// =======================================
Error FormatterInternal::formatFeature(String& sb, uint32_t featureId) noexcept {
// @EnumStringBegin{"enum": "CpuFeatures::ARM", "output": "sFeature", "strip": "k"}@
static const char sFeatureString[] =
"None\0"
"THUMB\0"
"THUMBv2\0"
"ARMv6\0"
"ARMv7\0"
"ARMv8a\0"
"ARMv8_1a\0"
"ARMv8_2a\0"
"ARMv8_3a\0"
"ARMv8_4a\0"
"ARMv8_5a\0"
"ARMv8_6a\0"
"ARMv8_7a\0"
"VFPv2\0"
"VFPv3\0"
"VFPv4\0"
"VFP_D32\0"
"AES\0"
"ALTNZCV\0"
"ASIMD\0"
"BF16\0"
"BTI\0"
"CPUID\0"
"CRC32\0"
"DGH\0"
"DIT\0"
"DOTPROD\0"
"EDSP\0"
"FCMA\0"
"FJCVTZS\0"
"FLAGM\0"
"FP16CONV\0"
"FP16FML\0"
"FP16FULL\0"
"FRINT\0"
"I8MM\0"
"IDIVA\0"
"IDIVT\0"
"LSE\0"
"MTE\0"
"RCPC_IMMO\0"
"RDM\0"
"PMU\0"
"PMULL\0"
"RNG\0"
"SB\0"
"SHA1\0"
"SHA2\0"
"SHA3\0"
"SHA512\0"
"SM3\0"
"SM4\0"
"SSBS\0"
"SVE\0"
"SVE_BF16\0"
"SVE_F32MM\0"
"SVE_F64MM\0"
"SVE_I8MM\0"
"SVE_PMULL\0"
"SVE2\0"
"SVE2_AES\0"
"SVE2_BITPERM\0"
"SVE2_SHA3\0"
"SVE2_SM4\0"
"TME\0"
"<Unknown>\0";
static const uint16_t sFeatureIndex[] = {
0, 5, 11, 19, 25, 31, 38, 47, 56, 65, 74, 83, 92, 101, 107, 113, 119, 127,
131, 139, 145, 150, 154, 160, 166, 170, 174, 182, 187, 192, 200, 206, 215,
223, 232, 238, 243, 249, 255, 259, 263, 273, 277, 281, 287, 291, 294, 299,
304, 309, 316, 320, 324, 329, 333, 342, 352, 362, 371, 381, 386, 395, 408,
418, 427, 431
};
// @EnumStringEnd@
return sb.append(sFeatureString + sFeatureIndex[Support::min<uint32_t>(featureId, uint32_t(CpuFeatures::ARM::kMaxValue) + 1)]);
}
// arm::FormatterInternal - Format Constants
// =========================================
ASMJIT_FAVOR_SIZE Error FormatterInternal::formatCondCode(String& sb, CondCode cc) noexcept {
static const char condCodeData[] =
"al\0" "na\0"
"eq\0" "ne\0"
"cs\0" "cc\0" "mi\0" "pl\0" "vs\0" "vc\0"
"hi\0" "ls\0" "ge\0" "lt\0" "gt\0" "le\0"
"<Unknown>";
return sb.append(condCodeData + Support::min<uint32_t>(uint32_t(cc), 16u) * 3);
}
ASMJIT_FAVOR_SIZE Error FormatterInternal::formatShiftOp(String& sb, ShiftOp shiftOp) noexcept {
const char* str = "<Unknown>";
switch (shiftOp) {
case ShiftOp::kLSL: str = "lsl"; break;
case ShiftOp::kLSR: str = "lsr"; break;
case ShiftOp::kASR: str = "asr"; break;
case ShiftOp::kROR: str = "ror"; break;
case ShiftOp::kRRX: str = "rrx"; break;
case ShiftOp::kMSL: str = "msl"; break;
case ShiftOp::kUXTB: str = "uxtb"; break;
case ShiftOp::kUXTH: str = "uxth"; break;
case ShiftOp::kUXTW: str = "uxtw"; break;
case ShiftOp::kUXTX: str = "uxtx"; break;
case ShiftOp::kSXTB: str = "sxtb"; break;
case ShiftOp::kSXTH: str = "sxth"; break;
case ShiftOp::kSXTW: str = "sxtw"; break;
case ShiftOp::kSXTX: str = "sxtx"; break;
}
return sb.append(str);
}
ASMJIT_END_SUB_NAMESPACE
#endif // !ASMJIT_NO_LOGGING

44
third_party/asmjit/arm/armformatter_p.h vendored Normal file
View File

@@ -0,0 +1,44 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_ARMFORMATTER_P_H_INCLUDED
#define ASMJIT_ARM_ARMFORMATTER_P_H_INCLUDED
#include "../core/api-config.h"
#ifndef ASMJIT_NO_LOGGING
#include "../core/formatter.h"
#include "../core/string.h"
#include "../arm/armglobals.h"
ASMJIT_BEGIN_SUB_NAMESPACE(arm)
//! \cond INTERNAL
//! \addtogroup asmjit_arm
//! \{
namespace FormatterInternal {
Error ASMJIT_CDECL formatFeature(
String& sb,
uint32_t featureId) noexcept;
Error ASMJIT_CDECL formatCondCode(
String& sb,
CondCode cc) noexcept;
Error ASMJIT_CDECL formatShiftOp(
String& sb,
ShiftOp shiftOp) noexcept;
} // {FormatterInternal}
//! \}
//! \endcond
ASMJIT_END_SUB_NAMESPACE
#endif // !ASMJIT_NO_LOGGING
#endif // ASMJIT_ARM_ARMFORMATTER_P_H_INCLUDED

21
third_party/asmjit/arm/armglobals.h vendored Normal file
View File

@@ -0,0 +1,21 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_ARMGLOBALS_H_INCLUDED
#define ASMJIT_ARM_ARMGLOBALS_H_INCLUDED
#include "../core/archcommons.h"
#include "../core/inst.h"
//! \namespace asmjit::arm
//! \ingroup asmjit_arm
//!
//! API shared between AArch32 & AArch64 backends.
ASMJIT_BEGIN_SUB_NAMESPACE(arm)
ASMJIT_END_SUB_NAMESPACE
#endif // ASMJIT_ARM_ARMGLOBALS_H_INCLUDED

621
third_party/asmjit/arm/armoperand.h vendored Normal file
View File

@@ -0,0 +1,621 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_ARM_ARMOPERAND_H_INCLUDED
#define ASMJIT_ARM_ARMOPERAND_H_INCLUDED
#include "../core/archtraits.h"
#include "../core/operand.h"
#include "../core/type.h"
#include "../arm/armglobals.h"
ASMJIT_BEGIN_SUB_NAMESPACE(arm)
//! \addtogroup asmjit_arm
//! \{
class Reg;
class Mem;
class Gp;
class GpW;
class GpX;
class Vec;
class VecB;
class VecH;
class VecS;
class VecD;
class VecV;
//! Register traits (ARM/AArch64).
//!
//! Register traits contains information about a particular register type. It's used by asmjit to setup register
//! information on-the-fly and to populate tables that contain register information (this way it's possible to
//! change register types and groups without having to reorder these tables).
template<RegType kRegType>
struct RegTraits : public BaseRegTraits {};
//! \cond
// <--------------------+-----+-------------------------+------------------------+---+---+------------------+
// | Reg | Reg-Type | Reg-Group |Sz |Cnt| TypeId |
// <--------------------+-----+-------------------------+------------------------+---+---+------------------+
ASMJIT_DEFINE_REG_TRAITS(GpW , RegType::kARM_GpW , RegGroup::kGp , 4 , 32, TypeId::kInt32 );
ASMJIT_DEFINE_REG_TRAITS(GpX , RegType::kARM_GpX , RegGroup::kGp , 8 , 32, TypeId::kInt64 );
ASMJIT_DEFINE_REG_TRAITS(VecB , RegType::kARM_VecB , RegGroup::kVec , 1 , 32, TypeId::kVoid );
ASMJIT_DEFINE_REG_TRAITS(VecH , RegType::kARM_VecH , RegGroup::kVec , 2 , 32, TypeId::kVoid );
ASMJIT_DEFINE_REG_TRAITS(VecS , RegType::kARM_VecS , RegGroup::kVec , 4 , 32, TypeId::kInt32x1 );
ASMJIT_DEFINE_REG_TRAITS(VecD , RegType::kARM_VecD , RegGroup::kVec , 8 , 32, TypeId::kInt32x2 );
ASMJIT_DEFINE_REG_TRAITS(VecV , RegType::kARM_VecV , RegGroup::kVec , 16, 32, TypeId::kInt32x4 );
//! \endcond
//! Register (ARM).
class Reg : public BaseReg {
public:
ASMJIT_DEFINE_ABSTRACT_REG(Reg, BaseReg)
//! Gets whether the register is an `R|W` register (32-bit).
inline constexpr bool isGpW() const noexcept { return baseSignature() == RegTraits<RegType::kARM_GpW>::kSignature; }
//! Gets whether the register is an `X` register (64-bit).
inline constexpr bool isGpX() const noexcept { return baseSignature() == RegTraits<RegType::kARM_GpX>::kSignature; }
//! Gets whether the register is a VEC-B register (8-bit).
inline constexpr bool isVecB() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecB>::kSignature; }
//! Gets whether the register is a VEC-H register (16-bit).
inline constexpr bool isVecH() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecH>::kSignature; }
//! Gets whether the register is a VEC-S register (32-bit).
inline constexpr bool isVecS() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecS>::kSignature; }
//! Gets whether the register is a VEC-D register (64-bit).
inline constexpr bool isVecD() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecD>::kSignature; }
//! Gets whether the register is a VEC-Q register (128-bit).
inline constexpr bool isVecQ() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecV>::kSignature; }
//! Gets whether the register is either VEC-D (64-bit) or VEC-Q (128-bit).
inline constexpr bool isVecDOrQ() const noexcept { return uint32_t(type()) - uint32_t(RegType::kARM_VecD) <= 1u; }
//! Gets whether the register is a VEC-V register (128-bit).
inline constexpr bool isVecV() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecV>::kSignature; }
template<RegType kRegType>
inline void setRegT(uint32_t id) noexcept {
setSignature(RegTraits<kRegType>::kSignature);
setId(id);
}
inline void setTypeAndId(RegType type, uint32_t id) noexcept {
setSignature(signatureOf(type));
setId(id);
}
static inline RegGroup groupOf(RegType type) noexcept { return ArchTraits::byArch(Arch::kAArch64).regTypeToGroup(type); }
static inline TypeId typeIdOf(RegType type) noexcept { return ArchTraits::byArch(Arch::kAArch64).regTypeToTypeId(type); }
static inline OperandSignature signatureOf(RegType type) noexcept { return ArchTraits::byArch(Arch::kAArch64).regTypeToSignature(type); }
template<RegType kRegType>
static inline RegGroup groupOfT() noexcept { return RegTraits<kRegType>::kGroup; }
template<RegType kRegType>
static inline TypeId typeIdOfT() noexcept { return RegTraits<kRegType>::kTypeId; }
template<RegType kRegType>
static inline OperandSignature signatureOfT() noexcept { return RegTraits<kRegType>::kSignature; }
static inline bool isGpW(const Operand_& op) noexcept { return op.as<Reg>().isGpW(); }
static inline bool isGpX(const Operand_& op) noexcept { return op.as<Reg>().isGpX(); }
static inline bool isVecB(const Operand_& op) noexcept { return op.as<Reg>().isVecB(); }
static inline bool isVecH(const Operand_& op) noexcept { return op.as<Reg>().isVecH(); }
static inline bool isVecS(const Operand_& op) noexcept { return op.as<Reg>().isVecS(); }
static inline bool isVecD(const Operand_& op) noexcept { return op.as<Reg>().isVecD(); }
static inline bool isVecQ(const Operand_& op) noexcept { return op.as<Reg>().isVecQ(); }
static inline bool isVecV(const Operand_& op) noexcept { return op.as<Reg>().isVecV(); }
static inline bool isGpW(const Operand_& op, uint32_t id) noexcept { return bool(unsigned(isGpW(op)) & unsigned(op.id() == id)); }
static inline bool isGpX(const Operand_& op, uint32_t id) noexcept { return bool(unsigned(isGpX(op)) & unsigned(op.id() == id)); }
static inline bool isVecB(const Operand_& op, uint32_t id) noexcept { return bool(unsigned(isVecB(op)) & unsigned(op.id() == id)); }
static inline bool isVecH(const Operand_& op, uint32_t id) noexcept { return bool(unsigned(isVecH(op)) & unsigned(op.id() == id)); }
static inline bool isVecS(const Operand_& op, uint32_t id) noexcept { return bool(unsigned(isVecS(op)) & unsigned(op.id() == id)); }
static inline bool isVecD(const Operand_& op, uint32_t id) noexcept { return bool(unsigned(isVecD(op)) & unsigned(op.id() == id)); }
static inline bool isVecQ(const Operand_& op, uint32_t id) noexcept { return bool(unsigned(isVecQ(op)) & unsigned(op.id() == id)); }
static inline bool isVecV(const Operand_& op, uint32_t id) noexcept { return bool(unsigned(isVecV(op)) & unsigned(op.id() == id)); }
};
//! General purpose register (ARM).
class Gp : public Reg {
public:
ASMJIT_DEFINE_ABSTRACT_REG(Gp, Reg)
//! Special register id.
enum Id : uint32_t {
//! Register that depends on OS, could be used as TLS offset.
kIdOs = 18,
//! Frame pointer.
kIdFp = 29,
//! Link register.
kIdLr = 30,
//! Stack register id.
kIdSp = 31,
//! Zero register id.
//!
//! Although the zero register has the same id as the stack register, it gets special treatment, because we need
//! to be able to distinguish between the two at API level. Some instructions were designed to be used with SP and
//! others with ZR, so we need a way to tell these two apart to make sure we emit the right thing.
//!
//! The number 63 is not random - performing `id & 31` always yields 31 for both SP and ZR inputs, which is the
//! identifier the AArch64 ISA uses to encode either SP or ZR depending on the instruction.
kIdZr = 63
};
inline constexpr bool isZR() const noexcept { return id() == kIdZr; }
inline constexpr bool isSP() const noexcept { return id() == kIdSp; }
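// For example (a sketch; both registers encode as register number 31 in the ISA):
//
//   GpX stackPtr(Gp::kIdSp);  // stackPtr.isSP() == true
//   GpX zero(Gp::kIdZr);      // zero.isZR()    == true, and (Gp::kIdZr & 31) == 31 as well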
//! Cast this register to a 32-bit R|W.
inline GpW w() const noexcept;
//! Cast this register to a 64-bit X.
inline GpX x() const noexcept;
};
//! Vector register (ARM).
class Vec : public Reg {
public:
ASMJIT_DEFINE_ABSTRACT_REG(Vec, Reg)
//! Additional signature bits used by arm::Vec.
enum AdditionalBits : uint32_t {
// Register element type (3 bits).
// |........|........|.XXX....|........|
kSignatureRegElementTypeShift = 12,
kSignatureRegElementTypeMask = 0x07 << kSignatureRegElementTypeShift,
// Register has element index (1 bit).
// |........|........|X.......|........|
kSignatureRegElementFlagShift = 15,
kSignatureRegElementFlagMask = 0x01 << kSignatureRegElementFlagShift,
// Register element index (4 bits).
// |........|....XXXX|........|........|
kSignatureRegElementIndexShift = 16,
kSignatureRegElementIndexMask = 0x0F << kSignatureRegElementIndexShift
};
//! Element type.
enum ElementType : uint32_t {
//! No element type specified.
kElementTypeNone = 0,
//! Byte elements (B8 or B16).
kElementTypeB,
//! Halfword elements (H4 or H8).
kElementTypeH,
//! Singleword elements (S2 or S4).
kElementTypeS,
//! Doubleword elements (D2).
kElementTypeD,
//! Byte elements grouped by 4 bytes (B4).
//!
//! \note This element-type is only used by a few instructions.
kElementTypeB4,
//! Halfword elements grouped by 2 halfwords (H2).
//!
//! \note This element-type is only used by a few instructions.
kElementTypeH2,
//! Count of element types.
kElementTypeCount
};
//! \cond
//! Shortcuts.
enum SignatureReg : uint32_t {
kSignatureElementB = kElementTypeB << kSignatureRegElementTypeShift,
kSignatureElementH = kElementTypeH << kSignatureRegElementTypeShift,
kSignatureElementS = kElementTypeS << kSignatureRegElementTypeShift,
kSignatureElementD = kElementTypeD << kSignatureRegElementTypeShift,
kSignatureElementB4 = kElementTypeB4 << kSignatureRegElementTypeShift,
kSignatureElementH2 = kElementTypeH2 << kSignatureRegElementTypeShift
};
//! \endcond
//! Returns whether the register has an associated element type.
inline constexpr bool hasElementType() const noexcept { return _signature.hasField<kSignatureRegElementTypeMask>(); }
//! Returns whether the register has an element index (i.e., it's an element index access).
inline constexpr bool hasElementIndex() const noexcept { return _signature.hasField<kSignatureRegElementFlagMask>(); }
//! Returns whether the register has an element type or an element index (or both).
inline constexpr bool hasElementTypeOrIndex() const noexcept { return _signature.hasField<kSignatureRegElementTypeMask | kSignatureRegElementFlagMask>(); }
//! Returns element type of the register.
inline constexpr uint32_t elementType() const noexcept { return _signature.getField<kSignatureRegElementTypeMask>(); }
//! Sets element type of the register to `elementType`.
inline void setElementType(uint32_t elementType) noexcept { _signature.setField<kSignatureRegElementTypeMask>(elementType); }
//! Resets element type to none.
inline void resetElementType() noexcept { _signature.setField<kSignatureRegElementTypeMask>(0); }
//! Returns element index of the register.
inline constexpr uint32_t elementIndex() const noexcept { return _signature.getField<kSignatureRegElementIndexMask>(); }
//! Sets element index of the register to `elementIndex`.
inline void setElementIndex(uint32_t elementIndex) noexcept {
_signature |= kSignatureRegElementFlagMask;
_signature.setField<kSignatureRegElementIndexMask>(elementIndex);
}
//! Resets element index of the register.
inline void resetElementIndex() noexcept {
_signature &= ~(kSignatureRegElementFlagMask | kSignatureRegElementIndexMask);
}
inline constexpr bool isVecB8() const noexcept { return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kARM_VecD>::kSignature | kSignatureElementB); }
inline constexpr bool isVecH4() const noexcept { return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kARM_VecD>::kSignature | kSignatureElementH); }
inline constexpr bool isVecS2() const noexcept { return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kARM_VecD>::kSignature | kSignatureElementS); }
inline constexpr bool isVecD1() const noexcept { return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kARM_VecD>::kSignature); }
inline constexpr bool isVecB16() const noexcept { return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kARM_VecV>::kSignature | kSignatureElementB); }
inline constexpr bool isVecH8() const noexcept { return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kARM_VecV>::kSignature | kSignatureElementH); }
inline constexpr bool isVecS4() const noexcept { return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kARM_VecV>::kSignature | kSignatureElementS); }
inline constexpr bool isVecD2() const noexcept { return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kARM_VecV>::kSignature | kSignatureElementD); }
inline constexpr bool isVecB4x4() const noexcept { return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kARM_VecV>::kSignature | kSignatureElementB4); }
inline constexpr bool isVecH2x4() const noexcept { return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kARM_VecV>::kSignature | kSignatureElementH2); }
//! Creates a cloned register with element access.
inline Vec at(uint32_t elementIndex) const noexcept {
return Vec((signature() & ~kSignatureRegElementIndexMask) | (elementIndex << kSignatureRegElementIndexShift) | kSignatureRegElementFlagMask, id());
}
//! Cast this register to an 8-bit B register (scalar).
inline VecB b() const noexcept;
//! Cast this register to a 16-bit H register (scalar).
inline VecH h() const noexcept;
//! Cast this register to a 32-bit S register (scalar).
inline VecS s() const noexcept;
//! Cast this register to a 64-bit D register (scalar).
inline VecD d() const noexcept;
//! Cast this register to a 128-bit Q register (scalar).
inline VecV q() const noexcept;
//! Cast this register to a 128-bit V register.
inline VecV v() const noexcept;
//! Cast this register to a 128-bit V.B[elementIndex] register.
inline VecV b(uint32_t elementIndex) const noexcept;
//! Cast this register to a 128-bit V.H[elementIndex] register.
inline VecV h(uint32_t elementIndex) const noexcept;
//! Cast this register to a 128-bit V.S[elementIndex] register.
inline VecV s(uint32_t elementIndex) const noexcept;
//! Cast this register to a 128-bit V.D[elementIndex] register.
inline VecV d(uint32_t elementIndex) const noexcept;
//! Cast this register to a 128-bit V.H2[elementIndex] register.
inline VecV h2(uint32_t elementIndex) const noexcept;
//! Cast this register to a 128-bit V.B4[elementIndex] register.
inline VecV b4(uint32_t elementIndex) const noexcept;
//! Cast this register to V.8B.
inline VecD b8() const noexcept;
//! Cast this register to V.16B.
inline VecV b16() const noexcept;
//! Cast this register to V.2H.
inline VecS h2() const noexcept;
//! Cast this register to V.4H.
inline VecD h4() const noexcept;
//! Cast this register to V.8H.
inline VecV h8() const noexcept;
//! Cast this register to V.2S.
inline VecD s2() const noexcept;
//! Cast this register to V.4S.
inline VecV s4() const noexcept;
//! Cast this register to V.2D.
inline VecV d2() const noexcept;
static inline constexpr OperandSignature _makeElementAccessSignature(uint32_t elementType, uint32_t elementIndex) noexcept {
return OperandSignature{
uint32_t(RegTraits<RegType::kARM_VecV>::kSignature) |
uint32_t(kSignatureRegElementFlagMask) |
uint32_t(elementType << kSignatureRegElementTypeShift) |
uint32_t(elementIndex << kSignatureRegElementIndexShift)};
}
};
//! 32-bit GPW (AArch64) and/or GPR (ARM/AArch32) register.
class GpW : public Gp { ASMJIT_DEFINE_FINAL_REG(GpW, Gp, RegTraits<RegType::kARM_GpW>) };
//! 64-bit GPX (AArch64) register.
class GpX : public Gp { ASMJIT_DEFINE_FINAL_REG(GpX, Gp, RegTraits<RegType::kARM_GpX>) };
//! 8-bit view (B) of VFP/SIMD register.
class VecB : public Vec { ASMJIT_DEFINE_FINAL_REG(VecB, Vec, RegTraits<RegType::kARM_VecB>) };
//! 16-bit view (H) of VFP/SIMD register.
class VecH : public Vec { ASMJIT_DEFINE_FINAL_REG(VecH, Vec, RegTraits<RegType::kARM_VecH>) };
//! 32-bit view (S) of VFP/SIMD register.
class VecS : public Vec { ASMJIT_DEFINE_FINAL_REG(VecS, Vec, RegTraits<RegType::kARM_VecS>) };
//! 64-bit view (D) of VFP/SIMD register.
class VecD : public Vec { ASMJIT_DEFINE_FINAL_REG(VecD, Vec, RegTraits<RegType::kARM_VecD>) };
//! 128-bit vector register (Q or V).
class VecV : public Vec { ASMJIT_DEFINE_FINAL_REG(VecV, Vec, RegTraits<RegType::kARM_VecV>) };
inline GpW Gp::w() const noexcept { return GpW(id()); }
inline GpX Gp::x() const noexcept { return GpX(id()); }
inline VecB Vec::b() const noexcept { return VecB(id()); }
inline VecH Vec::h() const noexcept { return VecH(id()); }
inline VecS Vec::s() const noexcept { return VecS(id()); }
inline VecD Vec::d() const noexcept { return VecD(id()); }
inline VecV Vec::q() const noexcept { return VecV(id()); }
inline VecV Vec::v() const noexcept { return VecV(id()); }
inline VecV Vec::b(uint32_t elementIndex) const noexcept { return VecV(_makeElementAccessSignature(kElementTypeB, elementIndex), id()); }
inline VecV Vec::h(uint32_t elementIndex) const noexcept { return VecV(_makeElementAccessSignature(kElementTypeH, elementIndex), id()); }
inline VecV Vec::s(uint32_t elementIndex) const noexcept { return VecV(_makeElementAccessSignature(kElementTypeS, elementIndex), id()); }
inline VecV Vec::d(uint32_t elementIndex) const noexcept { return VecV(_makeElementAccessSignature(kElementTypeD, elementIndex), id()); }
inline VecV Vec::h2(uint32_t elementIndex) const noexcept { return VecV(_makeElementAccessSignature(kElementTypeH2, elementIndex), id()); }
inline VecV Vec::b4(uint32_t elementIndex) const noexcept { return VecV(_makeElementAccessSignature(kElementTypeB4, elementIndex), id()); }
inline VecD Vec::b8() const noexcept { return VecD(OperandSignature{VecD::kSignature | kSignatureElementB}, id()); }
inline VecS Vec::h2() const noexcept { return VecS(OperandSignature{VecS::kSignature | kSignatureElementH}, id()); }
inline VecD Vec::h4() const noexcept { return VecD(OperandSignature{VecD::kSignature | kSignatureElementH}, id()); }
inline VecD Vec::s2() const noexcept { return VecD(OperandSignature{VecD::kSignature | kSignatureElementS}, id()); }
inline VecV Vec::b16() const noexcept { return VecV(OperandSignature{VecV::kSignature | kSignatureElementB}, id()); }
inline VecV Vec::h8() const noexcept { return VecV(OperandSignature{VecV::kSignature | kSignatureElementH}, id()); }
inline VecV Vec::s4() const noexcept { return VecV(OperandSignature{VecV::kSignature | kSignatureElementS}, id()); }
inline VecV Vec::d2() const noexcept { return VecV(OperandSignature{VecV::kSignature | kSignatureElementD}, id()); }
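// A short sketch of the vector views defined above:
//
//   VecV full = VecV(0);        // v0 as a full 128-bit vector register
//   VecV s4   = VecV(0).s4();   // v0.4s   - four 32-bit lanes
//   VecD h4   = VecV(0).h4();   // v0.4h   - four 16-bit lanes in the 64-bit view
//   VecV lane = VecV(0).s(3);   // v0.s[3] - element access to the 32-bit lane at index 3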
#ifndef _DOXYGEN
namespace regs {
#endif
//! Creates a 32-bit W register operand (ARM/AArch64).
static inline constexpr GpW w(uint32_t id) noexcept { return GpW(id); }
//! Creates a 64-bit X register operand (AArch64).
static inline constexpr GpX x(uint32_t id) noexcept { return GpX(id); }
//! Creates a 32-bit S register operand (ARM/AArch64).
static inline constexpr VecS s(uint32_t id) noexcept { return VecS(id); }
//! Creates a 64-bit D register operand (ARM/AArch64).
static inline constexpr VecD d(uint32_t id) noexcept { return VecD(id); }
//! Creates a 128-bit V register operand (ARM/AArch64).
static inline constexpr VecV v(uint32_t id) noexcept { return VecV(id); }
#ifndef _DOXYGEN
} // {regs}
// Make `arm::regs` accessible through `arm` namespace as well.
using namespace regs;
#endif
//! Memory operand (ARM).
class Mem : public BaseMem {
public:
//! \cond INTERNAL
//! Additional bits of operand's signature used by `arm::Mem`.
enum AdditionalBits : uint32_t {
// Index shift value (5 bits).
// |........|.....XXX|XX......|........|
kSignatureMemShiftValueShift = 14,
kSignatureMemShiftValueMask = 0x1Fu << kSignatureMemShiftValueShift,
// Shift operation type (4 bits).
// |........|XXXX....|........|........|
kSignatureMemPredicateShift = 20,
kSignatureMemPredicateMask = 0x0Fu << kSignatureMemPredicateShift
};
//! \endcond
//! Memory offset mode.
//!
//! Additional constants that can be used with the `predicate`.
enum OffsetMode : uint32_t {
//! Pre-index "[BASE, #Offset {, <shift>}]!" with write-back.
kOffsetPreIndex = 0xE,
//! Post-index "[BASE], #Offset {, <shift>}" with write-back.
kOffsetPostIndex = 0xF
};
//! \name Construction & Destruction
//! \{
//! Constructs a default `Mem` operand that points to [0].
inline constexpr Mem() noexcept
: BaseMem() {}
inline constexpr Mem(const Mem& other) noexcept
: BaseMem(other) {}
inline explicit Mem(Globals::NoInit_) noexcept
: BaseMem(Globals::NoInit) {}
inline constexpr Mem(const Signature& signature, uint32_t baseId, uint32_t indexId, int32_t offset) noexcept
: BaseMem(signature, baseId, indexId, offset) {}
inline constexpr explicit Mem(const Label& base, int32_t off = 0, Signature signature = Signature{0}) noexcept
: BaseMem(Signature::fromOpType(OperandType::kMem) |
Signature::fromMemBaseType(RegType::kLabelTag) |
signature, base.id(), 0, off) {}
inline constexpr explicit Mem(const BaseReg& base, int32_t off = 0, Signature signature = Signature{0}) noexcept
: BaseMem(Signature::fromOpType(OperandType::kMem) |
Signature::fromMemBaseType(base.type()) |
signature, base.id(), 0, off) {}
inline constexpr Mem(const BaseReg& base, const BaseReg& index, Signature signature = Signature{0}) noexcept
: BaseMem(Signature::fromOpType(OperandType::kMem) |
Signature::fromMemBaseType(base.type()) |
Signature::fromMemIndexType(index.type()) |
signature, base.id(), index.id(), 0) {}
inline constexpr Mem(const BaseReg& base, const BaseReg& index, const Shift& shift, Signature signature = Signature{0}) noexcept
: BaseMem(Signature::fromOpType(OperandType::kMem) |
Signature::fromMemBaseType(base.type()) |
Signature::fromMemIndexType(index.type()) |
Signature::fromValue<kSignatureMemPredicateMask>(uint32_t(shift.op())) |
Signature::fromValue<kSignatureMemShiftValueMask>(shift.value()) |
signature, base.id(), index.id(), 0) {}
inline constexpr Mem(uint64_t base, Signature signature = Signature{0}) noexcept
: BaseMem(Signature::fromOpType(OperandType::kMem) |
signature, uint32_t(base >> 32), 0, int32_t(uint32_t(base & 0xFFFFFFFFu))) {}
//! \}
//! \name Overloaded Operators
//! \{
inline Mem& operator=(const Mem& other) noexcept = default;
//! \}
//! \name Clone
//! \{
//! Clones the memory operand.
inline constexpr Mem clone() const noexcept { return Mem(*this); }
//! Gets new memory operand adjusted by `off`.
inline Mem cloneAdjusted(int64_t off) const noexcept {
Mem result(*this);
result.addOffset(off);
return result;
}
//! Clones the memory operand and makes it pre-index.
inline Mem pre() const noexcept {
Mem result(*this);
result.setPredicate(kOffsetPreIndex);
return result;
}
//! Clones the memory operand, applies a given offset `off` and makes it pre-index.
inline Mem pre(int64_t off) const noexcept {
Mem result(*this);
result.setPredicate(kOffsetPreIndex);
result.addOffset(off);
return result;
}
//! Clones the memory operand and makes it post-index.
inline Mem post() const noexcept {
Mem result(*this);
result.setPredicate(kOffsetPostIndex);
return result;
}
//! Clones the memory operand, applies a given offset `off` and makes it post-index.
inline Mem post(int64_t off) const noexcept {
Mem result(*this);
result.setPredicate(kOffsetPostIndex);
result.addOffset(off);
return result;
}
//! \}
//! \name Base & Index
//! \{
//! Converts memory `baseType` and `baseId` to `arm::Reg` instance.
//!
//! The memory must have a valid base register, otherwise the result will be wrong.
inline Reg baseReg() const noexcept { return Reg::fromTypeAndId(baseType(), baseId()); }
//! Converts memory `indexType` and `indexId` to `arm::Reg` instance.
//!
//! The memory must have a valid index register, otherwise the result will be wrong.
inline Reg indexReg() const noexcept { return Reg::fromTypeAndId(indexType(), indexId()); }
using BaseMem::setIndex;
inline void setIndex(const BaseReg& index, uint32_t shift) noexcept {
setIndex(index);
setShift(shift);
}
//! \}
//! \name ARM Specific Features
//! \{
//! Gets whether the memory operand has shift (aka scale) constant.
inline constexpr bool hasShift() const noexcept { return _signature.hasField<kSignatureMemShiftValueMask>(); }
//! Gets the memory operand's shift (aka scale) constant.
inline constexpr uint32_t shift() const noexcept { return _signature.getField<kSignatureMemShiftValueMask>(); }
//! Sets the memory operand's shift (aka scale) constant.
inline void setShift(uint32_t shift) noexcept { _signature.setField<kSignatureMemShiftValueMask>(shift); }
//! Resets the memory operand's shift (aka scale) constant to zero.
inline void resetShift() noexcept { _signature.setField<kSignatureMemShiftValueMask>(0); }
//! Gets memory predicate (shift mode or offset mode), see \ref ShiftOp and \ref OffsetMode.
inline constexpr uint32_t predicate() const noexcept { return _signature.getField<kSignatureMemPredicateMask>(); }
//! Sets memory predicate to `predicate`, see \ref ShiftOp and \ref OffsetMode.
inline void setPredicate(uint32_t predicate) noexcept { _signature.setField<kSignatureMemPredicateMask>(predicate); }
//! Resets shift mode to LSL (default).
inline void resetPredicate() noexcept { _signature.setField<kSignatureMemPredicateMask>(0); }
inline constexpr bool isFixedOffset() const noexcept { return predicate() < kOffsetPreIndex; }
inline constexpr bool isPreOrPost() const noexcept { return predicate() >= kOffsetPreIndex; }
inline constexpr bool isPreIndex() const noexcept { return predicate() == kOffsetPreIndex; }
inline constexpr bool isPostIndex() const noexcept { return predicate() == kOffsetPostIndex; }
inline void resetToFixedOffset() noexcept { resetPredicate(); }
inline void makePreIndex() noexcept { setPredicate(kOffsetPreIndex); }
inline void makePostIndex() noexcept { setPredicate(kOffsetPostIndex); }
//! \}
};
//! Creates `[base.reg, offset]` memory operand (offset mode).
static inline constexpr Mem ptr(const Gp& base, int32_t offset = 0) noexcept {
return Mem(base, offset);
}
//! Creates `[base.reg, offset]!` memory operand (pre-index mode).
static inline constexpr Mem ptr_pre(const Gp& base, int32_t offset = 0) noexcept {
return Mem(base, offset, OperandSignature::fromValue<Mem::kSignatureMemPredicateMask>(Mem::kOffsetPreIndex));
}
//! Creates `[base.reg], offset` memory operand (post-index mode).
static inline constexpr Mem ptr_post(const Gp& base, int32_t offset = 0) noexcept {
return Mem(base, offset, OperandSignature::fromValue<Mem::kSignatureMemPredicateMask>(Mem::kOffsetPostIndex));
}
//! Creates `[base.reg, index]` memory operand.
static inline constexpr Mem ptr(const Gp& base, const Gp& index) noexcept {
return Mem(base, index);
}
//! Creates `[base.reg], index` memory operand (post-index mode).
static inline constexpr Mem ptr_post(const Gp& base, const Gp& index) noexcept {
return Mem(base, index, OperandSignature::fromValue<Mem::kSignatureMemPredicateMask>(Mem::kOffsetPostIndex));
}
//! Creates `[base.reg, index, SHIFT_OP #shift]` memory operand.
static inline constexpr Mem ptr(const Gp& base, const Gp& index, const Shift& shift) noexcept {
return Mem(base, index, shift);
}
//! Creates `[base + offset]` memory operand.
static inline constexpr Mem ptr(const Label& base, int32_t offset = 0) noexcept {
return Mem(base, offset);
}
// TODO: [ARM] PC + offset address.
#if 0
//! Creates `[PC + offset]` (relative) memory operand.
static inline constexpr Mem ptr(const PC& pc, int32_t offset = 0) noexcept {
return Mem(pc, offset);
}
#endif
//! Creates `[base]` absolute memory operand.
//!
//! \note The concept of absolute memory operands doesn't exist on ARM - the ISA only provides PC-relative addressing.
//! Absolute memory operands can only be used if it's known that the PC-relative offset is encodable and that it
//! would be within the limits. Absolute addresses are also often output by disassemblers, so AsmJit supports them so
//! such output can be assembled back.
static inline constexpr Mem ptr(uint64_t base) noexcept { return Mem(base); }
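// A short sketch of the addressing helpers above (register factories come from `arm::regs`):
//
//   Mem m0 = ptr(x(0), 16);       // [x0, #16]   - fixed offset
//   Mem m1 = ptr_pre(x(1), -16);  // [x1, #-16]! - pre-index with write-back
//   Mem m2 = ptr_post(x(2), 8);   // [x2], #8    - post-index with write-back
//   Mem m3 = ptr(x(3), x(4));     // [x3, x4]    - base + index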
//! \}
ASMJIT_END_SUB_NAMESPACE
//! \cond INTERNAL
ASMJIT_BEGIN_NAMESPACE
ASMJIT_DEFINE_TYPE_ID(arm::GpW, TypeId::kInt32);
ASMJIT_DEFINE_TYPE_ID(arm::GpX, TypeId::kInt64);
ASMJIT_DEFINE_TYPE_ID(arm::VecS, TypeId::kFloat32x1);
ASMJIT_DEFINE_TYPE_ID(arm::VecD, TypeId::kFloat64x1);
ASMJIT_DEFINE_TYPE_ID(arm::VecV, TypeId::kInt32x4);
ASMJIT_END_NAMESPACE
//! \endcond
#endif // ASMJIT_ARM_ARMOPERAND_H_INCLUDED