#include "ppsspp_config.h"
#if PPSSPP_ARCH(ARM)
#include <cmath>
#include "Common/Data/Convert/SmallDataConvert.h"
#include "Common/Math/math_util.h"
#include "Common/CPUDetect.h"
#include "Core/MemMap.h"
#include "Core/MIPS/MIPS.h"
#include "Core/MIPS/MIPSAnalyst.h"
#include "Core/MIPS/MIPSCodeUtils.h"
#include "Core/MIPS/MIPSVFPUUtils.h"
#include "Core/Config.h"
#include "Core/Reporting.h"
#include "Core/MIPS/ARM/ArmJit.h"
#include "Core/MIPS/ARM/ArmRegCache.h"
#include "Core/MIPS/ARM/ArmRegCacheFPU.h"
#include "Core/MIPS/ARM/ArmCompVFPUNEONUtil.h"
#define CONDITIONAL_DISABLE(flag) if (jo.Disabled(JitDisable::flag)) { Comp_Generic(op); return; }
#define DISABLE { fpr.ReleaseSpillLocksAndDiscardTemps(); Comp_Generic(op); return; }
#define DISABLE_UNKNOWN_PREFIX { WARN_LOG(Log::JIT, "DISABLE: Unknown Prefix in %s", __FUNCTION__); fpr.ReleaseSpillLocksAndDiscardTemps(); Comp_Generic(op); return; }
#define _RS MIPS_GET_RS(op)
#define _RT MIPS_GET_RT(op)
#define _RD MIPS_GET_RD(op)
#define _FS MIPS_GET_FS(op)
#define _FT MIPS_GET_FT(op)
#define _FD MIPS_GET_FD(op)
#define _SA MIPS_GET_SA(op)
#define _POS ((op >> 6) & 0x1F)
#define _SIZE ((op >> 11) & 0x1F)
#define _IMM16 (signed short)(op & 0xFFFF)
#define _IMM26 (op & 0x03FFFFFF)
namespace MIPSComp {
using namespace ArmGen;
using namespace ArmJitConstants;
static const float minus_one = -1.0f;
static const float one = 1.0f;
static const float zero = 0.0f;
void ArmJit::CompNEON_VecDo3(MIPSOpcode op) {
CONDITIONAL_DISABLE(VFPU_VEC);
if (js.HasUnknownPrefix()) {
DISABLE_UNKNOWN_PREFIX;
}
VectorSize sz = GetVecSize(op);
int n = GetNumVectorElements(sz);
MappedRegs r = NEONMapDirtyInIn(op, sz, sz, sz);
ARMReg temp = MatchSize(Q0, r.vs);
switch (op >> 26) {
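// Opcode 24 = VFPU0 (vadd/vsub/vdiv), 25 = VFPU1 (vmul), 27 = VFPU3 (vmin/vmax/vsge/vslt).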
case 24:
switch ((op >> 23) & 7) {
case 0: VADD(F_32, r.vd, r.vs, r.vt); break;
case 1: VSUB(F_32, r.vd, r.vs, r.vt); break;
case 7:
{
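// NEON has no SIMD float divide, so bounce the operands through D0/D1 and divide lane by lane on VFP.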
VMOV(D0, D_0(r.vs));
VMOV(D1, D_0(r.vt));
VDIV(S0, S0, S2);
if (sz >= V_Pair)
VDIV(S1, S1, S3);
VMOV(D_0(r.vd), D0);
if (sz >= V_Triple) {
VMOV(D0, D_1(r.vs));
VMOV(D1, D_1(r.vt));
VDIV(S0, S0, S2);
if (sz == V_Quad)
VDIV(S1, S1, S3);
VMOV(D_1(r.vd), D0);
}
}
break;
default:
DISABLE;
}
break;
case 25:
switch ((op >> 23) & 7) {
case 0: VMUL(F_32, r.vd, r.vs, r.vt); break;
default:
DISABLE;
}
break;
case 27:
switch ((op >> 23) & 7) {
case 2: VMIN(F_32, r.vd, r.vs, r.vt); break;
case 3: VMAX(F_32, r.vd, r.vs, r.vt); break;
case 6:
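// vsge: d = (s >= t) ? 1.0f : 0.0f. The compare yields an all-ones mask, so AND it with 1.0f.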
VMOV_immf(temp, 1.0f);
VCGE(F_32, r.vd, r.vs, r.vt);
VAND(r.vd, r.vd, temp);
break;
case 7:
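// vslt: d = (s < t) ? 1.0f : 0.0f, same mask trick as vsge.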
VMOV_immf(temp, 1.0f);
VCLT(F_32, r.vd, r.vs, r.vt);
VAND(r.vd, r.vd, temp);
break;
}
break;
default:
DISABLE;
}
NEONApplyPrefixD(r.vd);
fpr.ReleaseSpillLocksAndDiscardTemps();
}
void ArmJit::CompNEON_SV(MIPSOpcode op) {
CONDITIONAL_DISABLE(LSU_VFPU);
CheckMemoryBreakpoint();
s32 offset = (signed short)(op & 0xFFFC);
int vt = ((op >> 16) & 0x1f) | ((op & 3) << 5);
MIPSGPReg rs = _RS;
bool doCheck = false;
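// Opcode 50 = lv.s (load), 58 = sv.s (store).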
switch (op >> 26)
{
case 50:
{
if (!gpr.IsImm(rs) && jo.cachePointers && g_Config.bFastMemory && (offset & 3) == 0 && offset < 0x400 && offset > -0x400) {
INFO_LOG(Log::JIT, "LV.S fastmode!");
gpr.MapRegAsPointer(rs);
ARMReg ar = fpr.QMapReg(vt, V_Single, MAP_NOINIT | MAP_DIRTY);
if (offset) {
ADDI2R(R0, gpr.RPtr(rs), offset, R1);
VLD1_lane(F_32, ar, R0, 0, true);
} else {
VLD1_lane(F_32, ar, gpr.RPtr(rs), 0, true);
}
break;
}
INFO_LOG(Log::JIT, "LV.S slowmode!");
ARMReg ar = fpr.QMapReg(vt, V_Single, MAP_DIRTY | MAP_NOINIT);
if (gpr.IsImm(rs)) {
u32 addr = (offset + gpr.GetImm(rs)) & 0x3FFFFFFF;
gpr.SetRegImm(R0, addr + (u32)Memory::base);
} else {
gpr.MapReg(rs);
if (g_Config.bFastMemory) {
SetR0ToEffectiveAddress(rs, offset);
} else {
SetCCAndR0ForSafeAddress(rs, offset, R1);
doCheck = true;
}
ADD(R0, R0, MEMBASEREG);
}
FixupBranch skip;
if (doCheck) {
skip = B_CC(CC_EQ);
}
VLD1_lane(F_32, ar, R0, 0, true);
if (doCheck) {
SetJumpTarget(skip);
SetCC(CC_AL);
}
}
break;
case 58:
{
if (!gpr.IsImm(rs) && jo.cachePointers && g_Config.bFastMemory && (offset & 3) == 0 && offset < 0x400 && offset > -0x400) {
INFO_LOG(Log::JIT, "SV.S fastmode!");
gpr.MapRegAsPointer(rs);
ARMReg ar = fpr.QMapReg(vt, V_Single, 0);
if (offset) {
ADDI2R(R0, gpr.RPtr(rs), offset, R1);
VST1_lane(F_32, ar, R0, 0, true);
} else {
VST1_lane(F_32, ar, gpr.RPtr(rs), 0, true);
}
break;
}
INFO_LOG(Log::JIT, "SV.S slowmode!");
ARMReg ar = fpr.QMapReg(vt, V_Single, 0);
if (gpr.IsImm(rs)) {
u32 addr = (offset + gpr.GetImm(rs)) & 0x3FFFFFFF;
gpr.SetRegImm(R0, addr + (u32)Memory::base);
} else {
gpr.MapReg(rs);
if (g_Config.bFastMemory) {
SetR0ToEffectiveAddress(rs, offset);
} else {
SetCCAndR0ForSafeAddress(rs, offset, R1);
doCheck = true;
}
ADD(R0, R0, MEMBASEREG);
}
FixupBranch skip;
if (doCheck) {
skip = B_CC(CC_EQ);
}
VST1_lane(F_32, ar, R0, 0, true);
if (doCheck) {
SetJumpTarget(skip);
SetCC(CC_AL);
}
}
break;
}
fpr.ReleaseSpillLocksAndDiscardTemps();
}
inline int MIPS_GET_VQVT(u32 op) {
return (((op >> 16) & 0x1f)) | ((op & 1) << 5);
}
void ArmJit::CompNEON_SVQ(MIPSOpcode op) {
CONDITIONAL_DISABLE(LSU_VFPU);
CheckMemoryBreakpoint();
int offset = (signed short)(op & 0xFFFC);
int vt = MIPS_GET_VQVT(op.encoding);
MIPSGPReg rs = _RS;
bool doCheck = false;
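// Opcode 54 = lv.q (load), 62 = sv.q (store).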
switch (op >> 26)
{
case 54:
{
const u32 ops[4] = {
op.encoding,
GetOffsetInstruction(1).encoding,
GetOffsetInstruction(2).encoding,
GetOffsetInstruction(3).encoding,
};
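// Look ahead: four lv.q from the same base register at +0/+16/+32/+48 amount to a full matrix load.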
if (g_Config.bFastMemory && (ops[1] >> 26) == 54 && (ops[2] >> 26) == 54 && (ops[3] >> 26) == 54) {
int offsets[4] = {offset, (s16)(ops[1] & 0xFFFC), (s16)(ops[2] & 0xFFFC), (s16)(ops[3] & 0xFFFC)};
int rss[4] = {MIPS_GET_RS(op), MIPS_GET_RS(ops[1]), MIPS_GET_RS(ops[2]), MIPS_GET_RS(ops[3])};
if (offsets[1] == offset + 16 && offsets[2] == offsets[1] + 16 && offsets[3] == offsets[2] + 16 &&
rss[0] == rss[1] && rss[1] == rss[2] && rss[2] == rss[3]) {
int vts[4] = {MIPS_GET_VQVT(op.encoding), MIPS_GET_VQVT(ops[1]), MIPS_GET_VQVT(ops[2]), MIPS_GET_VQVT(ops[3])};
INFO_LOG(Log::JIT, "Matrix load detected! TODO: optimize");
}
}
if (!gpr.IsImm(rs) && jo.cachePointers && g_Config.bFastMemory && offset < 0x400-16 && offset > -0x400-16) {
gpr.MapRegAsPointer(rs);
ARMReg ar = fpr.QMapReg(vt, V_Quad, MAP_DIRTY | MAP_NOINIT);
if (offset) {
ADDI2R(R0, gpr.RPtr(rs), offset, R1);
VLD1(F_32, ar, R0, 2, ALIGN_128);
} else {
VLD1(F_32, ar, gpr.RPtr(rs), 2, ALIGN_128);
}
break;
}
ARMReg ar = fpr.QMapReg(vt, V_Quad, MAP_DIRTY | MAP_NOINIT);
if (gpr.IsImm(rs)) {
u32 addr = (offset + gpr.GetImm(rs)) & 0x3FFFFFFF;
gpr.SetRegImm(R0, addr + (u32)Memory::base);
} else {
gpr.MapReg(rs);
if (g_Config.bFastMemory) {
SetR0ToEffectiveAddress(rs, offset);
} else {
SetCCAndR0ForSafeAddress(rs, offset, R1);
doCheck = true;
}
ADD(R0, R0, MEMBASEREG);
}
FixupBranch skip;
if (doCheck) {
skip = B_CC(CC_EQ);
}
VLD1(F_32, ar, R0, 2, ALIGN_128);
if (doCheck) {
SetJumpTarget(skip);
SetCC(CC_AL);
}
}
break;
case 62:
{
const u32 ops[4] = {
op.encoding,
GetOffsetInstruction(1).encoding,
GetOffsetInstruction(2).encoding,
GetOffsetInstruction(3).encoding,
};
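// Same look-ahead as the load path: four consecutive sv.q form a matrix store.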
if (g_Config.bFastMemory && (ops[1] >> 26) == 62 && (ops[2] >> 26) == 62 && (ops[3] >> 26) == 62) {
int offsets[4] = { offset, (s16)(ops[1] & 0xFFFC), (s16)(ops[2] & 0xFFFC), (s16)(ops[3] & 0xFFFC) };
int rss[4] = { MIPS_GET_RS(op), MIPS_GET_RS(ops[1]), MIPS_GET_RS(ops[2]), MIPS_GET_RS(ops[3]) };
if (offsets[1] == offset + 16 && offsets[2] == offsets[1] + 16 && offsets[3] == offsets[2] + 16 &&
rss[0] == rss[1] && rss[1] == rss[2] && rss[2] == rss[3]) {
int vts[4] = { MIPS_GET_VQVT(op.encoding), MIPS_GET_VQVT(ops[1]), MIPS_GET_VQVT(ops[2]), MIPS_GET_VQVT(ops[3]) };
INFO_LOG(Log::JIT, "Matrix store detected! TODO: optimize");
}
}
if (!gpr.IsImm(rs) && jo.cachePointers && g_Config.bFastMemory && offset < 0x400-16 && offset > -0x400-16) {
gpr.MapRegAsPointer(rs);
ARMReg ar = fpr.QMapReg(vt, V_Quad, 0);
if (offset) {
ADDI2R(R0, gpr.RPtr(rs), offset, R1);
VST1(F_32, ar, R0, 2, ALIGN_128);
} else {
VST1(F_32, ar, gpr.RPtr(rs), 2, ALIGN_128);
}
break;
}
ARMReg ar = fpr.QMapReg(vt, V_Quad, 0);
if (gpr.IsImm(rs)) {
u32 addr = (offset + gpr.GetImm(rs)) & 0x3FFFFFFF;
gpr.SetRegImm(R0, addr + (u32)Memory::base);
} else {
gpr.MapReg(rs);
if (g_Config.bFastMemory) {
SetR0ToEffectiveAddress(rs, offset);
} else {
SetCCAndR0ForSafeAddress(rs, offset, R1);
doCheck = true;
}
ADD(R0, R0, MEMBASEREG);
}
FixupBranch skip;
if (doCheck) {
skip = B_CC(CC_EQ);
}
VST1(F_32, ar, R0, 2, ALIGN_128);
if (doCheck) {
SetJumpTarget(skip);
SetCC(CC_AL);
}
}
break;
default:
DISABLE;
break;
}
fpr.ReleaseSpillLocksAndDiscardTemps();
}
void ArmJit::CompNEON_VVectorInit(MIPSOpcode op) {
CONDITIONAL_DISABLE(VFPU_XFER);
if (js.HasUnknownPrefix()) {
DISABLE_UNKNOWN_PREFIX;
}
VectorSize sz = GetVecSize(op);
DestARMReg vd = NEONMapPrefixD(_VD, sz, MAP_NOINIT | MAP_DIRTY);
switch ((op >> 16) & 0xF) {
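// 6 = vzero, 7 = vone.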
case 6:
VEOR(vd.rd, vd.rd, vd.rd);
break;
case 7:
VMOV_immf(vd.rd, 1.0f);
break;
default:
DISABLE;
break;
}
NEONApplyPrefixD(vd);
fpr.ReleaseSpillLocksAndDiscardTemps();
}
void ArmJit::CompNEON_VDot(MIPSOpcode op) {
CONDITIONAL_DISABLE(VFPU_VEC);
if (js.HasUnknownPrefix()) {
DISABLE_UNKNOWN_PREFIX;
}
VectorSize sz = GetVecSize(op);
MappedRegs r = NEONMapDirtyInIn(op, V_Single, sz, sz);
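// Dot product: multiply the lanes, then reduce with pairwise adds into a single lane.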
switch (sz) {
case V_Pair:
VMUL(F_32, r.vd, r.vs, r.vt);
VPADD(F_32, r.vd, r.vd, r.vd);
break;
case V_Triple:
VMUL(F_32, Q0, r.vs, r.vt);
VPADD(F_32, D0, D0, D0);
VADD(F_32, r.vd, D0, D1);
break;
case V_Quad:
VMUL(F_32, D0, D_0(r.vs), D_0(r.vt));
VMLA(F_32, D0, D_1(r.vs), D_1(r.vt));
VPADD(F_32, r.vd, D0, D0);
break;
case V_Single:
case V_Invalid:
;
}
NEONApplyPrefixD(r.vd);
fpr.ReleaseSpillLocksAndDiscardTemps();
}
void ArmJit::CompNEON_VHdp(MIPSOpcode op) {
CONDITIONAL_DISABLE(VFPU_VEC);
if (js.HasUnknownPrefix()) {
DISABLE_UNKNOWN_PREFIX;
}
DISABLE;
}
void ArmJit::CompNEON_VScl(MIPSOpcode op) {
CONDITIONAL_DISABLE(VFPU_VEC);
if (js.HasUnknownPrefix()) {
DISABLE_UNKNOWN_PREFIX;
}
VectorSize sz = GetVecSize(op);
MappedRegs r = NEONMapDirtyInIn(op, sz, sz, V_Single);
ARMReg temp = MatchSize(Q0, r.vt);
VMOV_neon(temp, r.vt);
VMUL_scalar(F_32, r.vd, r.vs, DScalar(Q0, 0));
NEONApplyPrefixD(r.vd);
fpr.ReleaseSpillLocksAndDiscardTemps();
}
void ArmJit::CompNEON_VV2Op(MIPSOpcode op) {
CONDITIONAL_DISABLE(VFPU_VEC);
if (js.HasUnknownPrefix()) {
DISABLE_UNKNOWN_PREFIX;
}
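// vmov with identical source and dest and no prefixes is a no-op.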
if (((op >> 16) & 0x1f) == 0 && _VS == _VD && js.HasNoPrefix()) {
return;
}
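// Bail early, before mapping any registers, on ops not handled below.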
switch ((op >> 16) & 0x1f) {
case 0:
case 1:
case 2:
case 17:
break;
default:
DISABLE;
break;
}
VectorSize sz = GetVecSize(op);
int n = GetNumVectorElements(sz);
MappedRegs r = NEONMapDirtyIn(op, sz, sz);
ARMReg temp = MatchSize(Q0, r.vs);
switch ((op >> 16) & 0x1f) {
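// 0 = vmov, 1 = vabs, 2 = vneg, 17 = vrsq (the only ones past the early filter); the rest are unfinished.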
case 0:
VMOV_neon(r.vd, r.vs);
break;
case 1:
VABS(F_32, r.vd, r.vs);
break;
case 2:
VNEG(F_32, r.vd, r.vs);
break;
case 4:
if (IsD(r.vd)) {
VMOV_immf(D0, 0.0f);
VMOV_immf(D1, 1.0f);
VMAX(F_32, r.vd, r.vs, D0);
VMIN(F_32, r.vd, r.vd, D1);
} else {
VMOV_immf(Q0, 1.0f);
VMIN(F_32, r.vd, r.vs, Q0);
VMOV_immf(Q0, 0.0f);
VMAX(F_32, r.vd, r.vd, Q0);
}
break;
case 5:
if (IsD(r.vd)) {
VMOV_immf(D0, -1.0f);
VMOV_immf(D1, 1.0f);
VMAX(F_32, r.vd, r.vs, D0);
VMIN(F_32, r.vd, r.vd, D1);
} else {
VMOV_immf(Q0, 1.0f);
VMIN(F_32, r.vd, r.vs, Q0);
VMOV_immf(Q0, -1.0f);
VMAX(F_32, r.vd, r.vd, Q0);
}
break;
case 16:
DISABLE;
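// Unreachable sketch: VRECPE estimate refined with VRECPS Newton-Raphson steps.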
{
ARMReg temp2 = fpr.QAllocTemp(sz);
VRECPE(F_32, temp, r.vs);
VRECPS(temp2, r.vs, temp);
VMUL(F_32, temp2, temp2, temp);
VRECPS(temp2, r.vs, temp);
VMUL(F_32, temp2, temp2, temp);
}
DISABLE;
break;
case 17:
DISABLE;
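// vrsq: the estimate-only VRSQRTE path below is fast but not accurate enough, hence disabled.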
{
if (true) {
VRSQRTE(F_32, r.vd, r.vs);
} else {
ARMReg temp2 = fpr.QAllocTemp(sz);
VRSQRTE(F_32, temp, r.vs);
VRSQRTS(temp2, r.vs, temp);
VMUL(F_32, r.vd, temp2, temp);
}
}
break;
case 18:
DISABLE;
break;
case 19:
DISABLE;
break;
case 20:
DISABLE;
break;
case 21:
DISABLE;
break;
case 22:
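// vsqrt: NEON has no sqrt, so copy to Q0 and run VFP VSQRT on each lane.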
VMOV_neon(MatchSize(Q0, r.vs), r.vs);
for (int i = 0; i < n; i++) {
VSQRT((ARMReg)(S0 + i), (ARMReg)(S0 + i));
}
VMOV_neon(r.vd, MatchSize(Q0, r.vd));
break;
case 23:
DISABLE;
break;
case 24:
DISABLE;
break;
case 26:
DISABLE;
break;
case 28:
DISABLE;
break;
default:
DISABLE;
break;
}
NEONApplyPrefixD(r.vd);
fpr.ReleaseSpillLocksAndDiscardTemps();
}
void ArmJit::CompNEON_Mftv(MIPSOpcode op) {
CONDITIONAL_DISABLE(VFPU_XFER);
int imm = op & 0xFF;
MIPSGPReg rt = _RT;
switch ((op >> 21) & 0x1f) {
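// 3 = mfv/mfvc (VFPU -> GPR), 7 = mtv/mtvc (GPR -> VFPU).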
case 3:
if (rt != 0) {
if (imm < 128) {
ARMReg r = fpr.QMapReg(imm, V_Single, MAP_READ);
gpr.MapReg(rt, MAP_NOINIT | MAP_DIRTY);
VMOV_neon(MatchSize(Q0, r), r);
VMOV(gpr.R(rt), S0);
} else if (imm < 128 + VFPU_CTRL_MAX) {
FlushPrefixV();
if (imm - 128 == VFPU_CTRL_CC) {
gpr.MapDirtyIn(rt, MIPS_REG_VFPUCC);
MOV(gpr.R(rt), gpr.R(MIPS_REG_VFPUCC));
} else {
gpr.MapReg(rt, MAP_NOINIT | MAP_DIRTY);
LDR(gpr.R(rt), CTXREG, offsetof(MIPSState, vfpuCtrl) + 4 * (imm - 128));
}
} else {
ERROR_LOG(Log::CPU, "mfv - invalid register %i", imm);
}
}
break;
case 7:
if (imm < 128) {
ARMReg r = fpr.QMapReg(imm, V_Single, MAP_DIRTY | MAP_NOINIT);
if (gpr.IsMapped(rt)) {
VMOV(S0, gpr.R(rt));
VMOV_neon(r, MatchSize(Q0, r));
} else {
ADDI2R(R0, CTXREG, gpr.GetMipsRegOffset(rt), R1);
VLD1_lane(F_32, r, R0, 0, true);
}
} else if (imm < 128 + VFPU_CTRL_MAX) {
if (imm - 128 == VFPU_CTRL_CC) {
gpr.MapDirtyIn(MIPS_REG_VFPUCC, rt);
MOV(gpr.R(MIPS_REG_VFPUCC), gpr.R(rt));
} else {
gpr.MapReg(rt);
STR(gpr.R(rt), CTXREG, offsetof(MIPSState, vfpuCtrl) + 4 * (imm - 128));
}
if (imm - 128 == VFPU_CTRL_SPREFIX) {
js.prefixSFlag = JitState::PREFIX_UNKNOWN;
js.blockWrotePrefixes = true;
} else if (imm - 128 == VFPU_CTRL_TPREFIX) {
js.prefixTFlag = JitState::PREFIX_UNKNOWN;
js.blockWrotePrefixes = true;
} else if (imm - 128 == VFPU_CTRL_DPREFIX) {
js.prefixDFlag = JitState::PREFIX_UNKNOWN;
js.blockWrotePrefixes = true;
}
} else {
_dbg_assert_msg_(false,"mtv - invalid register");
}
break;
default:
DISABLE;
}
fpr.ReleaseSpillLocksAndDiscardTemps();
}
void ArmJit::CompNEON_Vmfvc(MIPSOpcode op) {
DISABLE;
}
void ArmJit::CompNEON_Vmtvc(MIPSOpcode op) {
CONDITIONAL_DISABLE(VFPU_XFER);
int vs = _VS;
int imm = op & 0xFF;
if (imm >= 128 && imm < 128 + VFPU_CTRL_MAX) {
ARMReg r = fpr.QMapReg(vs, V_Single, 0);
ADDI2R(R0, CTXREG, offsetof(MIPSState, vfpuCtrl[0]) + (imm - 128) * 4, R1);
VST1_lane(F_32, r, R0, 0, true);
fpr.ReleaseSpillLocksAndDiscardTemps();
if (imm - 128 == VFPU_CTRL_SPREFIX) {
js.prefixSFlag = JitState::PREFIX_UNKNOWN;
js.blockWrotePrefixes = true;
} else if (imm - 128 == VFPU_CTRL_TPREFIX) {
js.prefixTFlag = JitState::PREFIX_UNKNOWN;
js.blockWrotePrefixes = true;
} else if (imm - 128 == VFPU_CTRL_DPREFIX) {
js.prefixDFlag = JitState::PREFIX_UNKNOWN;
js.blockWrotePrefixes = true;
}
}
}
void ArmJit::CompNEON_VMatrixInit(MIPSOpcode op) {
CONDITIONAL_DISABLE(VFPU_XFER);
MatrixSize msz = GetMtxSize(op);
int n = GetMatrixSide(msz);
ARMReg cols[4];
fpr.QMapMatrix(cols, _VD, msz, MAP_NOINIT | MAP_DIRTY);
switch ((op >> 16) & 0xF) {
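// 3 = vmidt (identity), 6 = vmzero, 7 = vmone.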
case 3:
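// Build D0 = (1, 0) and D1 = (0, 1) with a transpose and a reverse, then scatter them onto the diagonal.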
VEOR(D0, D0, D0);
VMOV_immf(D1, 1.0f);
VTRN(F_32, D0, D1);
VREV64(I_32, D0, D0);
switch (msz) {
case M_2x2:
VMOV_neon(cols[0], D0);
VMOV_neon(cols[1], D1);
break;
case M_3x3:
VMOV_neon(D_0(cols[0]), D0);
VMOV_imm(I_8, D_1(cols[0]), VIMMxxxxxxxx, 0);
VMOV_neon(D_0(cols[1]), D1);
VMOV_imm(I_8, D_1(cols[1]), VIMMxxxxxxxx, 0);
VMOV_imm(I_8, D_0(cols[2]), VIMMxxxxxxxx, 0);
VMOV_neon(D_1(cols[2]), D0);
break;
case M_4x4:
VMOV_neon(D_0(cols[0]), D0);
VMOV_imm(I_8, D_1(cols[0]), VIMMxxxxxxxx, 0);
VMOV_neon(D_0(cols[1]), D1);
VMOV_imm(I_8, D_1(cols[1]), VIMMxxxxxxxx, 0);
VMOV_imm(I_8, D_0(cols[2]), VIMMxxxxxxxx, 0);
VMOV_neon(D_1(cols[2]), D0);
VMOV_imm(I_8, D_0(cols[3]), VIMMxxxxxxxx, 0);
VMOV_neon(D_1(cols[3]), D1);
break;
default:
_assert_msg_(false, "Bad matrix size");
break;
}
break;
case 6:
for (int i = 0; i < n; i++) {
VEOR(cols[i], cols[i], cols[i]);
}
break;
case 7:
for (int i = 0; i < n; i++) {
VMOV_immf(cols[i], 1.0f);
}
break;
}
fpr.ReleaseSpillLocksAndDiscardTemps();
}
void ArmJit::CompNEON_Vmmov(MIPSOpcode op) {
CONDITIONAL_DISABLE(VFPU_MTX_VMMOV);
if (_VS == _VD) {
return;
}
MatrixSize msz = GetMtxSize(op);
MatrixOverlapType overlap = GetMatrixOverlap(_VD, _VS, msz);
if (overlap != OVERLAP_NONE) {
DISABLE;
}
ARMReg s_cols[4], d_cols[4];
fpr.QMapMatrix(s_cols, _VS, msz, 0);
fpr.QMapMatrix(d_cols, _VD, msz, MAP_DIRTY | MAP_NOINIT);
int n = GetMatrixSide(msz);
for (int i = 0; i < n; i++) {
VMOV_neon(d_cols[i], s_cols[i]);
}
fpr.ReleaseSpillLocksAndDiscardTemps();
}
void ArmJit::CompNEON_Vmmul(MIPSOpcode op) {
CONDITIONAL_DISABLE(VFPU_MTX_VMMUL);
MatrixSize msz = GetMtxSize(op);
int n = GetMatrixSide(msz);
bool overlap = GetMatrixOverlap(_VD, _VS, msz) || GetMatrixOverlap(_VD, _VT, msz);
if (overlap) {
INFO_LOG(Log::JIT, "Matrix overlap, ignoring.");
DISABLE;
}
if (msz == M_2x2) {
DISABLE;
}
ARMReg s_cols[4], t_cols[4], d_cols[4];
fpr.QMapMatrix(t_cols, _VT, msz, MAP_FORCE_LOW);
fpr.QMapMatrix(s_cols, Xpose(_VS), msz, MAP_PREFER_HIGH);
fpr.QMapMatrix(d_cols, _VD, msz, MAP_PREFER_HIGH | MAP_NOINIT | MAP_DIRTY);
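// Each result column is a linear combination of the s columns, weighted by the scalars of the matching t column (VMUL, then VMLA accumulates).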
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
if (i == 0) {
VMUL_scalar(F_32, d_cols[j], s_cols[i], XScalar(t_cols[j], i));
} else {
VMLA_scalar(F_32, d_cols[j], s_cols[i], XScalar(t_cols[j], i));
}
}
}
fpr.ReleaseSpillLocksAndDiscardTemps();
}
void ArmJit::CompNEON_Vmscl(MIPSOpcode op) {
CONDITIONAL_DISABLE(VFPU_MTX_VMSCL);
MatrixSize msz = GetMtxSize(op);
bool overlap = GetMatrixOverlap(_VD, _VS, msz) != OVERLAP_NONE;
if (overlap) {
DISABLE;
}
int n = GetMatrixSide(msz);
ARMReg s_cols[4], t, d_cols[4];
fpr.QMapMatrix(s_cols, _VS, msz, 0);
fpr.QMapMatrix(d_cols, _VD, msz, MAP_NOINIT | MAP_DIRTY);
t = fpr.QMapReg(_VT, V_Single, 0);
VMOV_neon(D0, t);
for (int i = 0; i < n; i++) {
VMUL_scalar(F_32, d_cols[i], s_cols[i], DScalar(D0, 0));
}
fpr.ReleaseSpillLocksAndDiscardTemps();
}
void ArmJit::CompNEON_Vtfm(MIPSOpcode op) {
CONDITIONAL_DISABLE(VFPU_MTX_VTFM);
if (js.HasUnknownPrefix()) {
DISABLE;
}
if (_VT == _VD) {
DISABLE;
}
VectorSize sz = GetVecSize(op);
MatrixSize msz = GetMtxSize(op);
int n = GetNumVectorElements(sz);
int ins = (op >> 23) & 7;
bool homogenous = false;
if (n == ins) {
n++;
sz = (VectorSize)((int)(sz)+1);
msz = (MatrixSize)((int)(msz)+1);
homogenous = true;
}
else if (n != ins + 1) {
DISABLE;
}
ARMReg s_cols[4], t, d;
t = fpr.QMapReg(_VT, sz, MAP_FORCE_LOW);
fpr.QMapMatrix(s_cols, Xpose(_VS), msz, MAP_PREFER_HIGH);
d = fpr.QMapReg(_VD, sz, MAP_DIRTY | MAP_NOINIT | MAP_PREFER_HIGH);
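// d = M * t: one by-scalar multiply-accumulate per matrix column; in the homogeneous case the last column is added unscaled (implicit 1.0 in t).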
VMUL_scalar(F_32, d, s_cols[0], XScalar(t, 0));
for (int i = 1; i < n; i++) {
if (homogenous && i == n - 1) {
VADD(F_32, d, d, s_cols[i]);
} else {
VMLA_scalar(F_32, d, s_cols[i], XScalar(t, i));
}
}
fpr.ReleaseSpillLocksAndDiscardTemps();
}
void ArmJit::CompNEON_VCrs(MIPSOpcode op) {
DISABLE;
}
void ArmJit::CompNEON_VDet(MIPSOpcode op) {
DISABLE;
}
void ArmJit::CompNEON_Vi2x(MIPSOpcode op) {
DISABLE;
}
void ArmJit::CompNEON_Vx2i(MIPSOpcode op) {
DISABLE;
}
void ArmJit::CompNEON_Vf2i(MIPSOpcode op) {
DISABLE;
}
void ArmJit::CompNEON_Vi2f(MIPSOpcode op) {
CONDITIONAL_DISABLE(VFPU_VEC);
if (js.HasUnknownPrefix()) {
DISABLE;
}
DISABLE;
VectorSize sz = GetVecSize(op);
int n = GetNumVectorElements(sz);
int imm = (op >> 16) & 0x1f;
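// Unreachable while the DISABLE above stands: d[i] = (float)s[i] * (1 / 2^imm).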
const float mult = 1.0f / (float)(1UL << imm);
MappedRegs regs = NEONMapDirtyIn(op, sz, sz);
MOVI2F_neon(MatchSize(Q0, regs.vd), mult, R0);
VCVT(F_32, regs.vd, regs.vs);
VMUL(F_32, regs.vd, regs.vd, Q0);
NEONApplyPrefixD(regs.vd);
fpr.ReleaseSpillLocksAndDiscardTemps();
}
void ArmJit::CompNEON_Vh2f(MIPSOpcode op) {
CONDITIONAL_DISABLE(VFPU_VEC);
if (!cpu_info.bHalf) {
DISABLE;
}
VectorSize sz = GetVecSize(op);
VectorSize outsize = V_Pair;
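// Half floats unpack to twice the width: single -> pair, pair -> quad.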
switch (sz) {
case V_Single:
outsize = V_Pair;
break;
case V_Pair:
outsize = V_Quad;
break;
default:
ERROR_LOG(Log::JIT, "Vh2f: Must be pair or quad");
break;
}
ARMReg vs = NEONMapPrefixS(_VS, sz, 0);
DestARMReg vd = NEONMapPrefixD(_VD, outsize, MAP_DIRTY);
VCVTF32F16(vd.rd, vs);
NEONApplyPrefixD(vd);
fpr.ReleaseSpillLocksAndDiscardTemps();
}
void ArmJit::CompNEON_Vcst(MIPSOpcode op) {
CONDITIONAL_DISABLE(VFPU_XFER);
if (js.HasUnknownPrefix()) {
DISABLE_UNKNOWN_PREFIX;
}
int conNum = (op >> 16) & 0x1f;
VectorSize sz = GetVecSize(op);
int n = GetNumVectorElements(sz);
DestARMReg vd = NEONMapPrefixD(_VD, sz, MAP_DIRTY | MAP_NOINIT);
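// Point R0 at the constant table entry and broadcast it into every lane of vd.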
gpr.SetRegImm(R0, (u32)(void *)&cst_constants[conNum]);
VLD1_all_lanes(F_32, vd, R0, true);
NEONApplyPrefixD(vd);
fpr.ReleaseSpillLocksAndDiscardTemps();
}
void ArmJit::CompNEON_Vhoriz(MIPSOpcode op) {
CONDITIONAL_DISABLE(VFPU_VEC);
if (js.HasUnknownPrefix()) {
DISABLE_UNKNOWN_PREFIX;
}
VectorSize sz = GetVecSize(op);
switch ((op >> 16) & 31) {
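// 6 = vfad (sum of lanes), 7 = vavg (not implemented).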
case 6:
{
VMOV_neon(F_32, D1, 0.0f);
MappedRegs r = NEONMapDirtyIn(op, V_Single, sz);
switch (sz) {
case V_Pair:
VPADD(F_32, r.vd, r.vs, r.vs);
break;
case V_Triple:
VPADD(F_32, D0, D_0(r.vs), D_0(r.vs));
VADD(F_32, r.vd, D0, D_1(r.vs));
break;
case V_Quad:
VADD(F_32, D0, D_0(r.vs), D_1(r.vs));
VPADD(F_32, r.vd, D0, D0);
break;
default:
;
}
VADD(F_32, r.vd, r.vd, D1);
break;
}
case 7:
DISABLE;
break;
}
fpr.ReleaseSpillLocksAndDiscardTemps();
}
void ArmJit::CompNEON_VRot(MIPSOpcode op) {
CONDITIONAL_DISABLE(VFPU_VEC);
if (js.HasUnknownPrefix()) {
DISABLE_UNKNOWN_PREFIX;
}
DISABLE;
int vd = _VD;
int vs = _VS;
VectorSize sz = GetVecSize(op);
int n = GetNumVectorElements(sz);
fpr.ReleaseSpillLocksAndDiscardTemps();
}
void ArmJit::CompNEON_VIdt(MIPSOpcode op) {
CONDITIONAL_DISABLE(VFPU_XFER);
if (js.HasUnknownPrefix()) {
DISABLE_UNKNOWN_PREFIX;
}
VectorSize sz = GetVecSize(op);
DestARMReg vd = NEONMapPrefixD(_VD, sz, MAP_NOINIT | MAP_DIRTY);
switch (sz) {
case V_Pair:
VMOV_immf(vd, 1.0f);
if ((_VD & 1) == 0) {
VMOV_imm(I_64, D0, VIMMbits2bytes, 0x0F);
VAND(vd, vd, D0);
} else {
VMOV_imm(I_64, D0, VIMMbits2bytes, 0xF0);
VAND(vd, vd, D0);
}
break;
case V_Triple:
case V_Quad:
{
VEOR(vd, vd, vd);
ARMReg dest = (_VD & 2) ? D_1(vd) : D_0(vd);
VMOV_immf(dest, 1.0f);
if ((_VD & 1) == 0) {
VMOV_imm(I_64, D0, VIMMbits2bytes, 0x0F);
VAND(dest, dest, D0);
} else {
VMOV_imm(I_64, D0, VIMMbits2bytes, 0xF0);
VAND(dest, dest, D0);
}
}
break;
default:
_dbg_assert_msg_(false,"Bad vidt instruction");
break;
}
NEONApplyPrefixD(vd);
fpr.ReleaseSpillLocksAndDiscardTemps();
}
void ArmJit::CompNEON_Vcmp(MIPSOpcode op) {
CONDITIONAL_DISABLE(VFPU_COMP);
if (js.HasUnknownPrefix())
DISABLE;
DISABLE;
VectorSize sz = GetVecSize(op);
int n = GetNumVectorElements(sz);
VCondition cond = (VCondition)(op & 0xF);
MappedRegs regs = NEONMapInIn(op, sz, sz);
ARMReg vs = regs.vs, vt = regs.vt;
ARMReg res = fpr.QAllocTemp(sz);
switch (cond) {
case VC_EI:
case VC_NI:
DISABLE;
case VC_ES:
case VC_NS:
case VC_EN:
case VC_NN:
DISABLE;
break;
case VC_EZ:
case VC_NZ:
VMOV_immf(Q0, 0.0f);
break;
default:
;
}
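// Bits 4 and 5 of VFPU_CC are the aggregate "any" and "all" bits; bits 0..n-1 are the per-lane results.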
int affected_bits = (1 << 4) | (1 << 5);
for (int i = 0; i < n; i++) {
affected_bits |= 1 << i;
}
static const u32 collectorBits[4] = { 1, 2, 4, 8 };
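// AND each lane's all-ones compare mask with {1,2,4,8}, then pairwise-add the lanes down to a single CC bitmask.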
MOVP2R(R1, &collectorBits);
MOVI2R(R0, 0);
CCFlags flag = CC_AL;
bool oneIsFalse = false;
switch (cond) {
case VC_FL:
break;
case VC_TR:
MOVI2R(R0, affected_bits);
break;
case VC_ES:
case VC_NS:
DISABLE;
break;
case VC_EN:
case VC_NN:
DISABLE;
break;
case VC_EQ:
VCEQ(F_32, res, vs, vt);
break;
case VC_LT:
VCLT(F_32, res, vs, vt);
break;
case VC_LE:
VCLE(F_32, res, vs, vt);
break;
case VC_NE:
VCEQ(F_32, res, vs, vt);
oneIsFalse = true;
break;
case VC_GE:
VCGE(F_32, res, vs, vt);
break;
case VC_GT:
VCGT(F_32, res, vs, vt);
break;
case VC_EZ:
VCEQ(F_32, res, vs);
break;
case VC_NZ:
VCEQ(F_32, res, vs);
oneIsFalse = true;
break;
default:
DISABLE;
}
if (oneIsFalse) {
VMVN(res, res);
}
VLD1(I_32, Q0, R1, n < 2 ? 1 : 2);
VAND(Q0, Q0, res);
VPADD(I_32, Q0, Q0, Q0);
VPADD(I_32, D0, D0, D0);
VMOV(R0, S0);
AND(R0, R0, affected_bits);
gpr.MapReg(MIPS_REG_VFPUCC, MAP_DIRTY);
BIC(gpr.R(MIPS_REG_VFPUCC), gpr.R(MIPS_REG_VFPUCC), affected_bits);
ORR(gpr.R(MIPS_REG_VFPUCC), gpr.R(MIPS_REG_VFPUCC), R0);
fpr.ReleaseSpillLocksAndDiscardTemps();
}
void ArmJit::CompNEON_Vcmov(MIPSOpcode op) {
CONDITIONAL_DISABLE(VFPU_COMP);
if (js.HasUnknownPrefix()) {
DISABLE;
}
DISABLE;
VectorSize sz = GetVecSize(op);
int n = GetNumVectorElements(sz);
ARMReg vs = NEONMapPrefixS(_VS, sz, 0);
DestARMReg vd = NEONMapPrefixD(_VD, sz, MAP_DIRTY);
int tf = (op >> 19) & 1;
int imm3 = (op >> 16) & 7;
if (imm3 < 6) {
gpr.MapReg(MIPS_REG_VFPUCC);
TST(gpr.R(MIPS_REG_VFPUCC), 1 << imm3);
// vcmovt (tf == 0) copies when the CC bit is set, vcmovf (tf == 1) when it is clear.
FixupBranch skip = B_CC(tf ? CC_NEQ : CC_EQ);
VMOV_neon(vd, vs);
SetJumpTarget(skip);
} else {
DISABLE;
}
NEONApplyPrefixD(vd);
fpr.ReleaseSpillLocksAndDiscardTemps();
}
void ArmJit::CompNEON_Viim(MIPSOpcode op) {
CONDITIONAL_DISABLE(VFPU_XFER);
if (js.HasUnknownPrefix()) {
DISABLE;
}
DestARMReg vt = NEONMapPrefixD(_VT, V_Single, MAP_NOINIT | MAP_DIRTY);
s32 imm = SignExtend16ToS32(op);
MOVI2F(S0, (float)imm, R0);
VMOV_neon(vt.rd, D0);
NEONApplyPrefixD(vt);
fpr.ReleaseSpillLocksAndDiscardTemps();
}
void ArmJit::CompNEON_Vfim(MIPSOpcode op) {
CONDITIONAL_DISABLE(VFPU_XFER);
if (js.HasUnknownPrefix()) {
DISABLE;
}
DestARMReg vt = NEONMapPrefixD(_VT, V_Single, MAP_NOINIT | MAP_DIRTY);
FP16 half;
half.u = op & 0xFFFF;
FP32 fval = half_to_float_fast5(half);
MOVI2F(S0, (float)fval.f, R0);
VMOV_neon(vt.rd, D0);
NEONApplyPrefixD(vt);
fpr.ReleaseSpillLocksAndDiscardTemps();
}
void ArmJit::CompNEON_VCrossQuat(MIPSOpcode op) {
CONDITIONAL_DISABLE(VFPU_VEC);
if (js.HasUnknownPrefix()) {
DISABLE_UNKNOWN_PREFIX;
}
VectorSize sz = GetVecSize(op);
if (sz != V_Triple) {
DISABLE;
}
MappedRegs r = NEONMapDirtyInIn(op, sz, sz, sz, false);
ARMReg t1 = Q0;
ARMReg t2 = fpr.QAllocTemp(V_Triple);
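// Cross product via shuffles: form s.yzx and t.zxy for the VMUL, then s.zxy and t.yzx for the VMLS, giving d = s.yzx*t.zxy - s.zxy*t.yzx.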
VMOV_neon(t1, r.vs);
VMOV_neon(t2, r.vt);
VTRN(F_32, D_0(t2), D_1(t2));
VREV64(F_32, D_0(t1), D_0(t1));
VREV64(F_32, D_0(t2), D_0(t2));
VTRN(F_32, D_0(t1), D_1(t1));
VMUL(F_32, r.vd, t1, t2);
VTRN(F_32, D_0(t2), D_1(t2));
VREV64(F_32, D_0(t1), D_0(t1));
VREV64(F_32, D_0(t2), D_0(t2));
VTRN(F_32, D_0(t1), D_1(t1));
VMLS(F_32, r.vd, t1, t2);
fpr.ReleaseSpillLocksAndDiscardTemps();
}
void ArmJit::CompNEON_Vsgn(MIPSOpcode op) {
DISABLE;
}
void ArmJit::CompNEON_Vocp(MIPSOpcode op) {
CONDITIONAL_DISABLE(VFPU_VEC);
if (js.HasUnknownPrefix()) {
DISABLE;
}
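// Prefix hack: forcing the negate bits into the S prefix makes the mapped source -s, so vocp's 1.0f - s is computed as 1.0f + (-s).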
js.prefixS |= 0x000F0000;
js.prefixT = (js.prefixT & ~0x000000FF) | 0x00000055 | 0x0000F000;
VectorSize sz = GetVecSize(op);
int n = GetNumVectorElements(sz);
MappedRegs regs = NEONMapDirtyIn(op, sz, sz);
MOVI2F_neon(Q0, 1.0f, R0);
VADD(F_32, regs.vd, Q0, regs.vs);
NEONApplyPrefixD(regs.vd);
fpr.ReleaseSpillLocksAndDiscardTemps();
}
void ArmJit::CompNEON_ColorConv(MIPSOpcode op) {
DISABLE;
}
void ArmJit::CompNEON_Vbfy(MIPSOpcode op) {
DISABLE;
}
}
#endif