Implements some 32-bit instructions (VBIC, VTST, VSRA) (#1192)
* Added some 32-bit instructions:
  * VBIC
  * VTST
  * VSRA
* Incremented the PTC
* Add tests and fix implementation
* Fixed VBIC immediate opcode mapping
* Hey hey!
* Nit.

Co-authored-by: gdkchan <gab.dark.100@gmail.com>
Co-authored-by: LDj3SNuD <dvitiello@gmail.com>
Co-authored-by: LDj3SNuD <35856442+LDj3SNuD@users.noreply.github.com>
This commit is contained in:
parent 9d65de74fc
commit 3af2ce74ec
10 changed files with 361 additions and 66 deletions
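
For context (not part of the diff below), a minimal C# sketch of the per-element semantics the new emitters implement, assuming 64-bit unsigned lanes; the class and method names are illustrative only:

// Scalar reference semantics for the AArch32 SIMD instructions added here.
static class NeonReference
{
    // VBIC (register): d = n AND NOT m.
    public static ulong Vbic(ulong n, ulong m) => n & ~m;

    // VBIC (immediate): the destination is also a source, d = d AND NOT imm.
    public static ulong VbicImm(ulong d, ulong imm) => d & ~imm;

    // VTST: all ones where (n AND m) is non-zero, all zeros elsewhere.
    public static ulong Vtst(ulong n, ulong m) => (n & m) != 0 ? ulong.MaxValue : 0UL;
}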
@@ -15,7 +15,7 @@ namespace ARMeilleure.Instructions
         {
             if (Optimizations.UseSse2)
             {
-                EmitVectorBinaryOpF32(context, Intrinsic.X86Pand, Intrinsic.X86Pand);
+                EmitVectorBinaryOpSimd32(context, (n, m) => context.AddIntrinsic(Intrinsic.X86Pand, n, m));
             }
             else
             {
@@ -23,6 +23,54 @@ namespace ARMeilleure.Instructions
             }
         }
 
+        public static void Vbic_I(ArmEmitterContext context)
+        {
+            if (Optimizations.UseSse2)
+            {
+                EmitVectorBinaryOpSimd32(context, (n, m) => context.AddIntrinsic(Intrinsic.X86Pandn, m, n));
+            }
+            else
+            {
+                EmitVectorBinaryOpZx32(context, (op1, op2) => context.BitwiseAnd(op1, context.BitwiseNot(op2)));
+            }
+        }
+
+        public static void Vbic_II(ArmEmitterContext context)
+        {
+            OpCode32SimdImm op = (OpCode32SimdImm)context.CurrOp;
+
+            long immediate = op.Immediate;
+
+            // Replicate fields to fill the 64-bits, if size is < 64-bits.
+            switch (op.Size)
+            {
+                case 0: immediate *= 0x0101010101010101L; break;
+                case 1: immediate *= 0x0001000100010001L; break;
+                case 2: immediate *= 0x0000000100000001L; break;
+            }
+
+            Operand imm = Const(immediate);
+            Operand res = GetVecA32(op.Qd);
+
+            if (op.Q)
+            {
+                for (int elem = 0; elem < 2; elem++)
+                {
+                    Operand de = EmitVectorExtractZx(context, op.Qd, elem, 3);
+
+                    res = EmitVectorInsert(context, res, context.BitwiseAnd(de, context.BitwiseNot(imm)), elem, 3);
+                }
+            }
+            else
+            {
+                Operand de = EmitVectorExtractZx(context, op.Qd, op.Vd & 1, 3);
+
+                res = EmitVectorInsert(context, res, context.BitwiseAnd(de, context.BitwiseNot(imm)), op.Vd & 1, 3);
+            }
+
+            context.Copy(GetVecA32(op.Qd), res);
+        }
+
         public static void Vbif(ArmEmitterContext context)
         {
             EmitBifBit(context, true);
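
Two details worth noting about the VBIC emitters above. On the SSE2 path, PANDN computes (NOT first operand) AND second operand, so passing the sources as (m, n) yields n & ~m, which is the VBIC register form. The immediate form replicates the decoded immediate across a 64-bit lane by multiplication; a hypothetical standalone helper (not part of the commit) showing the same trick:

// Replicates a sub-64-bit immediate across a 64-bit lane, assuming the value
// already fits within the element width selected by size
// (0 = 8-bit, 1 = 16-bit, 2 = 32-bit fields).
static long ReplicateImmediate(long value, int size)
{
    return size switch
    {
        0 => value * 0x0101010101010101L, // fill all 8 bytes
        1 => value * 0x0001000100010001L, // fill all 4 halfwords
        2 => value * 0x0000000100000001L, // fill both words
        _ => value,                       // already a full 64-bit value
    };
}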
@@ -59,7 +107,7 @@ namespace ARMeilleure.Instructions
         {
             if (Optimizations.UseSse2)
             {
-                EmitVectorBinaryOpF32(context, Intrinsic.X86Pxor, Intrinsic.X86Pxor);
+                EmitVectorBinaryOpSimd32(context, (n, m) => context.AddIntrinsic(Intrinsic.X86Pxor, n, m));
             }
             else
             {
@@ -71,7 +119,7 @@ namespace ARMeilleure.Instructions
         {
             if (Optimizations.UseSse2)
             {
-                EmitVectorBinaryOpF32(context, Intrinsic.X86Por, Intrinsic.X86Por);
+                EmitVectorBinaryOpSimd32(context, (n, m) => context.AddIntrinsic(Intrinsic.X86Por, n, m));
             }
             else
             {
@@ -115,6 +163,15 @@ namespace ARMeilleure.Instructions
             context.Copy(GetVecA32(op.Qd), res);
         }
 
+        public static void Vtst(ArmEmitterContext context)
+        {
+            EmitVectorBinaryOpZx32(context, (op1, op2) =>
+            {
+                Operand isZero = context.ICompareEqual(context.BitwiseAnd(op1, op2), Const(0));
+                return context.ConditionalSelect(isZero, Const(0), Const(-1));
+            });
+        }
+
         private static void EmitBifBit(ArmEmitterContext context, bool notRm)
         {
             OpCode32SimdReg op = (OpCode32SimdReg)context.CurrOp;
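
The VSRA emitter mentioned in the title lives in one of the other changed files and is not shown in this excerpt. For reference, its per-element semantics are an accumulating right shift; a hedged scalar sketch of the unsigned variant:

// VSRA (unsigned): shift the source element right and accumulate into the destination.
static ulong Vsra(ulong d, ulong n, int shift) => d + (n >> shift);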