ARMeilleure: Add gfni acceleration (#3669)

* ARMeilleure: Add `GFNI` detection

This is intended for utilizing the `gf2p8affineqb` instruction
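
For reference, GFNI support is reported by CPUID leaf 7 (sub-leaf 0) in ECX bit 8. The sketch below is a minimal standalone check in C#; it is illustrative only and is not the ARMeilleure detection code itself:

```csharp
using System.Runtime.Intrinsics.X86;

static class GfniDetectionSketch
{
    // GFNI is advertised in CPUID.(EAX=07H, ECX=0):ECX[bit 8].
    public static bool IsGfniSupported()
    {
        if (!X86Base.IsSupported)
        {
            return false; // Not running on x86/x64.
        }

        (_, _, int ecx, _) = X86Base.CpuId(7, 0);

        return (ecx & (1 << 8)) != 0;
    }
}
```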

* ARMeilleure: Add `gf2p8affineqb`

Not using the VEX or EVEX form of this instruction is intentional. There
are `GFNI`-capable chips that do not support AVX (so no VEX encoding), such as
Tremont (Lakefield) chips as well as Jasper Lake.

13df339fe7/GenuineIntel/GenuineIntel00806A1_Lakefield_LC_InstLatX64.txt (L1297-L1299)

13df339fe7/GenuineIntel/GenuineIntel00906C0_JasperLake_InstLatX64.txt (L1252-L1254)

* ARMeilleure: Add `gfni` acceleration of `Rbit_V`

Passes all `Rbit_V*` unit tests on my `i9-11900k`
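
For context on why a single `gf2p8affineqb` can reverse the bits of every byte: the instruction treats each source byte as a GF(2) bit-vector, multiplies it by an 8x8 bit-matrix packed into a qword (the row for result bit i lives in byte 7 - i), and XORs an 8-bit immediate into the result. The scalar model below is illustrative only (not the emitter code); it checks that the matrix `0x8040201008040201` with immediate 0 is exactly the bit-reversal transform:

```csharp
using System;
using System.Numerics;

static class Gf2p8AffineModel
{
    // Scalar model of gf2p8affineqb for one byte:
    // result bit i = parity(matrix.byte[7 - i] AND src) XOR imm.bit[i].
    static byte AffineByte(ulong matrix, byte src, byte imm)
    {
        byte result = 0;

        for (int i = 0; i < 8; i++)
        {
            byte row = (byte)(matrix >> ((7 - i) * 8));
            int bit = (BitOperations.PopCount((uint)(row & src)) & 1) ^ ((imm >> i) & 1);

            result |= (byte)(bit << i);
        }

        return result;
    }

    static byte ReverseBits(byte value)
    {
        byte result = 0;

        for (int i = 0; i < 8; i++)
        {
            result |= (byte)(((value >> i) & 1) << (7 - i));
        }

        return result;
    }

    static void Main()
    {
        const ulong BitReverseMatrix = 0x8040201008040201UL;

        for (int b = 0; b <= 255; b++)
        {
            if (AffineByte(BitReverseMatrix, (byte)b, 0) != ReverseBits((byte)b))
            {
                throw new Exception($"Mismatch at 0x{b:X2}");
            }
        }

        Console.WriteLine("0x8040201008040201 bit-reverses every byte value.");
    }
}
```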

* ARMeilleure: Add `gfni` acceleration of `S{l,r}i_V`

Also added a fast-path for when the shift amount is greater than or equal to
the element size.

* ARMeilleure: Add `gfni` acceleration of `Shl_V` and `Sshr_V`

* ARMeilleure: Increment InternalVersion

* ARMeilleure: Fix Intrinsic and Assembler Table alignment

`gf2p8affineqb` is the longest instruction name I know of. It shouldn't
get any wider than this.

* ARMeilleure: Remove SSE2+SHA requirement for GFNI

* ARMeilleure: Add `X86GetGf2p8LogicalShiftLeft`

Used to generate the GF(2^8) 8x8 bit-matrices that implement bit-shifts via the `gf2p8affineqb` instruction.
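
The idea behind such a helper, sketched below (the name and exact form here are illustrative, not a quote of the committed code): the identity matrix for `gf2p8affineqb` is `0x0102040810204080`, with the row for result bit i stored in byte 7 - i. Moving every row by whole bytes re-targets each result bit to a neighbouring source bit, so shifting the identity qword right by `shift * 8` yields a left-shift-by-`shift` matrix, and shifting it left yields a logical right-shift matrix:

```csharp
// Illustrative sketch of a GF(2^8) shift-matrix builder for gf2p8affineqb.
// Valid for shift in [-7, 7]; positive = logical shift left, negative = logical shift right.
public static ulong GetGf2p8LogicalShiftLeftMatrix(int shift)
{
    // Identity: result bit i reads source bit i (row i lives in byte 7 - i).
    const ulong Identity = 0x0102040810204080UL;

    return shift >= 0
        ? Identity >> (shift * 8)   // Left shift: result bit i reads source bit i - shift.
        : Identity << (-shift * 8); // Right shift: result bit i reads source bit i + |shift|.
}
```

For example, `GetGf2p8LogicalShiftLeftMatrix(1)` is `0x0001020408102040`: result bit 0 reads nothing (so it becomes 0) and every other result bit i reads source bit i - 1, which is a left shift by one. Arithmetic right shifts then only need to OR `0x80` rows into the top `shift` result bits, as the `Sshr_V` hunk below does.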

* ARMeilleure: Append `FeatureInfo7Ecx` to `FeatureInfo`
Wunk 2022-10-02 02:17:19 -07:00 committed by GitHub
parent 96bf7f8522
commit 45ce540b9b
10 changed files with 589 additions and 409 deletions


@@ -88,8 +88,35 @@ namespace ARMeilleure.Instructions
             OpCodeSimdShImm op = (OpCodeSimdShImm)context.CurrOp;
             int shift = GetImmShl(op);
             int eSize = 8 << op.Size;
-            if (Optimizations.UseSse2 && op.Size > 0)
+            if (shift >= eSize)
+            {
+                if ((op.RegisterSize == RegisterSize.Simd64))
+                {
+                    Operand res = context.VectorZeroUpper64(GetVec(op.Rd));
+                    context.Copy(GetVec(op.Rd), res);
+                }
+            }
+            else if (Optimizations.UseGfni && op.Size == 0)
+            {
+                Operand n = GetVec(op.Rn);
+                ulong bitMatrix = X86GetGf2p8LogicalShiftLeft(shift);
+                Operand vBitMatrix = X86GetElements(context, bitMatrix, bitMatrix);
+                Operand res = context.AddIntrinsic(Intrinsic.X86Gf2p8affineqb, n, vBitMatrix, Const(0));
+                if (op.RegisterSize == RegisterSize.Simd64)
+                {
+                    res = context.VectorZeroUpper64(res);
+                }
+                context.Copy(GetVec(op.Rd), res);
+            }
+            else if (Optimizations.UseSse2 && op.Size > 0)
             {
                 Operand n = GetVec(op.Rn);
@@ -396,10 +423,40 @@ namespace ARMeilleure.Instructions
         {
             OpCodeSimdShImm op = (OpCodeSimdShImm)context.CurrOp;
-            if (Optimizations.UseSse2 && op.Size > 0 && op.Size < 3)
-            {
-                int shift = GetImmShr(op);
+            int shift = GetImmShr(op);
+            if (Optimizations.UseGfni && op.Size == 0)
+            {
+                Operand n = GetVec(op.Rn);
+                ulong bitMatrix;
+                if (shift < 8)
+                {
+                    bitMatrix = X86GetGf2p8LogicalShiftLeft(-shift);
+                    // Extend sign-bit
+                    bitMatrix |= 0x8080808080808080UL >> (64 - shift * 8);
+                }
+                else
+                {
+                    // Replicate sign-bit into all bits
+                    bitMatrix = 0x8080808080808080UL;
+                }
+                Operand vBitMatrix = X86GetElements(context, bitMatrix, bitMatrix);
+                Operand res = context.AddIntrinsic(Intrinsic.X86Gf2p8affineqb, n, vBitMatrix, Const(0));
+                if (op.RegisterSize == RegisterSize.Simd64)
+                {
+                    res = context.VectorZeroUpper64(res);
+                }
+                context.Copy(GetVec(op.Rd), res);
+            }
+            else if (Optimizations.UseSse2 && op.Size > 0 && op.Size < 3)
+            {
                 Operand n = GetVec(op.Rn);
                 Intrinsic sraInst = X86PsraInstruction[op.Size];
@@ -929,10 +986,44 @@ namespace ARMeilleure.Instructions
             OpCodeSimdShImm op = (OpCodeSimdShImm)context.CurrOp;
             int shift = GetImmShl(op);
             int eSize = 8 << op.Size;
             ulong mask = shift != 0 ? ulong.MaxValue >> (64 - shift) : 0UL;
-            if (Optimizations.UseSse2 && op.Size > 0)
+            if (shift >= eSize)
+            {
+                if ((op.RegisterSize == RegisterSize.Simd64) || scalar)
+                {
+                    Operand res = context.VectorZeroUpper64(GetVec(op.Rd));
+                    context.Copy(GetVec(op.Rd), res);
+                }
+            }
+            else if (Optimizations.UseGfni && op.Size == 0)
+            {
+                Operand d = GetVec(op.Rd);
+                Operand n = GetVec(op.Rn);
+                ulong bitMatrix = X86GetGf2p8LogicalShiftLeft(shift);
+                Operand vBitMatrix = X86GetElements(context, bitMatrix, bitMatrix);
+                Operand nShifted = context.AddIntrinsic(Intrinsic.X86Gf2p8affineqb, n, vBitMatrix, Const(0));
+                Operand dMask = X86GetAllElements(context, (long)mask * _masks_SliSri[op.Size]);
+                Operand dMasked = context.AddIntrinsic(Intrinsic.X86Pand, d, dMask);
+                Operand res = context.AddIntrinsic(Intrinsic.X86Por, nShifted, dMasked);
+                if ((op.RegisterSize == RegisterSize.Simd64) || scalar)
+                {
+                    res = context.VectorZeroUpper64(res);
+                }
+                context.Copy(d, res);
+            }
+            else if (Optimizations.UseSse2 && op.Size > 0)
             {
                 Operand d = GetVec(op.Rd);
                 Operand n = GetVec(op.Rn);
@@ -988,7 +1079,40 @@
             ulong mask = (ulong.MaxValue << (eSize - shift)) & (ulong.MaxValue >> (64 - eSize));
-            if (Optimizations.UseSse2 && op.Size > 0)
+            if (shift >= eSize)
+            {
+                if ((op.RegisterSize == RegisterSize.Simd64) || scalar)
+                {
+                    Operand res = context.VectorZeroUpper64(GetVec(op.Rd));
+                    context.Copy(GetVec(op.Rd), res);
+                }
+            }
+            else if (Optimizations.UseGfni && op.Size == 0)
+            {
+                Operand d = GetVec(op.Rd);
+                Operand n = GetVec(op.Rn);
+                ulong bitMatrix = X86GetGf2p8LogicalShiftLeft(-shift);
+                Operand vBitMatrix = X86GetElements(context, bitMatrix, bitMatrix);
+                Operand nShifted = context.AddIntrinsic(Intrinsic.X86Gf2p8affineqb, n, vBitMatrix, Const(0));
+                Operand dMask = X86GetAllElements(context, (long)mask * _masks_SliSri[op.Size]);
+                Operand dMasked = context.AddIntrinsic(Intrinsic.X86Pand, d, dMask);
+                Operand res = context.AddIntrinsic(Intrinsic.X86Por, nShifted, dMasked);
+                if ((op.RegisterSize == RegisterSize.Simd64) || scalar)
+                {
+                    res = context.VectorZeroUpper64(res);
+                }
+                context.Copy(d, res);
+            }
+            else if (Optimizations.UseSse2 && op.Size > 0)
             {
                 Operand d = GetVec(op.Rd);
                 Operand n = GetVec(op.Rn);