gl_rasterizer_cache: Move format conversion to its own file

ReinUsesLisp 2019-02-23 22:45:47 -03:00
parent 0ccd490fcd
commit 0ad3c031f4
7 changed files with 175 additions and 136 deletions

src/video_core/textures/astc.cpp

@@ -23,28 +23,12 @@
 #include "video_core/textures/astc.h"
 
-class BitStream {
+class InputBitStream {
 public:
-    explicit BitStream(unsigned char* ptr, int nBits = 0, int start_offset = 0)
+    explicit InputBitStream(const unsigned char* ptr, int nBits = 0, int start_offset = 0)
         : m_NumBits(nBits), m_CurByte(ptr), m_NextBit(start_offset % 8) {}
 
-    ~BitStream() = default;
-
-    int GetBitsWritten() const {
-        return m_BitsWritten;
-    }
-
-    void WriteBitsR(unsigned int val, unsigned int nBits) {
-        for (unsigned int i = 0; i < nBits; i++) {
-            WriteBit((val >> (nBits - i - 1)) & 1);
-        }
-    }
-
-    void WriteBits(unsigned int val, unsigned int nBits) {
-        for (unsigned int i = 0; i < nBits; i++) {
-            WriteBit((val >> i) & 1);
-        }
-    }
+    ~InputBitStream() = default;
 
     int GetBitsRead() const {
         return m_BitsRead;
@@ -70,6 +54,38 @@ public:
         return ret;
     }
 
 private:
+    const int m_NumBits;
+    const unsigned char* m_CurByte;
+    int m_NextBit = 0;
+    int m_BitsRead = 0;
+
+    bool done = false;
+};
+
+class OutputBitStream {
+public:
+    explicit OutputBitStream(unsigned char* ptr, int nBits = 0, int start_offset = 0)
+        : m_NumBits(nBits), m_CurByte(ptr), m_NextBit(start_offset % 8) {}
+
+    ~OutputBitStream() = default;
+
+    int GetBitsWritten() const {
+        return m_BitsWritten;
+    }
+
+    void WriteBitsR(unsigned int val, unsigned int nBits) {
+        for (unsigned int i = 0; i < nBits; i++) {
+            WriteBit((val >> (nBits - i - 1)) & 1);
+        }
+    }
+
+    void WriteBits(unsigned int val, unsigned int nBits) {
+        for (unsigned int i = 0; i < nBits; i++) {
+            WriteBit((val >> i) & 1);
+        }
+    }
+
+private:
     void WriteBit(int b) {
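
The two hunks above are the heart of the change in this file: the old read/write BitStream is split into a read-only InputBitStream (which now takes a const pointer) and a write-only OutputBitStream. A minimal sketch of how the pair is used, assuming the parts of astc.cpp outside these hunks (ReadBit/ReadBits on the input side, WriteBit on the output side) are unchanged; the buffer and values are illustrative only:

    // Sketch, not from the commit. Per the loops shown above, WriteBits()
    // emits val starting from its lowest bit, while WriteBitsR() emits the
    // same bits in reverse order (most significant bit first).
    unsigned char buf[16] = {};
    OutputBitStream out(buf, 16 * 8, 0);
    out.WriteBits(0b11u, 2);   // emits 1, 1
    out.WriteBitsR(0b100u, 3); // emits 1, 0, 0

    InputBitStream in(buf, 16 * 8, 0); // reading from const data is now legal
    // in.ReadBits(5) is assumed unchanged from the old BitStream; after the
    // call, in.GetBitsRead() would report 5.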
@@ -238,8 +254,8 @@ public:
     // Fills result with the values that are encoded in the given
     // bitstream. We must know beforehand what the maximum possible
     // value is, and how many values we're decoding.
-    static void DecodeIntegerSequence(std::vector<IntegerEncodedValue>& result, BitStream& bits,
-                                      uint32_t maxRange, uint32_t nValues) {
+    static void DecodeIntegerSequence(std::vector<IntegerEncodedValue>& result,
+                                      InputBitStream& bits, uint32_t maxRange, uint32_t nValues) {
         // Determine encoding parameters
         IntegerEncodedValue val = IntegerEncodedValue::CreateEncoding(maxRange);
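
Call sites only swap the stream type; the signature is otherwise the same. As a sketch, with encodedData, maxVal, and count standing in for whatever the caller has (placeholder names, not from the commit):

    std::vector<IntegerEncodedValue> values;
    InputBitStream bits(encodedData); // encodedData: const unsigned char*
    IntegerEncodedValue::DecodeIntegerSequence(values, bits, maxVal, count);
    // values now holds count integers in [0, maxVal], decoded from the
    // trit/quint/bit packing that CreateEncoding(maxVal) selected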
@@ -267,7 +283,7 @@
     }
 
 private:
-    static void DecodeTritBlock(BitStream& bits, std::vector<IntegerEncodedValue>& result,
+    static void DecodeTritBlock(InputBitStream& bits, std::vector<IntegerEncodedValue>& result,
                                 uint32_t nBitsPerValue) {
         // Implement the algorithm in section C.2.12
         uint32_t m[5];
@@ -327,7 +343,7 @@ private:
         }
     }
 
-    static void DecodeQuintBlock(BitStream& bits, std::vector<IntegerEncodedValue>& result,
+    static void DecodeQuintBlock(InputBitStream& bits, std::vector<IntegerEncodedValue>& result,
                                  uint32_t nBitsPerValue) {
         // Implement the algorithm in section C.2.12
         uint32_t m[3];
@@ -406,7 +422,7 @@ struct TexelWeightParams {
     }
 };
 
-static TexelWeightParams DecodeBlockInfo(BitStream& strm) {
+static TexelWeightParams DecodeBlockInfo(InputBitStream& strm) {
     TexelWeightParams params;
 
     // Read the entire block mode all at once
@@ -605,7 +621,7 @@ static TexelWeightParams DecodeBlockInfo(BitStream& strm) {
     return params;
 }
 
-static void FillVoidExtentLDR(BitStream& strm, uint32_t* const outBuf, uint32_t blockWidth,
+static void FillVoidExtentLDR(InputBitStream& strm, uint32_t* const outBuf, uint32_t blockWidth,
                               uint32_t blockHeight) {
     // Don't actually care about the void extent, just read the bits...
     for (int i = 0; i < 4; ++i) {
@@ -821,7 +837,7 @@ static void DecodeColorValues(uint32_t* out, uint8_t* data, const uint32_t* mode
 
     // We now have enough to decode our integer sequence.
     std::vector<IntegerEncodedValue> decodedColorValues;
-    BitStream colorStream(data);
+    InputBitStream colorStream(data);
     IntegerEncodedValue::DecodeIntegerSequence(decodedColorValues, colorStream, range, nValues);
 
     // Once we have the decoded values, we need to dequantize them to the 0-255 range
@@ -1365,9 +1381,9 @@ static void ComputeEndpoints(Pixel& ep1, Pixel& ep2, const uint32_t*& colorValue
 #undef READ_INT_VALUES
 }
 
-static void DecompressBlock(uint8_t inBuf[16], const uint32_t blockWidth,
+static void DecompressBlock(const uint8_t inBuf[16], const uint32_t blockWidth,
                             const uint32_t blockHeight, uint32_t* outBuf) {
-    BitStream strm(inBuf);
+    InputBitStream strm(inBuf);
     TexelWeightParams weightParams = DecodeBlockInfo(strm);
 
     // Was there an error?
@@ -1421,7 +1437,7 @@ static void DecompressBlock(uint8_t inBuf[16], const uint32_t blockWidth,
     // Define color data.
     uint8_t colorEndpointData[16];
     memset(colorEndpointData, 0, sizeof(colorEndpointData));
-    BitStream colorEndpointStream(colorEndpointData, 16 * 8, 0);
+    OutputBitStream colorEndpointStream(colorEndpointData, 16 * 8, 0);
 
     // Read extra config data...
     uint32_t baseCEM = 0;
@@ -1549,7 +1565,7 @@ static void DecompressBlock(uint8_t inBuf[16], const uint32_t blockWidth,
     memset(texelWeightData + clearByteStart, 0, 16 - clearByteStart);
 
     std::vector<IntegerEncodedValue> texelWeightValues;
-    BitStream weightStream(texelWeightData);
+    InputBitStream weightStream(texelWeightData);
 
     IntegerEncodedValue::DecodeIntegerSequence(texelWeightValues, weightStream,
                                                weightParams.m_MaxWeight,
@@ -1597,7 +1613,7 @@ static void DecompressBlock(uint8_t inBuf[16], const uint32_t blockWidth,
 namespace Tegra::Texture::ASTC {
 
-std::vector<uint8_t> Decompress(std::vector<uint8_t>& data, uint32_t width, uint32_t height,
+std::vector<uint8_t> Decompress(const uint8_t* data, uint32_t width, uint32_t height,
                                 uint32_t depth, uint32_t block_width, uint32_t block_height) {
     uint32_t blockIdx = 0;
     std::vector<uint8_t> outData(height * width * depth * 4);
@@ -1605,7 +1621,7 @@ std::vector<uint8_t> Decompress(std::vector<uint8_t>& data, uint32_t width, uint
     for (uint32_t j = 0; j < height; j += block_height) {
         for (uint32_t i = 0; i < width; i += block_width) {
-            uint8_t* blockPtr = data.data() + blockIdx * 16;
+            const uint8_t* blockPtr = data + blockIdx * 16;
 
             // Blocks can be at most 12x12
             uint32_t uncompData[144];
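
With the pointer-based entry point, callers no longer need a mutable std::vector; any contiguous buffer works. A hypothetical call site (LoadAstcBlob and the dimension variables are placeholders, not part of this commit):

    std::vector<uint8_t> blob = LoadAstcBlob(); // hypothetical helper
    const std::vector<uint8_t> rgba = Tegra::Texture::ASTC::Decompress(
        blob.data(), width, height, depth, block_width, block_height);
    // rgba holds width * height * depth * 4 bytes of decoded RGBA8 texels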