#ifdef _MSC_VER
#include <intrin.h>
#endif
#include <immintrin.h>
#include <stdint.h>
#ifdef _MSC_VER
#define JPGD_SIMD_ALIGN(type, name) __declspec(align(16)) type name
#else
#define JPGD_SIMD_ALIGN(type, name) type name __attribute__((aligned(16)))
#endif
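// Fixed-point precision of the AP-922-style inverse DCT below: the row pass
// accumulates in 32 bits and shifts right by SHIFT_INV_ROW (the literal 12
// used in the row pass equals 16 - BITS_INV_ACC); the column pass works in
// 16 bits and shifts right by SHIFT_INV_COL.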
#define BITS_INV_ACC 4
#define SHIFT_INV_ROW (16 - BITS_INV_ACC)
#define SHIFT_INV_COL (1 + BITS_INV_ACC)
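// Rounding biases: IRND_INV_ROW == 1 << (SHIFT_INV_ROW - 1) and
// IRND_INV_COL == 1 << (SHIFT_INV_COL - 1), written here in factored form;
// IRND_INV_CORR is the column bias less one, used by the correction path.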
const short IRND_INV_ROW = 1024 * (6 - BITS_INV_ACC);
const short IRND_INV_COL = 16 * (BITS_INV_ACC - 3);
const short IRND_INV_CORR = IRND_INV_COL - 1;
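// Column-pass constants, broadcast across all eight lanes: tg_n_16 holds
// tan(n*pi/16) in 1.16 fixed point (values >= 0.5 are stored as value - 1;
// the mulhi/adds pairs in the column pass restore the full product), and
// cos_4_16 holds cos(pi/4) - 1 in the same format.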
JPGD_SIMD_ALIGN(short, shortM128_one_corr[8]) = {1, 1, 1, 1, 1, 1, 1, 1};
JPGD_SIMD_ALIGN(short, shortM128_round_inv_row[8]) = {IRND_INV_ROW, 0, IRND_INV_ROW, 0, IRND_INV_ROW, 0, IRND_INV_ROW, 0};
JPGD_SIMD_ALIGN(short, shortM128_round_inv_col[8]) = {IRND_INV_COL, IRND_INV_COL, IRND_INV_COL, IRND_INV_COL, IRND_INV_COL, IRND_INV_COL, IRND_INV_COL, IRND_INV_COL};
JPGD_SIMD_ALIGN(short, shortM128_round_inv_corr[8])= {IRND_INV_CORR, IRND_INV_CORR, IRND_INV_CORR, IRND_INV_CORR, IRND_INV_CORR, IRND_INV_CORR, IRND_INV_CORR, IRND_INV_CORR};
JPGD_SIMD_ALIGN(short, shortM128_tg_1_16[8]) = {13036, 13036, 13036, 13036, 13036, 13036, 13036, 13036};
JPGD_SIMD_ALIGN(short, shortM128_tg_2_16[8]) = {27146, 27146, 27146, 27146, 27146, 27146, 27146, 27146};
JPGD_SIMD_ALIGN(short, shortM128_tg_3_16[8]) = {-21746, -21746, -21746, -21746, -21746, -21746, -21746, -21746};
JPGD_SIMD_ALIGN(short, shortM128_cos_4_16[8]) = {-19195, -19195, -19195, -19195, -19195, -19195, -19195, -19195};
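// Row-pass coefficient tables, 32 shorts each, laid out for _mm_madd_epi16
// against a shuffled input row: tab_i_04 serves rows 0 and 4, tab_i_17 rows
// 1 and 7, tab_i_26 rows 2 and 6, and tab_i_35 rows 3 and 5.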
JPGD_SIMD_ALIGN(short, shortM128_tab_i_04[]) = {
16384, 21407, 16384, 8867,
16384, -8867, 16384, -21407,
16384, 8867, -16384, -21407,
-16384, 21407, 16384, -8867,
22725, 19266, 19266, -4520,
12873, -22725, 4520, -12873,
12873, 4520, -22725, -12873,
4520, 19266, 19266, -22725};
JPGD_SIMD_ALIGN(short, shortM128_tab_i_17[]) = {
22725, 29692, 22725, 12299,
22725, -12299, 22725, -29692,
22725, 12299, -22725, -29692,
-22725, 29692, 22725, -12299,
31521, 26722, 26722, -6270,
17855, -31521, 6270, -17855,
17855, 6270, -31521, -17855,
6270, 26722, 26722, -31521};
JPGD_SIMD_ALIGN(short, shortM128_tab_i_26[]) = {
21407, 27969, 21407, 11585,
21407, -11585, 21407, -27969,
21407, 11585, -21407, -27969,
-21407, 27969, 21407, -11585,
29692, 25172, 25172, -5906,
16819, -29692, 5906, -16819,
16819, 5906, -29692, -16819,
5906, 25172, 25172, -29692};
JPGD_SIMD_ALIGN(short, shortM128_tab_i_35[]) = {
19266, 25172, 19266, 10426,
19266, -10426, 19266, -25172,
19266, 10426, -19266, -25172,
-19266, 25172, 19266, -10426,
26722, 22654, 22654, -5315,
15137, -26722, 5315, -15137,
15137, 5315, -26722, -15137,
5315, 22654, 22654, -26722};
JPGD_SIMD_ALIGN(short, shortM128_128[8]) = { 128, 128, 128, 128, 128, 128, 128, 128 };
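// Two-pass 8x8 inverse DCT (AP-922 style): a row pass that multiply-
// accumulates each row against the tables above, then a column pass built on
// the tangent/cosine constants, followed by a +128 level shift and a pack to
// unsigned bytes. Both pInput (64 shorts) and pOutputUB (64 bytes) must be
// 16-byte aligned, since all loads and stores are aligned.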
void idctSSEShortU8(const short *pInput, uint8_t * pOutputUB)
{
__m128i r_xmm0, r_xmm4;
__m128i r_xmm1, r_xmm2, r_xmm3, r_xmm5, r_xmm6, r_xmm7;
__m128i row0, row1, row2, row3, row4, row5, row6, row7;
short *pTab_i_04 = shortM128_tab_i_04;
short *pTab_i_26 = shortM128_tab_i_26;
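// Row pass, rows 0 and 2: shuffle each row into madd order, multiply-
// accumulate against the coefficient tables, add the row rounding bias, and
// shift right by 12 (== SHIFT_INV_ROW).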
r_xmm0 = _mm_load_si128((const __m128i *) pInput);
r_xmm4 = _mm_load_si128((const __m128i *) (&pInput[2*8]));
r_xmm0 = _mm_shufflelo_epi16(r_xmm0, 0xd8);
r_xmm1 = _mm_shuffle_epi32(r_xmm0, 0);
r_xmm1 = _mm_madd_epi16(r_xmm1, *((__m128i *) pTab_i_04));
r_xmm3 = _mm_shuffle_epi32(r_xmm0, 0x55);
r_xmm0 = _mm_shufflehi_epi16(r_xmm0, 0xd8);
r_xmm3 = _mm_madd_epi16(r_xmm3, *((__m128i *) &pTab_i_04[16]));
r_xmm2 = _mm_shuffle_epi32(r_xmm0, 0xaa);
r_xmm0 = _mm_shuffle_epi32(r_xmm0, 0xff);
r_xmm2 = _mm_madd_epi16(r_xmm2, *((__m128i *) &pTab_i_04[8]));
r_xmm4 = _mm_shufflehi_epi16(r_xmm4, 0xd8);
r_xmm1 = _mm_add_epi32(r_xmm1, *((__m128i *) shortM128_round_inv_row));
r_xmm4 = _mm_shufflelo_epi16(r_xmm4, 0xd8);
r_xmm0 = _mm_madd_epi16(r_xmm0, *((__m128i *) &pTab_i_04[24]));
r_xmm5 = _mm_shuffle_epi32(r_xmm4, 0);
r_xmm6 = _mm_shuffle_epi32(r_xmm4, 0xaa);
r_xmm5 = _mm_madd_epi16(r_xmm5, *((__m128i *) &shortM128_tab_i_26[0]));
r_xmm1 = _mm_add_epi32(r_xmm1, r_xmm2);
r_xmm2 = r_xmm1;
r_xmm7 = _mm_shuffle_epi32(r_xmm4, 0x55);
r_xmm6 = _mm_madd_epi16(r_xmm6, *((__m128i *) &shortM128_tab_i_26[8]));
r_xmm0 = _mm_add_epi32(r_xmm0, r_xmm3);
r_xmm4 = _mm_shuffle_epi32(r_xmm4, 0xff);
r_xmm2 = _mm_sub_epi32(r_xmm2, r_xmm0);
r_xmm7 = _mm_madd_epi16(r_xmm7, *((__m128i *) &shortM128_tab_i_26[16]));
r_xmm0 = _mm_add_epi32(r_xmm0, r_xmm1);
r_xmm2 = _mm_srai_epi32(r_xmm2, 12);
r_xmm5 = _mm_add_epi32(r_xmm5, *((__m128i *) shortM128_round_inv_row));
r_xmm4 = _mm_madd_epi16(r_xmm4, *((__m128i *) &shortM128_tab_i_26[24]));
r_xmm5 = _mm_add_epi32(r_xmm5, r_xmm6);
r_xmm6 = r_xmm5;
r_xmm0 = _mm_srai_epi32(r_xmm0, 12);
r_xmm2 = _mm_shuffle_epi32(r_xmm2, 0x1b);
row0 = _mm_packs_epi32(r_xmm0, r_xmm2);
r_xmm4 = _mm_add_epi32(r_xmm4, r_xmm7);
r_xmm6 = _mm_sub_epi32(r_xmm6, r_xmm4);
r_xmm4 = _mm_add_epi32(r_xmm4, r_xmm5);
r_xmm6 = _mm_srai_epi32(r_xmm6, 12);
r_xmm4 = _mm_srai_epi32(r_xmm4, 12);
r_xmm6 = _mm_shuffle_epi32(r_xmm6, 0x1b);
row2 = _mm_packs_epi32(r_xmm4, r_xmm6);
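// Rows 4 and 6 reuse the same tables as rows 0 and 2.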
r_xmm0 = _mm_load_si128((const __m128i *) (&pInput[4*8]));
r_xmm4 = _mm_load_si128((const __m128i *) (&pInput[6*8]));
r_xmm0 = _mm_shufflelo_epi16(r_xmm0, 0xd8);
r_xmm1 = _mm_shuffle_epi32(r_xmm0, 0);
r_xmm1 = _mm_madd_epi16(r_xmm1, *((__m128i *) pTab_i_04));
r_xmm3 = _mm_shuffle_epi32(r_xmm0, 0x55);
r_xmm0 = _mm_shufflehi_epi16(r_xmm0, 0xd8);
r_xmm3 = _mm_madd_epi16(r_xmm3, *((__m128i *) &pTab_i_04[16]));
r_xmm2 = _mm_shuffle_epi32(r_xmm0, 0xaa);
r_xmm0 = _mm_shuffle_epi32(r_xmm0, 0xff);
r_xmm2 = _mm_madd_epi16(r_xmm2, *((__m128i *) &pTab_i_04[8]));
r_xmm4 = _mm_shufflehi_epi16(r_xmm4, 0xd8);
r_xmm1 = _mm_add_epi32(r_xmm1, *((__m128i *) shortM128_round_inv_row));
r_xmm4 = _mm_shufflelo_epi16(r_xmm4, 0xd8);
r_xmm0 = _mm_madd_epi16(r_xmm0, *((__m128i *) &pTab_i_04[24]));
r_xmm5 = _mm_shuffle_epi32(r_xmm4, 0);
r_xmm6 = _mm_shuffle_epi32(r_xmm4, 0xaa);
r_xmm5 = _mm_madd_epi16(r_xmm5, *((__m128i *) &shortM128_tab_i_26[0]));
r_xmm1 = _mm_add_epi32(r_xmm1, r_xmm2);
r_xmm2 = r_xmm1;
r_xmm7 = _mm_shuffle_epi32(r_xmm4, 0x55);
r_xmm6 = _mm_madd_epi16(r_xmm6, *((__m128i *) &shortM128_tab_i_26[8]));
r_xmm0 = _mm_add_epi32(r_xmm0, r_xmm3);
r_xmm4 = _mm_shuffle_epi32(r_xmm4, 0xff);
r_xmm2 = _mm_sub_epi32(r_xmm2, r_xmm0);
r_xmm7 = _mm_madd_epi16(r_xmm7, *((__m128i *) &shortM128_tab_i_26[16]));
r_xmm0 = _mm_add_epi32(r_xmm0, r_xmm1);
r_xmm2 = _mm_srai_epi32(r_xmm2, 12);
r_xmm5 = _mm_add_epi32(r_xmm5, *((__m128i *) shortM128_round_inv_row));
r_xmm4 = _mm_madd_epi16(r_xmm4, *((__m128i *) &shortM128_tab_i_26[24]));
r_xmm5 = _mm_add_epi32(r_xmm5, r_xmm6);
r_xmm6 = r_xmm5;
r_xmm0 = _mm_srai_epi32(r_xmm0, 12);
r_xmm2 = _mm_shuffle_epi32(r_xmm2, 0x1b);
row4 = _mm_packs_epi32(r_xmm0, r_xmm2);
r_xmm4 = _mm_add_epi32(r_xmm4, r_xmm7);
r_xmm6 = _mm_sub_epi32(r_xmm6, r_xmm4);
r_xmm4 = _mm_add_epi32(r_xmm4, r_xmm5);
r_xmm6 = _mm_srai_epi32(r_xmm6, 12);
r_xmm4 = _mm_srai_epi32(r_xmm4, 12);
r_xmm6 = _mm_shuffle_epi32(r_xmm6, 0x1b);
row6 = _mm_packs_epi32(r_xmm4, r_xmm6);
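// Odd rows: switch to tab_i_35 (rows 3 and 5) and tab_i_17 (rows 1 and 7),
// then process rows 3 and 1.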
pTab_i_04 = shortM128_tab_i_35;
pTab_i_26 = shortM128_tab_i_17;
r_xmm0 = _mm_load_si128((const __m128i *) (&pInput[3*8]));
r_xmm4 = _mm_load_si128((const __m128i *) (&pInput[1*8]));
r_xmm0 = _mm_shufflelo_epi16(r_xmm0, 0xd8);
r_xmm1 = _mm_shuffle_epi32(r_xmm0, 0);
r_xmm1 = _mm_madd_epi16(r_xmm1, *((__m128i *) pTab_i_04));
r_xmm3 = _mm_shuffle_epi32(r_xmm0, 0x55);
r_xmm0 = _mm_shufflehi_epi16(r_xmm0, 0xd8);
r_xmm3 = _mm_madd_epi16(r_xmm3, *((__m128i *) &pTab_i_04[16]));
r_xmm2 = _mm_shuffle_epi32(r_xmm0, 0xaa);
r_xmm0 = _mm_shuffle_epi32(r_xmm0, 0xff);
r_xmm2 = _mm_madd_epi16(r_xmm2, *((__m128i *) &pTab_i_04[8]));
r_xmm4 = _mm_shufflehi_epi16(r_xmm4, 0xd8);
r_xmm1 = _mm_add_epi32(r_xmm1, *((__m128i *) shortM128_round_inv_row));
r_xmm4 = _mm_shufflelo_epi16(r_xmm4, 0xd8);
r_xmm0 = _mm_madd_epi16(r_xmm0, *((__m128i *) &pTab_i_04[24]));
r_xmm5 = _mm_shuffle_epi32(r_xmm4, 0);
r_xmm6 = _mm_shuffle_epi32(r_xmm4, 0xaa);
r_xmm5 = _mm_madd_epi16(r_xmm5, *((__m128i *) &pTab_i_26[0]));
r_xmm1 = _mm_add_epi32(r_xmm1, r_xmm2);
r_xmm2 = r_xmm1;
r_xmm7 = _mm_shuffle_epi32(r_xmm4, 0x55);
r_xmm6 = _mm_madd_epi16(r_xmm6, *((__m128i *) &pTab_i_26[8]));
r_xmm0 = _mm_add_epi32(r_xmm0, r_xmm3);
r_xmm4 = _mm_shuffle_epi32(r_xmm4, 0xff);
r_xmm2 = _mm_sub_epi32(r_xmm2, r_xmm0);
r_xmm7 = _mm_madd_epi16(r_xmm7, *((__m128i *) &pTab_i_26[16]));
r_xmm0 = _mm_add_epi32(r_xmm0, r_xmm1);
r_xmm2 = _mm_srai_epi32(r_xmm2, 12);
r_xmm5 = _mm_add_epi32(r_xmm5, *((__m128i *) shortM128_round_inv_row));
r_xmm4 = _mm_madd_epi16(r_xmm4, *((__m128i *) &pTab_i_26[24]));
r_xmm5 = _mm_add_epi32(r_xmm5, r_xmm6);
r_xmm6 = r_xmm5;
r_xmm0 = _mm_srai_epi32(r_xmm0, 12);
r_xmm2 = _mm_shuffle_epi32(r_xmm2, 0x1b);
row3 = _mm_packs_epi32(r_xmm0, r_xmm2);
r_xmm4 = _mm_add_epi32(r_xmm4, r_xmm7);
r_xmm6 = _mm_sub_epi32(r_xmm6, r_xmm4);
r_xmm4 = _mm_add_epi32(r_xmm4, r_xmm5);
r_xmm6 = _mm_srai_epi32(r_xmm6, 12);
r_xmm4 = _mm_srai_epi32(r_xmm4, 12);
r_xmm6 = _mm_shuffle_epi32(r_xmm6, 0x1b);
row1 = _mm_packs_epi32(r_xmm4, r_xmm6);
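// Rows 5 and 7.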
r_xmm0 = _mm_load_si128((const __m128i *) (&pInput[5*8]));
r_xmm4 = _mm_load_si128((const __m128i *) (&pInput[7*8]));
r_xmm0 = _mm_shufflelo_epi16(r_xmm0, 0xd8);
r_xmm1 = _mm_shuffle_epi32(r_xmm0, 0);
r_xmm1 = _mm_madd_epi16(r_xmm1, *((__m128i *) pTab_i_04));
r_xmm3 = _mm_shuffle_epi32(r_xmm0, 0x55);
r_xmm0 = _mm_shufflehi_epi16(r_xmm0, 0xd8);
r_xmm3 = _mm_madd_epi16(r_xmm3, *((__m128i *) &pTab_i_04[16]));
r_xmm2 = _mm_shuffle_epi32(r_xmm0, 0xaa);
r_xmm0 = _mm_shuffle_epi32(r_xmm0, 0xff);
r_xmm2 = _mm_madd_epi16(r_xmm2, *((__m128i *) &pTab_i_04[8]));
r_xmm4 = _mm_shufflehi_epi16(r_xmm4, 0xd8);
r_xmm1 = _mm_add_epi32(r_xmm1, *((__m128i *) shortM128_round_inv_row));
r_xmm4 = _mm_shufflelo_epi16(r_xmm4, 0xd8);
r_xmm0 = _mm_madd_epi16(r_xmm0, *((__m128i *) &pTab_i_04[24]));
r_xmm5 = _mm_shuffle_epi32(r_xmm4, 0);
r_xmm6 = _mm_shuffle_epi32(r_xmm4, 0xaa);
r_xmm5 = _mm_madd_epi16(r_xmm5, *((__m128i *) &pTab_i_26[0]));
r_xmm1 = _mm_add_epi32(r_xmm1, r_xmm2);
r_xmm2 = r_xmm1;
r_xmm7 = _mm_shuffle_epi32(r_xmm4, 0x55);
r_xmm6 = _mm_madd_epi16(r_xmm6, *((__m128i *) &pTab_i_26[8]));
r_xmm0 = _mm_add_epi32(r_xmm0, r_xmm3);
r_xmm4 = _mm_shuffle_epi32(r_xmm4, 0xff);
r_xmm2 = _mm_sub_epi32(r_xmm2, r_xmm0);
r_xmm7 = _mm_madd_epi16(r_xmm7, *((__m128i *) &pTab_i_26[16]));
r_xmm0 = _mm_add_epi32(r_xmm0, r_xmm1);
r_xmm2 = _mm_srai_epi32(r_xmm2, 12);
r_xmm5 = _mm_add_epi32(r_xmm5, *((__m128i *) shortM128_round_inv_row));
r_xmm4 = _mm_madd_epi16(r_xmm4, *((__m128i *) &pTab_i_26[24]));
r_xmm5 = _mm_add_epi32(r_xmm5, r_xmm6);
r_xmm6 = r_xmm5;
r_xmm0 = _mm_srai_epi32(r_xmm0, 12);
r_xmm2 = _mm_shuffle_epi32(r_xmm2, 0x1b);
row5 = _mm_packs_epi32(r_xmm0, r_xmm2);
r_xmm4 = _mm_add_epi32(r_xmm4, r_xmm7);
r_xmm6 = _mm_sub_epi32(r_xmm6, r_xmm4);
r_xmm4 = _mm_add_epi32(r_xmm4, r_xmm5);
r_xmm6 = _mm_srai_epi32(r_xmm6, 12);
r_xmm4 = _mm_srai_epi32(r_xmm4, 12);
r_xmm6 = _mm_shuffle_epi32(r_xmm6, 0x1b);
row7 = _mm_packs_epi32(r_xmm4, r_xmm6);
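// Column pass: one 1-D IDCT applied to all eight columns at once. The
// mulhi/adds pairs multiply by the tangent and cosine constants; the
// one_corr additions/ORs and the round_inv_corr bias are the rounding
// corrections of the AP-922 scheme.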
r_xmm1 = _mm_load_si128((__m128i *) shortM128_tg_3_16);
r_xmm2 = row5;
r_xmm3 = row3;
r_xmm0 = _mm_mulhi_epi16(row5, r_xmm1);
r_xmm1 = _mm_mulhi_epi16(r_xmm1, r_xmm3);
r_xmm5 = _mm_load_si128((__m128i *) shortM128_tg_1_16);
r_xmm6 = row7;
r_xmm4 = _mm_mulhi_epi16(row7, r_xmm5);
r_xmm0 = _mm_adds_epi16(r_xmm0, r_xmm2);
r_xmm5 = _mm_mulhi_epi16(r_xmm5, row1);
r_xmm1 = _mm_adds_epi16(r_xmm1, r_xmm3);
r_xmm7 = row6;
r_xmm0 = _mm_adds_epi16(r_xmm0, r_xmm3);
r_xmm3 = _mm_load_si128((__m128i *) shortM128_tg_2_16);
r_xmm2 = _mm_subs_epi16(r_xmm2, r_xmm1);
r_xmm7 = _mm_mulhi_epi16(r_xmm7, r_xmm3);
r_xmm1 = r_xmm0;
r_xmm3 = _mm_mulhi_epi16(r_xmm3, row2);
r_xmm5 = _mm_subs_epi16(r_xmm5, r_xmm6);
r_xmm4 = _mm_adds_epi16(r_xmm4, row1);
r_xmm0 = _mm_adds_epi16(r_xmm0, r_xmm4);
r_xmm0 = _mm_adds_epi16(r_xmm0, *((__m128i *) shortM128_one_corr));
r_xmm4 = _mm_subs_epi16(r_xmm4, r_xmm1);
r_xmm6 = r_xmm5;
r_xmm5 = _mm_subs_epi16(r_xmm5, r_xmm2);
r_xmm5 = _mm_adds_epi16(r_xmm5, *((__m128i *) shortM128_one_corr));
r_xmm6 = _mm_adds_epi16(r_xmm6, r_xmm2);
__m128i temp3, temp7;
temp7 = r_xmm0;
r_xmm1 = r_xmm4;
r_xmm0 = _mm_load_si128((__m128i *) shortM128_cos_4_16);
r_xmm4 = _mm_adds_epi16(r_xmm4, r_xmm5);
r_xmm2 = _mm_load_si128((__m128i *) shortM128_cos_4_16);
r_xmm2 = _mm_mulhi_epi16(r_xmm2, r_xmm4);
temp3 = r_xmm6;
r_xmm1 = _mm_subs_epi16(r_xmm1, r_xmm5);
r_xmm7 = _mm_adds_epi16(r_xmm7, row2);
r_xmm3 = _mm_subs_epi16(r_xmm3, row6);
r_xmm6 = row0;
r_xmm0 = _mm_mulhi_epi16(r_xmm0, r_xmm1);
r_xmm5 = row4;
r_xmm5 = _mm_adds_epi16(r_xmm5, r_xmm6);
r_xmm6 = _mm_subs_epi16(r_xmm6, row4);
r_xmm4 = _mm_adds_epi16(r_xmm4, r_xmm2);
r_xmm4 = _mm_or_si128(r_xmm4, *((__m128i *) shortM128_one_corr));
r_xmm0 = _mm_adds_epi16(r_xmm0, r_xmm1);
r_xmm0 = _mm_or_si128(r_xmm0, *((__m128i *) shortM128_one_corr));
r_xmm2 = r_xmm5;
r_xmm5 = _mm_adds_epi16(r_xmm5, r_xmm7);
r_xmm1 = r_xmm6;
r_xmm5 = _mm_adds_epi16(r_xmm5, *((__m128i *) shortM128_round_inv_col));
r_xmm2 = _mm_subs_epi16(r_xmm2, r_xmm7);
r_xmm7 = temp7;
r_xmm6 = _mm_adds_epi16(r_xmm6, r_xmm3);
r_xmm6 = _mm_adds_epi16(r_xmm6, *((__m128i *) shortM128_round_inv_col));
r_xmm7 = _mm_adds_epi16(r_xmm7, r_xmm5);
r_xmm7 = _mm_srai_epi16(r_xmm7, SHIFT_INV_COL);
r_xmm1 = _mm_subs_epi16(r_xmm1, r_xmm3);
r_xmm1 = _mm_adds_epi16(r_xmm1, *((__m128i *) shortM128_round_inv_corr));
r_xmm3 = r_xmm6;
r_xmm2 = _mm_adds_epi16(r_xmm2, *((__m128i *) shortM128_round_inv_corr));
r_xmm6 = _mm_adds_epi16(r_xmm6, r_xmm4);
__m128i r0 = r_xmm7;
r_xmm6 = _mm_srai_epi16(r_xmm6, SHIFT_INV_COL);
r_xmm7 = r_xmm1;
r_xmm1 = _mm_adds_epi16(r_xmm1, r_xmm0);
__m128i r1 = r_xmm6;
r_xmm1 = _mm_srai_epi16(r_xmm1, SHIFT_INV_COL);
r_xmm6 = temp3;
r_xmm7 = _mm_subs_epi16(r_xmm7, r_xmm0);
r_xmm7 = _mm_srai_epi16(r_xmm7, SHIFT_INV_COL);
__m128i r2 = r_xmm1;
r_xmm5 = _mm_subs_epi16(r_xmm5, temp7);
r_xmm5 = _mm_srai_epi16(r_xmm5, SHIFT_INV_COL);
__m128i r7 = r_xmm5;
r_xmm3 = _mm_subs_epi16(r_xmm3, r_xmm4);
r_xmm6 = _mm_adds_epi16(r_xmm6, r_xmm2);
r_xmm2 = _mm_subs_epi16(r_xmm2, temp3);
r_xmm6 = _mm_srai_epi16(r_xmm6, SHIFT_INV_COL);
r_xmm2 = _mm_srai_epi16(r_xmm2, SHIFT_INV_COL);
__m128i r3 = r_xmm6;
r_xmm3 = _mm_srai_epi16(r_xmm3, SHIFT_INV_COL);
__m128i r4 = r_xmm2;
__m128i r5 = r_xmm7;
__m128i r6 = r_xmm3;
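// Level shift: add 128 to every sample, then pack with unsigned saturation.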
r0 = _mm_add_epi16(*(const __m128i *)shortM128_128, r0);
r1 = _mm_add_epi16(*(const __m128i *)shortM128_128, r1);
r2 = _mm_add_epi16(*(const __m128i *)shortM128_128, r2);
r3 = _mm_add_epi16(*(const __m128i *)shortM128_128, r3);
r4 = _mm_add_epi16(*(const __m128i *)shortM128_128, r4);
r5 = _mm_add_epi16(*(const __m128i *)shortM128_128, r5);
r6 = _mm_add_epi16(*(const __m128i *)shortM128_128, r6);
r7 = _mm_add_epi16(*(const __m128i *)shortM128_128, r7);
__m128i *pOut = (__m128i *)pOutputUB;
_mm_store_si128(pOut + 0, _mm_packus_epi16(r0, r1));
_mm_store_si128(pOut + 1, _mm_packus_epi16(r2, r3));
_mm_store_si128(pOut + 2, _mm_packus_epi16(r4, r5));
_mm_store_si128(pOut + 3, _mm_packus_epi16(r6, r7));
}
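
// Usage sketch (illustrative only, not part of the original source): transform
// one 8x8 block of dequantized coefficients. idctExampleUsage and its buffers
// are hypothetical names, and the coefficient scaling must match whatever
// dequantization step feeds this routine.
void idctExampleUsage(void)
{
JPGD_SIMD_ALIGN(short, coeffs[64]) = { 0 }; // hypothetical input block (all zeros)
JPGD_SIMD_ALIGN(uint8_t, pixels[64]); // receives the 8x8 output samples
idctSSEShortU8(coeffs, pixels); // each pixels[i] is a clamped 8-bit sample
}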