Path: blob/master/thirdparty/libwebp/src/dsp/lossless_enc_avx2.c
// Copyright 2025 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// AVX2 variant of methods for lossless encoder
//
// Author: Vincent Rabaud ([email protected])

#include "src/dsp/dsp.h"

#if defined(WEBP_USE_AVX2)
#include <emmintrin.h>
#include <immintrin.h>

#include <assert.h>
#include <stddef.h>

#include "src/dsp/cpu.h"
#include "src/dsp/lossless.h"
#include "src/dsp/lossless_common.h"
#include "src/utils/utils.h"
#include "src/webp/format_constants.h"
#include "src/webp/types.h"

//------------------------------------------------------------------------------
// Subtract-Green Transform

static void SubtractGreenFromBlueAndRed_AVX2(uint32_t* argb_data,
                                             int num_pixels) {
  int i;
  const __m256i kCstShuffle = _mm256_set_epi8(
      -1, 29, -1, 29, -1, 25, -1, 25, -1, 21, -1, 21, -1, 17, -1, 17, -1, 13,
      -1, 13, -1, 9, -1, 9, -1, 5, -1, 5, -1, 1, -1, 1);
  for (i = 0; i + 8 <= num_pixels; i += 8) {
    const __m256i in = _mm256_loadu_si256((__m256i*)&argb_data[i]);  // argb
    const __m256i in_0g0g = _mm256_shuffle_epi8(in, kCstShuffle);
    const __m256i out = _mm256_sub_epi8(in, in_0g0g);
    _mm256_storeu_si256((__m256i*)&argb_data[i], out);
  }
  // fallthrough and finish off with plain-SSE
  if (i != num_pixels) {
    VP8LSubtractGreenFromBlueAndRed_SSE(argb_data + i, num_pixels - i);
  }
}

//------------------------------------------------------------------------------
// Color Transform

// For sign-extended multiplying constants, pre-shifted by 5:
#define CST_5b(X) (((int16_t)((uint16_t)(X) << 8)) >> 5)

#define MK_CST_16(HI, LO) \
  _mm256_set1_epi32((int)(((uint32_t)(HI) << 16) | ((LO) & 0xffff)))
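
// How the two macros above combine: CST_5b(X) sign-extends the 8-bit
// transform code X into the high byte of an int16 (X << 8) and then
// arithmetic-shifts it right by 5, leaving X << 3. Since _mm256_mulhi_epi16
// keeps only the high 16 bits of the signed product, a channel value c
// placed in the high byte of its 16-bit lane (c << 8) yields
//   ((c << 8) * (X << 3)) >> 16 == (c * X) >> 5,
// i.e. the ColorTransformDelta of the lossless format, with no extra shift
// instruction. E.g. c = 100, X = 16: (100 * 16) >> 5 = 50.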

static void TransformColor_AVX2(const VP8LMultipliers* WEBP_RESTRICT const m,
                                uint32_t* WEBP_RESTRICT argb_data,
                                int num_pixels) {
  const __m256i mults_rb =
      MK_CST_16(CST_5b(m->green_to_red), CST_5b(m->green_to_blue));
  const __m256i mults_b2 = MK_CST_16(CST_5b(m->red_to_blue), 0);
  const __m256i mask_rb = _mm256_set1_epi32(0x00ff00ff);  // red-blue masks
  const __m256i kCstShuffle = _mm256_set_epi8(
      29, -1, 29, -1, 25, -1, 25, -1, 21, -1, 21, -1, 17, -1, 17, -1, 13, -1,
      13, -1, 9, -1, 9, -1, 5, -1, 5, -1, 1, -1, 1, -1);
  int i;
  for (i = 0; i + 8 <= num_pixels; i += 8) {
    const __m256i in = _mm256_loadu_si256((__m256i*)&argb_data[i]);  // argb
    const __m256i A = _mm256_shuffle_epi8(in, kCstShuffle);  // g0g0
    const __m256i B = _mm256_mulhi_epi16(A, mults_rb);       // x dr x db1
    const __m256i C = _mm256_slli_epi16(in, 8);              // r 0 b 0
    const __m256i D = _mm256_mulhi_epi16(C, mults_b2);       // x db2 0 0
    const __m256i E = _mm256_srli_epi32(D, 16);              // 0 0 x db2
    const __m256i F = _mm256_add_epi8(E, B);                 // x dr x db
    const __m256i G = _mm256_and_si256(F, mask_rb);          // 0 dr 0 db
    const __m256i out = _mm256_sub_epi8(in, G);
    _mm256_storeu_si256((__m256i*)&argb_data[i], out);
  }
  // fallthrough and finish off with plain-SSE
  if (i != num_pixels) {
    VP8LTransformColor_SSE(m, argb_data + i, num_pixels - i);
  }
}

//------------------------------------------------------------------------------
#define SPAN 16
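
// Both collectors below share one structure: MK_CST_16 packs the two
// per-lane multipliers of each 32-bit pixel. For the blue transform, the
// '+ 256' folds the plain subtraction of red into the same
// _mm256_mulhi_epi16, since mulhi(r << 8, 256) == r. The row loop is
// software-pipelined: while the transformed values of the next 8 pixels are
// computed, the previous result is spilled to 'values' and every 4th byte
// (the byte holding the transformed channel) is accumulated into 'histo'.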
static void CollectColorBlueTransforms_AVX2(const uint32_t* WEBP_RESTRICT argb,
                                            int stride, int tile_width,
                                            int tile_height, int green_to_blue,
                                            int red_to_blue, uint32_t histo[]) {
  const __m256i mult =
      MK_CST_16(CST_5b(red_to_blue) + 256, CST_5b(green_to_blue));
  const __m256i perm = _mm256_setr_epi8(
      -1, 1, -1, 2, -1, 5, -1, 6, -1, 9, -1, 10, -1, 13, -1, 14, -1, 17, -1,
      18, -1, 21, -1, 22, -1, 25, -1, 26, -1, 29, -1, 30);
  if (tile_width >= 8) {
    int y, i;
    for (y = 0; y < tile_height; ++y) {
      uint8_t values[32];
      const uint32_t* const src = argb + y * stride;
      const __m256i A1 = _mm256_loadu_si256((const __m256i*)src);
      const __m256i B1 = _mm256_shuffle_epi8(A1, perm);
      const __m256i C1 = _mm256_mulhi_epi16(B1, mult);
      const __m256i D1 = _mm256_sub_epi16(A1, C1);
      __m256i E = _mm256_add_epi16(_mm256_srli_epi32(D1, 16), D1);
      int x;
      for (x = 8; x + 8 <= tile_width; x += 8) {
        const __m256i A2 = _mm256_loadu_si256((const __m256i*)(src + x));
        __m256i B2, C2, D2;
        _mm256_storeu_si256((__m256i*)values, E);
        for (i = 0; i < 32; i += 4) ++histo[values[i]];
        B2 = _mm256_shuffle_epi8(A2, perm);
        C2 = _mm256_mulhi_epi16(B2, mult);
        D2 = _mm256_sub_epi16(A2, C2);
        E = _mm256_add_epi16(_mm256_srli_epi32(D2, 16), D2);
      }
      _mm256_storeu_si256((__m256i*)values, E);
      for (i = 0; i < 32; i += 4) ++histo[values[i]];
    }
  }
  {
    const int left_over = tile_width & 7;
    if (left_over > 0) {
      VP8LCollectColorBlueTransforms_SSE(argb + tile_width - left_over, stride,
                                         left_over, tile_height, green_to_blue,
                                         red_to_blue, histo);
    }
  }
}

static void CollectColorRedTransforms_AVX2(const uint32_t* WEBP_RESTRICT argb,
                                           int stride, int tile_width,
                                           int tile_height, int green_to_red,
                                           uint32_t histo[]) {
  const __m256i mult = MK_CST_16(0, CST_5b(green_to_red));
  const __m256i mask_g = _mm256_set1_epi32(0x0000ff00);
  if (tile_width >= 8) {
    int y, i;
    for (y = 0; y < tile_height; ++y) {
      uint8_t values[32];
      const uint32_t* const src = argb + y * stride;
      const __m256i A1 = _mm256_loadu_si256((const __m256i*)src);
      const __m256i B1 = _mm256_and_si256(A1, mask_g);
      const __m256i C1 = _mm256_madd_epi16(B1, mult);
      __m256i D = _mm256_sub_epi16(A1, C1);
      int x;
      for (x = 8; x + 8 <= tile_width; x += 8) {
        const __m256i A2 = _mm256_loadu_si256((const __m256i*)(src + x));
        __m256i B2, C2;
        _mm256_storeu_si256((__m256i*)values, D);
        for (i = 2; i < 32; i += 4) ++histo[values[i]];
        B2 = _mm256_and_si256(A2, mask_g);
        C2 = _mm256_madd_epi16(B2, mult);
        D = _mm256_sub_epi16(A2, C2);
      }
      _mm256_storeu_si256((__m256i*)values, D);
      for (i = 2; i < 32; i += 4) ++histo[values[i]];
    }
  }
  {
    const int left_over = tile_width & 7;
    if (left_over > 0) {
      VP8LCollectColorRedTransforms_SSE(argb + tile_width - left_over, stride,
                                        left_over, tile_height, green_to_red,
                                        histo);
    }
  }
}
#undef SPAN
#undef MK_CST_16

//------------------------------------------------------------------------------

// Note we are adding uint32_t's as *signed* int32's (using _mm256_add_epi32).
// But that's ok since the histogram values are less than 1<<28 (max picture
// size).
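// The tail handling below decomposes 'size' greedily: a do/while over blocks
// of 32 lanes (always entered, since size >= 32), one optional block of 16,
// one optional block of 8, and a scalar loop for any smaller remainder.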
static void AddVector_AVX2(const uint32_t* WEBP_RESTRICT a,
                           const uint32_t* WEBP_RESTRICT b,
                           uint32_t* WEBP_RESTRICT out, int size) {
  int i = 0;
  int aligned_size = size & ~31;
  // Size is, at minimum, NUM_DISTANCE_CODES (40) and may be as large as
  // NUM_LITERAL_CODES (256) + NUM_LENGTH_CODES (24) + (0 or a non-zero power
  // of 2). See the usage in VP8LHistogramAdd().
  assert(size >= 32);
  assert(size % 2 == 0);

  do {
    const __m256i a0 = _mm256_loadu_si256((const __m256i*)&a[i + 0]);
    const __m256i a1 = _mm256_loadu_si256((const __m256i*)&a[i + 8]);
    const __m256i a2 = _mm256_loadu_si256((const __m256i*)&a[i + 16]);
    const __m256i a3 = _mm256_loadu_si256((const __m256i*)&a[i + 24]);
    const __m256i b0 = _mm256_loadu_si256((const __m256i*)&b[i + 0]);
    const __m256i b1 = _mm256_loadu_si256((const __m256i*)&b[i + 8]);
    const __m256i b2 = _mm256_loadu_si256((const __m256i*)&b[i + 16]);
    const __m256i b3 = _mm256_loadu_si256((const __m256i*)&b[i + 24]);
    _mm256_storeu_si256((__m256i*)&out[i + 0], _mm256_add_epi32(a0, b0));
    _mm256_storeu_si256((__m256i*)&out[i + 8], _mm256_add_epi32(a1, b1));
    _mm256_storeu_si256((__m256i*)&out[i + 16], _mm256_add_epi32(a2, b2));
    _mm256_storeu_si256((__m256i*)&out[i + 24], _mm256_add_epi32(a3, b3));
    i += 32;
  } while (i != aligned_size);

  if ((size & 16) != 0) {
    const __m256i a0 = _mm256_loadu_si256((const __m256i*)&a[i + 0]);
    const __m256i a1 = _mm256_loadu_si256((const __m256i*)&a[i + 8]);
    const __m256i b0 = _mm256_loadu_si256((const __m256i*)&b[i + 0]);
    const __m256i b1 = _mm256_loadu_si256((const __m256i*)&b[i + 8]);
    _mm256_storeu_si256((__m256i*)&out[i + 0], _mm256_add_epi32(a0, b0));
    _mm256_storeu_si256((__m256i*)&out[i + 8], _mm256_add_epi32(a1, b1));
    i += 16;
  }

  size &= 15;
  if (size == 8) {
    const __m256i a0 = _mm256_loadu_si256((const __m256i*)&a[i]);
    const __m256i b0 = _mm256_loadu_si256((const __m256i*)&b[i]);
    _mm256_storeu_si256((__m256i*)&out[i], _mm256_add_epi32(a0, b0));
  } else {
    for (; size--; ++i) {
      out[i] = a[i] + b[i];
    }
  }
}

static void AddVectorEq_AVX2(const uint32_t* WEBP_RESTRICT a,
                             uint32_t* WEBP_RESTRICT out, int size) {
  int i = 0;
  int aligned_size = size & ~31;
  // Size is, at minimum, NUM_DISTANCE_CODES (40) and may be as large as
  // NUM_LITERAL_CODES (256) + NUM_LENGTH_CODES (24) + (0 or a non-zero power
  // of 2). See the usage in VP8LHistogramAdd().
  assert(size >= 32);
  assert(size % 2 == 0);

  do {
    const __m256i a0 = _mm256_loadu_si256((const __m256i*)&a[i + 0]);
    const __m256i a1 = _mm256_loadu_si256((const __m256i*)&a[i + 8]);
    const __m256i a2 = _mm256_loadu_si256((const __m256i*)&a[i + 16]);
    const __m256i a3 = _mm256_loadu_si256((const __m256i*)&a[i + 24]);
    const __m256i b0 = _mm256_loadu_si256((const __m256i*)&out[i + 0]);
    const __m256i b1 = _mm256_loadu_si256((const __m256i*)&out[i + 8]);
    const __m256i b2 = _mm256_loadu_si256((const __m256i*)&out[i + 16]);
    const __m256i b3 = _mm256_loadu_si256((const __m256i*)&out[i + 24]);
    _mm256_storeu_si256((__m256i*)&out[i + 0], _mm256_add_epi32(a0, b0));
    _mm256_storeu_si256((__m256i*)&out[i + 8], _mm256_add_epi32(a1, b1));
    _mm256_storeu_si256((__m256i*)&out[i + 16], _mm256_add_epi32(a2, b2));
    _mm256_storeu_si256((__m256i*)&out[i + 24], _mm256_add_epi32(a3, b3));
    i += 32;
  } while (i != aligned_size);

  if ((size & 16) != 0) {
    const __m256i a0 = _mm256_loadu_si256((const __m256i*)&a[i + 0]);
    const __m256i a1 = _mm256_loadu_si256((const __m256i*)&a[i + 8]);
    const __m256i b0 = _mm256_loadu_si256((const __m256i*)&out[i + 0]);
    const __m256i b1 = _mm256_loadu_si256((const __m256i*)&out[i + 8]);
    _mm256_storeu_si256((__m256i*)&out[i + 0], _mm256_add_epi32(a0, b0));
    _mm256_storeu_si256((__m256i*)&out[i + 8], _mm256_add_epi32(a1, b1));
    i += 16;
  }

  size &= 15;
  if (size == 8) {
    const __m256i a0 = _mm256_loadu_si256((const __m256i*)&a[i]);
    const __m256i b0 = _mm256_loadu_si256((const __m256i*)&out[i]);
    _mm256_storeu_si256((__m256i*)&out[i], _mm256_add_epi32(a0, b0));
  } else {
    for (; size--; ++i) {
      out[i] += a[i];
    }
  }
}

//------------------------------------------------------------------------------
// Entropy

#if !defined(WEBP_HAVE_SLOW_CLZ_CTZ)

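// Strategy: the 32 counts handled per iteration are saturating-packed from
// 32 bits down to signed 8 bits, so every non-zero count becomes a non-zero
// positive byte. _mm256_cmpgt_epi8 + _mm256_movemask_epi8 then yield a
// bitmask of the non-zero positions, and the BitsCtz() loop visits only the
// set bits, so VP8LFastSLog2 is called exclusively for entries that actually
// contribute to the entropy. This is also why the function is compiled only
// when a fast CTZ is available.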
static uint64_t CombinedShannonEntropy_AVX2(const uint32_t X[256],
                                            const uint32_t Y[256]) {
  int i;
  uint64_t retval = 0;
  uint32_t sumX = 0, sumXY = 0;
  const __m256i zero = _mm256_setzero_si256();

  for (i = 0; i < 256; i += 32) {
    const __m256i x0 = _mm256_loadu_si256((const __m256i*)(X + i + 0));
    const __m256i y0 = _mm256_loadu_si256((const __m256i*)(Y + i + 0));
    const __m256i x1 = _mm256_loadu_si256((const __m256i*)(X + i + 8));
    const __m256i y1 = _mm256_loadu_si256((const __m256i*)(Y + i + 8));
    const __m256i x2 = _mm256_loadu_si256((const __m256i*)(X + i + 16));
    const __m256i y2 = _mm256_loadu_si256((const __m256i*)(Y + i + 16));
    const __m256i x3 = _mm256_loadu_si256((const __m256i*)(X + i + 24));
    const __m256i y3 = _mm256_loadu_si256((const __m256i*)(Y + i + 24));
    const __m256i x4 = _mm256_packs_epi16(_mm256_packs_epi32(x0, x1),
                                          _mm256_packs_epi32(x2, x3));
    const __m256i y4 = _mm256_packs_epi16(_mm256_packs_epi32(y0, y1),
                                          _mm256_packs_epi32(y2, y3));
    // The pack instructions above interleave the 128-bit lanes, leaving the
    // bytes in order: ... 19 18 17 16 11 10 9 8 3 2 1 0. The permutation
    // below restores the natural 0..31 order.
    const __m256i x5 = _mm256_permutevar8x32_epi32(
        x4, _mm256_set_epi32(7, 3, 6, 2, 5, 1, 4, 0));
    const __m256i y5 = _mm256_permutevar8x32_epi32(
        y4, _mm256_set_epi32(7, 3, 6, 2, 5, 1, 4, 0));
    const uint32_t mx =
        (uint32_t)_mm256_movemask_epi8(_mm256_cmpgt_epi8(x5, zero));
    uint32_t my =
        (uint32_t)_mm256_movemask_epi8(_mm256_cmpgt_epi8(y5, zero)) | mx;
    while (my) {
      const int32_t j = BitsCtz(my);
      uint32_t xy;
      if ((mx >> j) & 1) {
        const int x = X[i + j];
        sumX += x;
        retval += VP8LFastSLog2(x);
      }
      xy = X[i + j] + Y[i + j];
      sumXY += xy;
      retval += VP8LFastSLog2(xy);
      my &= my - 1;
    }
  }
  retval = VP8LFastSLog2(sumX) + VP8LFastSLog2(sumXY) - retval;
  return retval;
}

#else

#define DONT_USE_COMBINED_SHANNON_ENTROPY_SSE2_FUNC  // won't be faster

#endif

//------------------------------------------------------------------------------

static int VectorMismatch_AVX2(const uint32_t* const array1,
                               const uint32_t* const array2, int length) {
  int match_len;

  if (length >= 24) {
    __m256i A0 = _mm256_loadu_si256((const __m256i*)&array1[0]);
    __m256i A1 = _mm256_loadu_si256((const __m256i*)&array2[0]);
    match_len = 0;
    do {
      // Loop unrolling and early load both provide a speedup of 10% for the
      // current function. Also, max_limit can be MAX_LENGTH=4096 at most.
      const __m256i cmpA = _mm256_cmpeq_epi32(A0, A1);
      const __m256i B0 =
          _mm256_loadu_si256((const __m256i*)&array1[match_len + 8]);
      const __m256i B1 =
          _mm256_loadu_si256((const __m256i*)&array2[match_len + 8]);
      if ((uint32_t)_mm256_movemask_epi8(cmpA) != 0xffffffff) break;
      match_len += 8;

      {
        const __m256i cmpB = _mm256_cmpeq_epi32(B0, B1);
        A0 = _mm256_loadu_si256((const __m256i*)&array1[match_len + 8]);
        A1 = _mm256_loadu_si256((const __m256i*)&array2[match_len + 8]);
        if ((uint32_t)_mm256_movemask_epi8(cmpB) != 0xffffffff) break;
        match_len += 8;
      }
    } while (match_len + 24 < length);
  } else {
    match_len = 0;
    // Unroll the potential first two loops.
    if (length >= 8 &&
        (uint32_t)_mm256_movemask_epi8(_mm256_cmpeq_epi32(
            _mm256_loadu_si256((const __m256i*)&array1[0]),
            _mm256_loadu_si256((const __m256i*)&array2[0]))) == 0xffffffff) {
      match_len = 8;
      if (length >= 16 &&
          (uint32_t)_mm256_movemask_epi8(_mm256_cmpeq_epi32(
              _mm256_loadu_si256((const __m256i*)&array1[8]),
              _mm256_loadu_si256((const __m256i*)&array2[8]))) == 0xffffffff) {
        match_len = 16;
      }
    }
  }

  while (match_len < length && array1[match_len] == array2[match_len]) {
    ++match_len;
  }
  return match_len;
}

// Bundles multiple (1, 2, 4 or 8) pixels into a single pixel.
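// 'xbits' is log2 of the bundle width: 1 << xbits indices of (8 >> xbits)
// bits each are packed into the green channel of one output pixel, with
// alpha forced to 0xff. Each case below vectorizes that packing: for
// xbits == 0 a pixel is simply 0xff000000 | (row[x] << 8), while the
// xbits == 3 case gathers one bit per input byte via _mm256_movemask_epi8.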
static void BundleColorMap_AVX2(const uint8_t* WEBP_RESTRICT const row,
                                int width, int xbits,
                                uint32_t* WEBP_RESTRICT dst) {
  int x = 0;
  assert(xbits >= 0);
  assert(xbits <= 3);
  switch (xbits) {
    case 0: {
      const __m256i ff = _mm256_set1_epi16((short)0xff00);
      const __m256i zero = _mm256_setzero_si256();
      // Store 0xff000000 | (row[x] << 8).
      for (x = 0; x + 32 <= width; x += 32, dst += 32) {
        const __m256i in = _mm256_loadu_si256((const __m256i*)&row[x]);
        const __m256i in_lo = _mm256_unpacklo_epi8(zero, in);
        const __m256i dst0 = _mm256_unpacklo_epi16(in_lo, ff);
        const __m256i dst1 = _mm256_unpackhi_epi16(in_lo, ff);
        const __m256i in_hi = _mm256_unpackhi_epi8(zero, in);
        const __m256i dst2 = _mm256_unpacklo_epi16(in_hi, ff);
        const __m256i dst3 = _mm256_unpackhi_epi16(in_hi, ff);
        _mm256_storeu2_m128i((__m128i*)&dst[16], (__m128i*)&dst[0], dst0);
        _mm256_storeu2_m128i((__m128i*)&dst[20], (__m128i*)&dst[4], dst1);
        _mm256_storeu2_m128i((__m128i*)&dst[24], (__m128i*)&dst[8], dst2);
        _mm256_storeu2_m128i((__m128i*)&dst[28], (__m128i*)&dst[12], dst3);
      }
      break;
    }
    case 1: {
      const __m256i ff = _mm256_set1_epi16((short)0xff00);
      const __m256i mul = _mm256_set1_epi16(0x110);
      for (x = 0; x + 32 <= width; x += 32, dst += 16) {
        // 0a0b | (where a/b are 4 bits).
        const __m256i in = _mm256_loadu_si256((const __m256i*)&row[x]);
        const __m256i tmp = _mm256_mullo_epi16(in, mul);  // aba0
        const __m256i pack = _mm256_and_si256(tmp, ff);   // ab00
        const __m256i dst0 = _mm256_unpacklo_epi16(pack, ff);
        const __m256i dst1 = _mm256_unpackhi_epi16(pack, ff);
        _mm256_storeu2_m128i((__m128i*)&dst[8], (__m128i*)&dst[0], dst0);
        _mm256_storeu2_m128i((__m128i*)&dst[12], (__m128i*)&dst[4], dst1);
      }
      break;
    }
    case 2: {
      const __m256i mask_or = _mm256_set1_epi32((int)0xff000000);
      const __m256i mul_cst = _mm256_set1_epi16(0x0104);
      const __m256i mask_mul = _mm256_set1_epi16(0x0f00);
      for (x = 0; x + 32 <= width; x += 32, dst += 8) {
        // 000a000b000c000d | (where a/b/c/d are 2 bits).
        const __m256i in = _mm256_loadu_si256((const __m256i*)&row[x]);
        const __m256i mul =
            _mm256_mullo_epi16(in, mul_cst);  // 00ab00b000cd00d0
        const __m256i tmp =
            _mm256_and_si256(mul, mask_mul);  // 00ab000000cd0000
        const __m256i shift = _mm256_srli_epi32(tmp, 12);  // 00000000ab000000
        const __m256i pack = _mm256_or_si256(shift, tmp);  // 00000000abcd0000
        // Convert to 0xff00**00.
        const __m256i res = _mm256_or_si256(pack, mask_or);
        _mm256_storeu_si256((__m256i*)dst, res);
      }
      break;
    }
    default: {
      assert(xbits == 3);
      for (x = 0; x + 32 <= width; x += 32, dst += 4) {
        // 0000000a00000000b... | (where a/b are 1 bit).
        const __m256i in = _mm256_loadu_si256((const __m256i*)&row[x]);
        const __m256i shift = _mm256_slli_epi64(in, 7);
        const uint32_t move = _mm256_movemask_epi8(shift);
        dst[0] = 0xff000000 | ((move & 0xff) << 8);
        dst[1] = 0xff000000 | (move & 0xff00);
        dst[2] = 0xff000000 | ((move & 0xff0000) >> 8);
        dst[3] = 0xff000000 | ((move & 0xff000000) >> 16);
      }
      break;
    }
  }
  if (x != width) {
    VP8LBundleColorMap_SSE(row + x, width - x, xbits, dst);
  }
}

//------------------------------------------------------------------------------
// Batch version of Predictor Transform subtraction
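
// _mm256_avg_epu8 computes the *rounded-up* average (a + b + 1) >> 1, while
// the predictors need the truncating average (a + b) >> 1. Average2_m256i
// therefore subtracts the rounding bit (a ^ b) & 1 afterwards. E.g. for
// a = 1, b = 2: avg_epu8 gives 2, and (1 ^ 2) & 1 == 1 corrects it to 1.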

static WEBP_INLINE void Average2_m256i(const __m256i* const a0,
                                       const __m256i* const a1,
                                       __m256i* const avg) {
  // (a + b) >> 1 = ((a + b + 1) >> 1) - ((a ^ b) & 1)
  const __m256i ones = _mm256_set1_epi8(1);
  const __m256i avg1 = _mm256_avg_epu8(*a0, *a1);
  const __m256i one = _mm256_and_si256(_mm256_xor_si256(*a0, *a1), ones);
  *avg = _mm256_sub_epi8(avg1, one);
}

// Predictor0: ARGB_BLACK.
static void PredictorSub0_AVX2(const uint32_t* in, const uint32_t* upper,
                               int num_pixels, uint32_t* WEBP_RESTRICT out) {
  int i;
  const __m256i black = _mm256_set1_epi32((int)ARGB_BLACK);
  for (i = 0; i + 8 <= num_pixels; i += 8) {
    const __m256i src = _mm256_loadu_si256((const __m256i*)&in[i]);
    const __m256i res = _mm256_sub_epi8(src, black);
    _mm256_storeu_si256((__m256i*)&out[i], res);
  }
  if (i != num_pixels) {
    VP8LPredictorsSub_SSE[0](in + i, NULL, num_pixels - i, out + i);
  }
  (void)upper;
}
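
// The macro below stamps out the four single-neighbor predictors; 'IN' is
// the neighbor expression (L, T, TR or TL), and the i - 1 / i + 1 offsets
// are absorbed by the unaligned loads. The SSE fallback receives its top row
// through WEBP_OFFSET_PTR so that no 'upper + i' arithmetic is formed when
// 'upper' is NULL (as for Predictor1, which never reads the top row).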

#define GENERATE_PREDICTOR_1(X, IN)                                          \
  static void PredictorSub##X##_AVX2(                                        \
      const uint32_t* const in, const uint32_t* const upper, int num_pixels, \
      uint32_t* WEBP_RESTRICT const out) {                                   \
    int i;                                                                   \
    for (i = 0; i + 8 <= num_pixels; i += 8) {                               \
      const __m256i src = _mm256_loadu_si256((const __m256i*)&in[i]);        \
      const __m256i pred = _mm256_loadu_si256((const __m256i*)&(IN));        \
      const __m256i res = _mm256_sub_epi8(src, pred);                        \
      _mm256_storeu_si256((__m256i*)&out[i], res);                           \
    }                                                                        \
    if (i != num_pixels) {                                                   \
      VP8LPredictorsSub_SSE[(X)](in + i, WEBP_OFFSET_PTR(upper, i),          \
                                 num_pixels - i, out + i);                   \
    }                                                                        \
  }

GENERATE_PREDICTOR_1(1, in[i - 1])     // Predictor1: L
GENERATE_PREDICTOR_1(2, upper[i])      // Predictor2: T
GENERATE_PREDICTOR_1(3, upper[i + 1])  // Predictor3: TR
GENERATE_PREDICTOR_1(4, upper[i - 1])  // Predictor4: TL
#undef GENERATE_PREDICTOR_1

// Predictor5: avg2(avg2(L, TR), T)
static void PredictorSub5_AVX2(const uint32_t* in, const uint32_t* upper,
                               int num_pixels, uint32_t* WEBP_RESTRICT out) {
  int i;
  for (i = 0; i + 8 <= num_pixels; i += 8) {
    const __m256i L = _mm256_loadu_si256((const __m256i*)&in[i - 1]);
    const __m256i T = _mm256_loadu_si256((const __m256i*)&upper[i]);
    const __m256i TR = _mm256_loadu_si256((const __m256i*)&upper[i + 1]);
    const __m256i src = _mm256_loadu_si256((const __m256i*)&in[i]);
    __m256i avg, pred, res;
    Average2_m256i(&L, &TR, &avg);
    Average2_m256i(&avg, &T, &pred);
    res = _mm256_sub_epi8(src, pred);
    _mm256_storeu_si256((__m256i*)&out[i], res);
  }
  if (i != num_pixels) {
    VP8LPredictorsSub_SSE[5](in + i, upper + i, num_pixels - i, out + i);
  }
}

#define GENERATE_PREDICTOR_2(X, A, B)                                         \
  static void PredictorSub##X##_AVX2(const uint32_t* in,                      \
                                     const uint32_t* upper, int num_pixels,   \
                                     uint32_t* WEBP_RESTRICT out) {           \
    int i;                                                                    \
    for (i = 0; i + 8 <= num_pixels; i += 8) {                                \
      const __m256i tA = _mm256_loadu_si256((const __m256i*)&(A));            \
      const __m256i tB = _mm256_loadu_si256((const __m256i*)&(B));            \
      const __m256i src = _mm256_loadu_si256((const __m256i*)&in[i]);         \
      __m256i pred, res;                                                      \
      Average2_m256i(&tA, &tB, &pred);                                        \
      res = _mm256_sub_epi8(src, pred);                                       \
      _mm256_storeu_si256((__m256i*)&out[i], res);                            \
    }                                                                         \
    if (i != num_pixels) {                                                    \
      VP8LPredictorsSub_SSE[(X)](in + i, upper + i, num_pixels - i, out + i); \
    }                                                                         \
  }

GENERATE_PREDICTOR_2(6, in[i - 1], upper[i - 1])  // Predictor6: avg(L, TL)
GENERATE_PREDICTOR_2(7, in[i - 1], upper[i])      // Predictor7: avg(L, T)
GENERATE_PREDICTOR_2(8, upper[i - 1], upper[i])   // Predictor8: avg(TL, T)
GENERATE_PREDICTOR_2(9, upper[i], upper[i + 1])   // Predictor9: avg(T, TR)
#undef GENERATE_PREDICTOR_2

// Predictor10: avg(avg(L,TL), avg(T, TR)).
static void PredictorSub10_AVX2(const uint32_t* in, const uint32_t* upper,
                                int num_pixels, uint32_t* WEBP_RESTRICT out) {
  int i;
  for (i = 0; i + 8 <= num_pixels; i += 8) {
    const __m256i L = _mm256_loadu_si256((const __m256i*)&in[i - 1]);
    const __m256i src = _mm256_loadu_si256((const __m256i*)&in[i]);
    const __m256i TL = _mm256_loadu_si256((const __m256i*)&upper[i - 1]);
    const __m256i T = _mm256_loadu_si256((const __m256i*)&upper[i]);
    const __m256i TR = _mm256_loadu_si256((const __m256i*)&upper[i + 1]);
    __m256i avgTTR, avgLTL, avg, res;
    Average2_m256i(&T, &TR, &avgTTR);
    Average2_m256i(&L, &TL, &avgLTL);
    Average2_m256i(&avgTTR, &avgLTL, &avg);
    res = _mm256_sub_epi8(src, avg);
    _mm256_storeu_si256((__m256i*)&out[i], res);
  }
  if (i != num_pixels) {
    VP8LPredictorsSub_SSE[10](in + i, upper + i, num_pixels - i, out + i);
  }
}

// Predictor11: select.
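// GetSumAbsDiff32_AVX2 builds per-pixel sums of absolute byte differences
// out of _mm256_sad_epu8, which operates on 64-bit halves: each 32-bit pixel
// is first widened to 64 bits by interleaving it with a dword that is
// identical in both operands (*A) and thus contributes zero to the
// difference. The resulting pa = sum|T - TL| and pb = sum|L - TL| drive the
// branchless select below: L is chosen when pb > pa, T otherwise, matching
// the scalar Select() predictor.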
static void GetSumAbsDiff32_AVX2(const __m256i* const A,
                                 const __m256i* const B, __m256i* const out) {
  // We can unpack with any value on the upper 32 bits, provided it's the same
  // on both operands (so that their sum of abs diff is zero). Here we use *A.
  const __m256i A_lo = _mm256_unpacklo_epi32(*A, *A);
  const __m256i B_lo = _mm256_unpacklo_epi32(*B, *A);
  const __m256i A_hi = _mm256_unpackhi_epi32(*A, *A);
  const __m256i B_hi = _mm256_unpackhi_epi32(*B, *A);
  const __m256i s_lo = _mm256_sad_epu8(A_lo, B_lo);
  const __m256i s_hi = _mm256_sad_epu8(A_hi, B_hi);
  *out = _mm256_packs_epi32(s_lo, s_hi);
}

static void PredictorSub11_AVX2(const uint32_t* in, const uint32_t* upper,
                                int num_pixels, uint32_t* WEBP_RESTRICT out) {
  int i;
  for (i = 0; i + 8 <= num_pixels; i += 8) {
    const __m256i L = _mm256_loadu_si256((const __m256i*)&in[i - 1]);
    const __m256i T = _mm256_loadu_si256((const __m256i*)&upper[i]);
    const __m256i TL = _mm256_loadu_si256((const __m256i*)&upper[i - 1]);
    const __m256i src = _mm256_loadu_si256((const __m256i*)&in[i]);
    __m256i pa, pb;
    GetSumAbsDiff32_AVX2(&T, &TL, &pa);  // pa = sum |T-TL|
    GetSumAbsDiff32_AVX2(&L, &TL, &pb);  // pb = sum |L-TL|
    {
      const __m256i mask = _mm256_cmpgt_epi32(pb, pa);
      const __m256i A = _mm256_and_si256(mask, L);
      const __m256i B = _mm256_andnot_si256(mask, T);
      const __m256i pred = _mm256_or_si256(A, B);  // pred = (pb > pa) ? L : T
      const __m256i res = _mm256_sub_epi8(src, pred);
      _mm256_storeu_si256((__m256i*)&out[i], res);
    }
  }
  if (i != num_pixels) {
    VP8LPredictorsSub_SSE[11](in + i, upper + i, num_pixels - i, out + i);
  }
}

// Predictor12: ClampedAddSubtractFull.
static void PredictorSub12_AVX2(const uint32_t* in, const uint32_t* upper,
                                int num_pixels, uint32_t* WEBP_RESTRICT out) {
  int i;
  const __m256i zero = _mm256_setzero_si256();
  for (i = 0; i + 8 <= num_pixels; i += 8) {
    const __m256i src = _mm256_loadu_si256((const __m256i*)&in[i]);
    const __m256i L = _mm256_loadu_si256((const __m256i*)&in[i - 1]);
    const __m256i L_lo = _mm256_unpacklo_epi8(L, zero);
    const __m256i L_hi = _mm256_unpackhi_epi8(L, zero);
    const __m256i T = _mm256_loadu_si256((const __m256i*)&upper[i]);
    const __m256i T_lo = _mm256_unpacklo_epi8(T, zero);
    const __m256i T_hi = _mm256_unpackhi_epi8(T, zero);
    const __m256i TL = _mm256_loadu_si256((const __m256i*)&upper[i - 1]);
    const __m256i TL_lo = _mm256_unpacklo_epi8(TL, zero);
    const __m256i TL_hi = _mm256_unpackhi_epi8(TL, zero);
    const __m256i diff_lo = _mm256_sub_epi16(T_lo, TL_lo);
    const __m256i diff_hi = _mm256_sub_epi16(T_hi, TL_hi);
    const __m256i pred_lo = _mm256_add_epi16(L_lo, diff_lo);
    const __m256i pred_hi = _mm256_add_epi16(L_hi, diff_hi);
    const __m256i pred = _mm256_packus_epi16(pred_lo, pred_hi);
    const __m256i res = _mm256_sub_epi8(src, pred);
    _mm256_storeu_si256((__m256i*)&out[i], res);
  }
  if (i != num_pixels) {
    VP8LPredictorsSub_SSE[12](in + i, upper + i, num_pixels - i, out + i);
  }
}
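
// Predictor13 computes, per channel and in 16-bit lanes to avoid overflow:
//   avg  = (L + T) >> 1
//   pred = clamp_to_255(avg + ((avg - TL + (TL > avg)) >> 1))
// where the (TL > avg) bit_fix turns the arithmetic shift (which rounds
// toward -infinity) into C-style truncation toward zero, as in the scalar
// code; the final _mm256_packus_epi16 provides the clamping.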

// Predictor13: ClampedAddSubtractHalf.
static void PredictorSub13_AVX2(const uint32_t* in, const uint32_t* upper,
                                int num_pixels, uint32_t* WEBP_RESTRICT out) {
  int i;
  const __m256i zero = _mm256_setzero_si256();
  for (i = 0; i + 8 <= num_pixels; i += 8) {
    const __m256i L = _mm256_loadu_si256((const __m256i*)&in[i - 1]);
    const __m256i src = _mm256_loadu_si256((const __m256i*)&in[i]);
    const __m256i T = _mm256_loadu_si256((const __m256i*)&upper[i]);
    const __m256i TL = _mm256_loadu_si256((const __m256i*)&upper[i - 1]);
    // lo.
    const __m256i L_lo = _mm256_unpacklo_epi8(L, zero);
    const __m256i T_lo = _mm256_unpacklo_epi8(T, zero);
    const __m256i TL_lo = _mm256_unpacklo_epi8(TL, zero);
    const __m256i sum_lo = _mm256_add_epi16(T_lo, L_lo);
    const __m256i avg_lo = _mm256_srli_epi16(sum_lo, 1);
    const __m256i A1_lo = _mm256_sub_epi16(avg_lo, TL_lo);
    const __m256i bit_fix_lo = _mm256_cmpgt_epi16(TL_lo, avg_lo);
    const __m256i A2_lo = _mm256_sub_epi16(A1_lo, bit_fix_lo);
    const __m256i A3_lo = _mm256_srai_epi16(A2_lo, 1);
    const __m256i A4_lo = _mm256_add_epi16(avg_lo, A3_lo);
    // hi.
    const __m256i L_hi = _mm256_unpackhi_epi8(L, zero);
    const __m256i T_hi = _mm256_unpackhi_epi8(T, zero);
    const __m256i TL_hi = _mm256_unpackhi_epi8(TL, zero);
    const __m256i sum_hi = _mm256_add_epi16(T_hi, L_hi);
    const __m256i avg_hi = _mm256_srli_epi16(sum_hi, 1);
    const __m256i A1_hi = _mm256_sub_epi16(avg_hi, TL_hi);
    const __m256i bit_fix_hi = _mm256_cmpgt_epi16(TL_hi, avg_hi);
    const __m256i A2_hi = _mm256_sub_epi16(A1_hi, bit_fix_hi);
    const __m256i A3_hi = _mm256_srai_epi16(A2_hi, 1);
    const __m256i A4_hi = _mm256_add_epi16(avg_hi, A3_hi);

    const __m256i pred = _mm256_packus_epi16(A4_lo, A4_hi);
    const __m256i res = _mm256_sub_epi8(src, pred);
    _mm256_storeu_si256((__m256i*)&out[i], res);
  }
  if (i != num_pixels) {
    VP8LPredictorsSub_SSE[13](in + i, upper + i, num_pixels - i, out + i);
  }
}

//------------------------------------------------------------------------------
// Entry point

extern void VP8LEncDspInitAVX2(void);

WEBP_TSAN_IGNORE_FUNCTION void VP8LEncDspInitAVX2(void) {
  VP8LSubtractGreenFromBlueAndRed = SubtractGreenFromBlueAndRed_AVX2;
  VP8LTransformColor = TransformColor_AVX2;
  VP8LCollectColorBlueTransforms = CollectColorBlueTransforms_AVX2;
  VP8LCollectColorRedTransforms = CollectColorRedTransforms_AVX2;
  VP8LAddVector = AddVector_AVX2;
  VP8LAddVectorEq = AddVectorEq_AVX2;
#if !defined(DONT_USE_COMBINED_SHANNON_ENTROPY_SSE2_FUNC)
  VP8LCombinedShannonEntropy = CombinedShannonEntropy_AVX2;
#endif
  VP8LVectorMismatch = VectorMismatch_AVX2;
  VP8LBundleColorMap = BundleColorMap_AVX2;

  VP8LPredictorsSub[0] = PredictorSub0_AVX2;
  VP8LPredictorsSub[1] = PredictorSub1_AVX2;
  VP8LPredictorsSub[2] = PredictorSub2_AVX2;
  VP8LPredictorsSub[3] = PredictorSub3_AVX2;
  VP8LPredictorsSub[4] = PredictorSub4_AVX2;
  VP8LPredictorsSub[5] = PredictorSub5_AVX2;
  VP8LPredictorsSub[6] = PredictorSub6_AVX2;
  VP8LPredictorsSub[7] = PredictorSub7_AVX2;
  VP8LPredictorsSub[8] = PredictorSub8_AVX2;
  VP8LPredictorsSub[9] = PredictorSub9_AVX2;
  VP8LPredictorsSub[10] = PredictorSub10_AVX2;
  VP8LPredictorsSub[11] = PredictorSub11_AVX2;
  VP8LPredictorsSub[12] = PredictorSub12_AVX2;
  VP8LPredictorsSub[13] = PredictorSub13_AVX2;
  VP8LPredictorsSub[14] = PredictorSub0_AVX2;  // <- padding security sentinels
  VP8LPredictorsSub[15] = PredictorSub0_AVX2;
}

#else  // !WEBP_USE_AVX2

WEBP_DSP_INIT_STUB(VP8LEncDspInitAVX2)

#endif  // WEBP_USE_AVX2