/*
 * Copyright (c) 2004 Romain Dolbeau <[email protected]>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
/****************************************************************************
 * IDCT transform:
 ****************************************************************************/
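
/* Add a 4x4 residual block to dst.  The sixteen coefficients are loaded as
 * two vectors, run through a 1-D transform (VEC_1D_DCT), transposed, and
 * transformed again; block[0] carries a +32 bias so the final >>6 rounds to
 * nearest before VEC_LOAD_U8_ADD_S16_STORE_U8() adds each row to the
 * destination pixels with unsigned saturation. */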
static void h264_idct_add_altivec(uint8_t *dst, int16_t *block, int stride)
{
    vec_s16 va0, va1, va2, va3;
    vec_s16 vz0, vz1, vz2, vz3;
    vec_s16 vtmp0, vtmp1, vtmp2, vtmp3;
    vec_u8 va_u8;
    vec_u32 va_u32;
    vec_s16 vdst_ss;
    const vec_u16 v6us = vec_splat_u16(6);
    vec_u8 vdst, vdst_orig;
    vec_u8 vdst_mask = vec_lvsl(0, dst);
    int element = ((unsigned long)dst & 0xf) >> 2;
    LOAD_ZERO;

    block[0] += 32;  /* add 32 as a DC-level for rounding */

    vtmp0 = vec_ld(0,block);
    vtmp1 = vec_sld(vtmp0, vtmp0, 8);
    vtmp2 = vec_ld(16,block);
    vtmp3 = vec_sld(vtmp2, vtmp2, 8);
    memset(block, 0, 16 * sizeof(int16_t));

    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);
    VEC_TRANSPOSE_4(va0,va1,va2,va3,vtmp0,vtmp1,vtmp2,vtmp3);
    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);

    va0 = vec_sra(va0,v6us);
    va1 = vec_sra(va1,v6us);
    va2 = vec_sra(va2,v6us);
    va3 = vec_sra(va3,v6us);

    VEC_LOAD_U8_ADD_S16_STORE_U8(va0);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va1);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va2);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va3);
}
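
/* 8x8 variant: two 1-D 8-point IDCT passes (IDCT8_1D_ALTIVEC) around an 8x8
 * transpose.  The +32 bias on dct[0] together with the sixv shift applied in
 * the store macro gives the rounded >>6, and ALTIVEC_STORE_SUM_CLIP() adds
 * each result row to the possibly unaligned destination with clipping to
 * 0..255. */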
static void h264_idct8_add_altivec(uint8_t *dst, int16_t *dct, int stride)
{
    vec_s16 s0, s1, s2, s3, s4, s5, s6, s7;
    vec_s16 d0, d1, d2, d3, d4, d5, d6, d7;
    vec_s16 idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7;

    vec_u8 perm_ldv, perm_stv;
    GET_2PERM(perm_ldv, perm_stv, dst);

    const vec_u16 onev = vec_splat_u16(1);
    const vec_u16 twov = vec_splat_u16(2);
    const vec_u16 sixv = vec_splat_u16(6);

    const vec_u8 sel = (vec_u8) {0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,-1,-1};
    LOAD_ZERO;

    dct[0] += 32; // rounding for the >>6 at the end

    s0 = vec_ld(0x00, (int16_t*)dct);
    s1 = vec_ld(0x10, (int16_t*)dct);
    s2 = vec_ld(0x20, (int16_t*)dct);
    s3 = vec_ld(0x30, (int16_t*)dct);
    s4 = vec_ld(0x40, (int16_t*)dct);
    s5 = vec_ld(0x50, (int16_t*)dct);
    s6 = vec_ld(0x60, (int16_t*)dct);
    s7 = vec_ld(0x70, (int16_t*)dct);
    memset(dct, 0, 64 * sizeof(int16_t));

    IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7,
                     d0, d1, d2, d3, d4, d5, d6, d7);

    TRANSPOSE8( d0, d1, d2, d3, d4, d5, d6, d7 );

    IDCT8_1D_ALTIVEC(d0, d1, d2, d3, d4, d5, d6, d7,
                     idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7);

    ALTIVEC_STORE_SUM_CLIP(&dst[0*stride], idct0, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[1*stride], idct1, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[2*stride], idct2, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[3*stride], idct3, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[4*stride], idct4, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[5*stride], idct5, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[6*stride], idct6, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[7*stride], idct7, perm_ldv, perm_stv, sel);
}
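
/* DC-only blocks: splat (block[0] + 32) >> 6 across a vector and add it to
 * every pixel of the 4x4 or 8x8 block.  Since the pixels are unsigned bytes,
 * the signed DC is split by vec_packsu() into dcplus = max(dc, 0) and
 * dcminus = max(-dc, 0), applied as a saturating add followed by a
 * saturating subtract. */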
static av_always_inline void h264_idct_dc_add_internal(uint8_t *dst, int16_t *block, int stride, int size)
{
    vec_s16 dc16;
    vec_u8 dcplus, dcminus, v0, v1, v2, v3, aligner;
    vec_s32 v_dc32;
    LOAD_ZERO;
    DECLARE_ALIGNED(16, int, dc);
    int i;

    dc = (block[0] + 32) >> 6;
    block[0] = 0;
    v_dc32 = vec_lde(0, &dc);
    dc16 = VEC_SPLAT16((vec_s16)v_dc32, 1);

    if (size == 4)
        dc16 = VEC_SLD16(dc16, zero_s16v, 8);
    dcplus = vec_packsu(dc16, zero_s16v);
    dcminus = vec_packsu(vec_sub(zero_s16v, dc16), zero_s16v);

    aligner = vec_lvsr(0, dst);
    dcplus = vec_perm(dcplus, dcplus, aligner);
    dcminus = vec_perm(dcminus, dcminus, aligner);

    for (i = 0; i < size; i += 4) {
        v0 = DST_LD(0, dst+0*stride);
        v1 = DST_LD(0, dst+1*stride);
        v2 = DST_LD(0, dst+2*stride);
        v3 = DST_LD(0, dst+3*stride);

        v0 = vec_adds(v0, dcplus);
        v1 = vec_adds(v1, dcplus);
        v2 = vec_adds(v2, dcplus);
        v3 = vec_adds(v3, dcplus);

        v0 = vec_subs(v0, dcminus);
        v1 = vec_subs(v1, dcminus);
        v2 = vec_subs(v2, dcminus);
        v3 = vec_subs(v3, dcminus);

        VEC_ST(v0, 0, dst+0*stride);
        VEC_ST(v1, 0, dst+1*stride);
        VEC_ST(v2, 0, dst+2*stride);
        VEC_ST(v3, 0, dst+3*stride);

        dst += 4*stride;
    }
}
static void h264_idct_dc_add_altivec(uint8_t *dst, int16_t *block, int stride)
{
    h264_idct_dc_add_internal(dst, block, stride, 4);
}

static void h264_idct8_dc_add_altivec(uint8_t *dst, int16_t *block, int stride)
{
    h264_idct_dc_add_internal(dst, block, stride, 8);
}
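
/* Add all sixteen 4x4 luma blocks of a macroblock, picking the cheapest path
 * per block from nnzc[scan8[i]]: skip blocks with no coefficients, use the
 * DC-only add when the single nonzero coefficient is the DC, and run the
 * full IDCT otherwise. */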
static void h264_idct_add16_altivec(uint8_t *dst, const int *block_offset,
                                    int16_t *block, int stride,
                                    const uint8_t nnzc[5 * 8])
{
    int i;
    for(i=0; i<16; i++){
        int nnz = nnzc[ scan8[i] ];
        if(nnz){
            if(nnz==1 && block[i*16]) h264_idct_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
            else                      h264_idct_add_altivec(dst + block_offset[i], block + i*16, stride);
        }
    }
}
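
/* Intra variant: any coded coefficients trigger the full IDCT, while a block
 * with nnz == 0 but a nonzero block[i*16] (a DC reconstructed from the
 * separately coded luma DC transform) gets the DC-only add. */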
static void h264_idct_add16intra_altivec(uint8_t *dst, const int *block_offset,
                                         int16_t *block, int stride,
                                         const uint8_t nnzc[5 * 8])
{
    int i;
    for(i=0; i<16; i++){
        if(nnzc[ scan8[i] ]) h264_idct_add_altivec(dst + block_offset[i], block + i*16, stride);
        else if(block[i*16]) h264_idct_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
    }
}
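
/* Same dispatch for 8x8 transform mode: four 8x8 blocks, stepping the scan8
 * index by four 4x4 positions at a time. */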
static void h264_idct8_add4_altivec(uint8_t *dst, const int *block_offset,
                                    int16_t *block, int stride,
                                    const uint8_t nnzc[5 * 8])
{
    int i;
    for(i=0; i<16; i+=4){
        int nnz = nnzc[ scan8[i] ];
        if(nnz){
            if(nnz==1 && block[i*16]) h264_idct8_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
            else                      h264_idct8_add_altivec(dst + block_offset[i], block + i*16, stride);
        }
    }
}
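
/* Chroma (both planes, dest[0] and dest[1]): the same per-block IDCT/DC-add
 * dispatch as the intra luma path, over scan8 indices 16..19 and 32..35. */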
static void h264_idct_add8_altivec(uint8_t **dest, const int *block_offset,
                                   int16_t *block, int stride,
                                   const uint8_t nnzc[15 * 8])
{
    int i, j;
    for (j = 1; j < 3; j++) {
        for(i = j * 16; i < j * 16 + 4; i++){
            if(nnzc[ scan8[i] ])
                h264_idct_add_altivec(dest[j-1] + block_offset[i], block + i*16, stride);
            else if(block[i*16])
                h264_idct_dc_add_altivec(dest[j-1] + block_offset[i], block + i*16, stride);
        }
    }
}
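
/* Store four 16-byte vectors as a 4-pixel-wide, 16-row-tall patch at dst:
 * the vectors go to an aligned scratch buffer first and are then copied out
 * one 32-bit word per row, which sidesteps unaligned vector stores. */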
static inline void write16x4(uint8_t *dst, int dst_stride,
                             register vec_u8 r0, register vec_u8 r1,
                             register vec_u8 r2, register vec_u8 r3) {
    DECLARE_ALIGNED(16, unsigned char, result)[64];
    uint32_t *src_int = (uint32_t *)result, *dst_int = (uint32_t *)dst;
    int int_dst_stride = dst_stride/4;

    vec_st(r0,  0, result);
    vec_st(r1, 16, result);
    vec_st(r2, 32, result);
    vec_st(r3, 48, result);
    /* FIXME: there has to be a better way!!!! */
    *dst_int = *src_int;
    *(dst_int +   int_dst_stride) = *(src_int + 1);
    *(dst_int + 2*int_dst_stride) = *(src_int + 2);
    *(dst_int + 3*int_dst_stride) = *(src_int + 3);
    *(dst_int + 4*int_dst_stride) = *(src_int + 4);
    *(dst_int + 5*int_dst_stride) = *(src_int + 5);
    *(dst_int + 6*int_dst_stride) = *(src_int + 6);
    *(dst_int + 7*int_dst_stride) = *(src_int + 7);
    *(dst_int + 8*int_dst_stride) = *(src_int + 8);
    *(dst_int + 9*int_dst_stride) = *(src_int + 9);
    *(dst_int +10*int_dst_stride) = *(src_int + 10);
    *(dst_int +11*int_dst_stride) = *(src_int + 11);
    *(dst_int +12*int_dst_stride) = *(src_int + 12);
    *(dst_int +13*int_dst_stride) = *(src_int + 13);
    *(dst_int +14*int_dst_stride) = *(src_int + 14);
    *(dst_int +15*int_dst_stride) = *(src_int + 15);
}
/** @brief readAndTranspose16x6(): performs a 6x16 transpose of data in src,
    and stores it to dst
    @todo FIXME: see if we can spare some vec_lvsl() by factorizing them
    out of unaligned_load() */
// out: o = |x-y| < a
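/* Unsigned saturating subtraction in both directions, OR'ed together, gives
 * |x - y| without widening to 16 bits; vec_cmplt() then yields an all-ones
 * byte wherever the difference is below the threshold. */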
static inline vec_u8 diff_lt_altivec ( register vec_u8 x,
                                       register vec_u8 y,
                                       register vec_u8 a) {
    register vec_u8 diff = vec_subs(x, y);
    register vec_u8 diffneg = vec_subs(y, x);
    register vec_u8 o = vec_or(diff, diffneg); /* |x-y| */
    o = (vec_u8)vec_cmplt(o, a);
    return o;
}
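
/* Per-pixel filter condition from the H.264 spec:
 * |p0 - q0| < alpha && |p1 - p0| < beta && |q1 - q0| < beta. */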
static inline vec_u8 h264_deblock_mask ( register vec_u8 p0,
                                         register vec_u8 p1,
                                         register vec_u8 q0,
                                         register vec_u8 q1,
                                         register vec_u8 alpha,
                                         register vec_u8 beta) {
    register vec_u8 mask;
    register vec_u8 tempmask;

    mask = diff_lt_altivec(p0, q0, alpha);
    tempmask = diff_lt_altivec(p1, p0, beta);
    mask = vec_and(mask, tempmask);
    tempmask = diff_lt_altivec(q1, q0, beta);
    mask = vec_and(mask, tempmask);

    return mask;
}
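
/* vec_avg() rounds up, so avg(p2, avg(p0, q0)) can be one too large; the
 * (p2 ^ avg(p0, q0)) & 1 correction subtracted below restores the exact
 * value of (p2 + ((p0 + q0 + 1) >> 1)) >> 1. */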
// out: newp1 = clip((p2 + ((p0 + q0 + 1) >> 1)) >> 1, p1-tc0, p1+tc0)
static inline vec_u8 h264_deblock_q1(register vec_u8 p0,
                                     register vec_u8 p1,
                                     register vec_u8 p2,
                                     register vec_u8 q0,
                                     register vec_u8 tc0) {

    register vec_u8 average = vec_avg(p0, q0);
    register vec_u8 temp;
    register vec_u8 unclipped;
    register vec_u8 ones;
    register vec_u8 max;
    register vec_u8 min;
    register vec_u8 newp1;

    temp = vec_xor(average, p2);
    average = vec_avg(average, p2);      /* avg(p2, avg(p0, q0)) */
    ones = vec_splat_u8(1);
    temp = vec_and(temp, ones);          /* (p2^avg(p0, q0)) & 1 */
    unclipped = vec_subs(average, temp); /* (p2+((p0+q0+1)>>1))>>1 */
    max = vec_adds(p1, tc0);
    min = vec_subs(p1, tc0);
    newp1 = vec_max(min, unclipped);
    newp1 = vec_min(max, newp1);
    return newp1;
}
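
/* Filter across a horizontal edge.  tc0[i] is negative when filtering is
 * disabled for a 4-pixel segment; the AND of the four int8 values is
 * negative only when all of them are, so the body is skipped only when the
 * whole edge is unfiltered (per-segment masking is left to
 * h264_loop_filter_luma_altivec()).  Rows are contiguous in memory, so each
 * of the six lines is a single 16-byte load. */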
static void h264_v_loop_filter_luma_altivec(uint8_t *pix, ptrdiff_t stride, int alpha, int beta, int8_t *tc0) {
    if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) >= 0) {
        register vec_u8 p2 = vec_ld(-3*stride, pix);
        register vec_u8 p1 = vec_ld(-2*stride, pix);
        register vec_u8 p0 = vec_ld(-1*stride, pix);
        register vec_u8 q0 = vec_ld(0, pix);
        register vec_u8 q1 = vec_ld(stride, pix);
        register vec_u8 q2 = vec_ld(2*stride, pix);
        h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0);
        vec_st(p1, -2*stride, pix);
        vec_st(p0, -1*stride, pix);
        vec_st(q0,  0, pix);
        vec_st(q1,  stride, pix);
    }
}
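
/* Vertical edge: the three pixels on each side of the edge lie along rows,
 * so readAndTranspose16x6() gathers the six columns into vectors, the same
 * filter core runs, and the two modified pixels on each side (p1, p0, q0,
 * q1) are transposed back and written out with write16x4(). */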
static void h264_h_loop_filter_luma_altivec(uint8_t *pix, ptrdiff_t stride, int alpha, int beta, int8_t *tc0) {
    register vec_u8 line0, line1, line2, line3, line4, line5;
    if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) < 0)
        return;
    readAndTranspose16x6(pix-3, stride, line0, line1, line2, line3, line4, line5);
    h264_loop_filter_luma_altivec(line0, line1, line2, line3, line4, line5, alpha, beta, tc0);
    transpose4x16(line1, line2, line3, line4);
    write16x4(pix-2, stride, line1, line2, line3, line4);
}
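
/* Explicit weighted prediction.  offset is pre-scaled by << log2_denom with
 * the rounding constant folded in, so each pixel becomes
 * clip_uint8((pix*weight + scaled_offset) >> log2_denom), i.e. the usual
 * ((pix*weight + 2^(log2_denom-1)) >> log2_denom) + offset.  For w == 8 only
 * the half of the vector that actually contains the block (selected by the
 * 16-byte alignment of block) is weighted; the other half is written back
 * unchanged. */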
static av_always_inline
void weight_h264_W_altivec(uint8_t *block, int stride, int height,
                           int log2_denom, int weight, int offset, int w)
{
    int y, aligned;
    vec_u8 vblock;
    vec_s16 vtemp, vweight, voffset, v0, v1;
    vec_u16 vlog2_denom;
    DECLARE_ALIGNED(16, int32_t, temp)[4];
    LOAD_ZERO;

    offset <<= log2_denom;
    if(log2_denom) offset += 1<<(log2_denom-1);
    temp[0] = log2_denom;
    temp[1] = weight;
    temp[2] = offset;

    vtemp = (vec_s16)vec_ld(0, temp);
    vtemp = (vec_s16)vec_perm(vtemp, vtemp, vcswapi2s(0,1,2,3));
    vlog2_denom = (vec_u16)vec_splat(vtemp, 1);
    vweight = vec_splat(vtemp, 3);
    voffset = vec_splat(vtemp, 5);
    aligned = !((unsigned long)block & 0xf);

    for (y = 0; y < height; y++) {
        vblock = vec_ld(0, block);

        v0 = (vec_s16)VEC_MERGEH(zero_u8v, vblock);
        v1 = (vec_s16)VEC_MERGEL(zero_u8v, vblock);

        if (w == 16 || aligned) {
            v0 = vec_mladd(v0, vweight, zero_s16v);
            v0 = vec_adds(v0, voffset);
            v0 = vec_sra(v0, vlog2_denom);
        }
        if (w == 16 || !aligned) {
            v1 = vec_mladd(v1, vweight, zero_s16v);
            v1 = vec_adds(v1, voffset);
            v1 = vec_sra(v1, vlog2_denom);
        }
        vblock = vec_packsu(v0, v1);
        vec_st(vblock, 0, block);

        block += stride;
    }
}
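
/* Bidirectional weighted prediction:
 * dst = clip_uint8((src*weights + dst*weightd + (((offset+1)|1) << log2_denom))
 *                  >> (log2_denom+1)).
 * For w == 8 the half of vsrc that holds the pixels is duplicated into both
 * halves so it lines up with whichever half of vdst (chosen by dst's
 * alignment) gets processed. */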
static av_always_inline
void biweight_h264_W_altivec(uint8_t *dst, uint8_t *src, int stride, int height,
                             int log2_denom, int weightd, int weights, int offset, int w)
{
    int y, dst_aligned, src_aligned;
    vec_u8 vsrc, vdst;
    vec_s16 vtemp, vweights, vweightd, voffset, v0, v1, v2, v3;
    vec_u16 vlog2_denom;
    DECLARE_ALIGNED(16, int32_t, temp)[4];
    LOAD_ZERO;

    offset = ((offset + 1) | 1) << log2_denom;
    temp[0] = log2_denom+1;
    temp[1] = weights;
    temp[2] = weightd;
    temp[3] = offset;

    vtemp = (vec_s16)vec_ld(0, temp);
    vtemp = (vec_s16)vec_perm(vtemp, vtemp, vcswapi2s(0,1,2,3));
    vlog2_denom = (vec_u16)vec_splat(vtemp, 1);
    vweights = vec_splat(vtemp, 3);
    vweightd = vec_splat(vtemp, 5);
    voffset = vec_splat(vtemp, 7);
    dst_aligned = !((unsigned long)dst & 0xf);
    src_aligned = !((unsigned long)src & 0xf);

    for (y = 0; y < height; y++) {
        vdst = vec_ld(0, dst);
        vsrc = vec_ld(0, src);

        v0 = (vec_s16)VEC_MERGEH(zero_u8v, vdst);
        v1 = (vec_s16)VEC_MERGEL(zero_u8v, vdst);
        v2 = (vec_s16)VEC_MERGEH(zero_u8v, vsrc);
        v3 = (vec_s16)VEC_MERGEL(zero_u8v, vsrc);

        if (w == 8) {
            if (src_aligned)
                v3 = v2;
            else
                v2 = v3;
        }

        if (w == 16 || dst_aligned) {
            v0 = vec_mladd(v0, vweightd, zero_s16v);
            v2 = vec_mladd(v2, vweights, zero_s16v);

            v0 = vec_adds(v0, voffset);
            v0 = vec_adds(v0, v2);
            v0 = vec_sra(v0, vlog2_denom);
        }
        if (w == 16 || !dst_aligned) {
            v1 = vec_mladd(v1, vweightd, zero_s16v);
            v3 = vec_mladd(v3, vweights, zero_s16v);

            v1 = vec_adds(v1, voffset);
            v1 = vec_adds(v1, v3);
            v1 = vec_sra(v1, vlog2_denom);
        }
        vdst = vec_packsu(v0, v1);
        vec_st(vdst, 0, dst);

        dst += stride;
        src += stride;
    }
}
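
/* Instantiate the 16- and 8-pixel-wide wrappers; H264_WEIGHT(W) is expected
 * to expand to weight_h264_pixels##W##_altivec() and
 * biweight_h264_pixels##W##_altivec(), which are installed below. */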
H264_WEIGHT(16)
H264_WEIGHT( 8)
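
/* Install the AltiVec implementations.  Only 8-bit depth is covered, and
 * h264_idct_add8 is restricted to chroma_format_idc <= 1, matching the
 * chroma block layout assumed by h264_idct_add8_altivec(). */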
av_cold void ff_h264dsp_init_ppc(H264DSPContext *c, const int bit_depth,
                                 const int chroma_format_idc)
{
    if (!PPC_ALTIVEC(av_get_cpu_flags()))
        return;

    if (bit_depth == 8) {
        c->h264_idct_add = h264_idct_add_altivec;
        if (chroma_format_idc <= 1)
            c->h264_idct_add8 = h264_idct_add8_altivec;
        c->h264_idct_add16      = h264_idct_add16_altivec;
        c->h264_idct_add16intra = h264_idct_add16intra_altivec;
        c->h264_idct_dc_add     = h264_idct_dc_add_altivec;
        c->h264_idct8_dc_add    = h264_idct8_dc_add_altivec;
        c->h264_idct8_add       = h264_idct8_add_altivec;
        c->h264_idct8_add4      = h264_idct8_add4_altivec;

        c->h264_v_loop_filter_luma = h264_v_loop_filter_luma_altivec;
        c->h264_h_loop_filter_luma = h264_h_loop_filter_luma_altivec;

        c->weight_h264_pixels_tab[0]   = weight_h264_pixels16_altivec;
        c->weight_h264_pixels_tab[1]   = weight_h264_pixels8_altivec;
        c->biweight_h264_pixels_tab[0] = biweight_h264_pixels16_altivec;
        c->biweight_h264_pixels_tab[1] = biweight_h264_pixels8_altivec;
    }
}