/*
 * Copyright (c) 2010 Mans Rullgard <[email protected]>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
/**
 * Read an unaligned 16-bit big-endian value from memory.
 *
 * Bytes are fetched one at a time through the accumulator with the
 * post-increment addressing mode "(%1+)"; the "+a" constraint keeps
 * the (advanced) pointer in the accumulator-capable register class.
 * "rol8" presumably rotates the accumulator left by 8 bits — it is
 * used here to put the first (most significant) byte into the high
 * half before the second byte is added in.
 *
 * @param p pointer to the first (most significant) byte; no alignment
 *          requirement, since access is byte-wise
 * @return  the 16-bit value in host order
 */
static av_always_inline uint16_t AV_RB16(const void *p)
{
    uint16_t v;
    __asm__ ("loadacc, (%1+) \n\t"   /* acc = p[0], p++            */
             "rol8 \n\t"             /* acc <<= 8 (high byte)      */
             "storeacc, %0 \n\t"     /* v = acc                    */
             "loadacc, (%1+) \n\t"   /* acc = p[1], p++            */
             "add, %0 \n\t"          /* v += acc (low byte)        */
             : "=r"(v), "+a"(p));
    return v;
}
/**
 * Write a 16-bit value to memory in big-endian byte order.
 *
 * "lsr8" presumably shifts the accumulator logically right by 8 bits,
 * exposing the high byte first; each byte is stored through the
 * post-incrementing accumulator store "(%0+)". The asm is volatile
 * (and the pointer operand early-clobbered, "+&a") because its only
 * effect is the memory store, which the compiler must not elide.
 *
 * @param p destination pointer (byte-wise access, no alignment needed)
 * @param v value to store, most significant byte first
 */
static av_always_inline void AV_WB16(void *p, uint16_t v)
{
    __asm__ volatile ("loadacc, %1 \n\t"      /* acc = v             */
                      "lsr8 \n\t"             /* acc >>= 8           */
                      "storeacc, (%0+) \n\t"  /* p[0] = high byte    */
                      "loadacc, %1 \n\t"      /* acc = v again       */
                      "storeacc, (%0+) \n\t"  /* p[1] = low byte     */
                      : "+&a"(p) : "r"(v));
}
/**
 * Read an unaligned 16-bit little-endian value from memory.
 *
 * Mirror image of AV_RB16: the first byte read is the LOW byte and is
 * stored as-is; the second byte is rotated up by 8 ("rol8") before
 * being added, so it lands in the high half.
 *
 * @param p pointer to the first (least significant) byte
 * @return  the 16-bit value in host order
 */
static av_always_inline uint16_t AV_RL16(const void *p)
{
    uint16_t v;
    __asm__ ("loadacc, (%1+) \n\t"   /* acc = p[0] (low byte)      */
             "storeacc, %0 \n\t"     /* v = acc                    */
             "loadacc, (%1+) \n\t"   /* acc = p[1] (high byte)     */
             "rol8 \n\t"             /* acc <<= 8                  */
             "add, %0 \n\t"          /* v += acc                   */
             : "=r"(v), "+a"(p));
    return v;
}
/**
 * Write a 16-bit value to memory in little-endian byte order.
 *
 * Stores the low byte first, then shifts the accumulator right by 8
 * ("lsr8") and stores the high byte. Note that unlike AV_WB16 the
 * value is loaded into the accumulator only once, since the bytes are
 * consumed low-to-high.
 *
 * @param p destination pointer (byte-wise access, no alignment needed)
 * @param v value to store, least significant byte first
 */
static av_always_inline void AV_WL16(void *p, uint16_t v)
{
    __asm__ volatile ("loadacc, %1 \n\t"      /* acc = v             */
                      "storeacc, (%0+) \n\t"  /* p[0] = low byte     */
                      "lsr8 \n\t"             /* acc >>= 8           */
                      "storeacc, (%0+) \n\t"  /* p[1] = high byte    */
                      : "+&a"(p) : "r"(v));
}
/**
 * Read an unaligned 32-bit big-endian value from memory.
 *
 * Each byte is fetched with the post-incrementing load "(%1+)" and
 * rotated into position with the appropriate number of "rol8" steps
 * (3 for the most significant byte, down to 0 for the least) before
 * being accumulated into the result with "add".
 *
 * @param p pointer to the first (most significant) byte
 * @return  the 32-bit value in host order
 */
static av_always_inline uint32_t AV_RB32(const void *p)
{
    uint32_t v;
    __asm__ ("loadacc, (%1+) \n\t"   /* byte 0 -> bits 31..24       */
             "rol8 \n\t"
             "rol8 \n\t"
             "rol8 \n\t"
             "storeacc, %0 \n\t"
             "loadacc, (%1+) \n\t"   /* byte 1 -> bits 23..16       */
             "rol8 \n\t"
             "rol8 \n\t"
             "add, %0 \n\t"
             "loadacc, (%1+) \n\t"   /* byte 2 -> bits 15..8        */
             "rol8 \n\t"
             "add, %0 \n\t"
             "loadacc, (%1+) \n\t"   /* byte 3 -> bits 7..0         */
             "add, %0 \n\t"
             : "=r"(v), "+a"(p));
    return v;
}
/**
 * Write a 32-bit value to memory in big-endian byte order.
 *
 * The pointer is first advanced to one past the end ("loadacc, #4" /
 * "add, %0"), then the bytes are stored back-to-front with what is
 * presumably a pre-decrement store "(-%0)": the low byte lands at
 * p[3] first, and successive "lsr8" shifts expose the higher bytes,
 * which are stored at decreasing addresses — TODO confirm the
 * (-%0) addressing-mode semantics against the ISA manual.
 *
 * @param p destination pointer (byte-wise access, no alignment needed)
 * @param v value to store, most significant byte first
 */
static av_always_inline void AV_WB32(void *p, uint32_t v)
{
    __asm__ volatile ("loadacc, #4 \n\t"      /* acc = 4             */
                      "add, %0 \n\t"          /* p += 4              */
                      "loadacc, %1 \n\t"      /* acc = v             */
                      "storeacc, (-%0) \n\t"  /* p[3] = byte 3 (lo)  */
                      "lsr8 \n\t"
                      "storeacc, (-%0) \n\t"  /* p[2] = byte 2       */
                      "lsr8 \n\t"
                      "storeacc, (-%0) \n\t"  /* p[1] = byte 1       */
                      "lsr8 \n\t"
                      "storeacc, (-%0) \n\t"  /* p[0] = byte 0 (hi)  */
                      : "+&a"(p) : "r"(v));
}
/**
 * Read an unaligned 32-bit little-endian value from memory.
 *
 * Mirror image of AV_RB32: the first byte fetched is the least
 * significant and needs no rotation; each subsequent byte gets one
 * more "rol8" step (1, 2, then 3) to move it into its position
 * before being added to the result.
 *
 * @param p pointer to the first (least significant) byte
 * @return  the 32-bit value in host order
 */
static av_always_inline uint32_t AV_RL32(const void *p)
{
    uint32_t v;
    __asm__ ("loadacc, (%1+) \n\t"   /* byte 0 -> bits 7..0         */
             "storeacc, %0 \n\t"
             "loadacc, (%1+) \n\t"   /* byte 1 -> bits 15..8        */
             "rol8 \n\t"
             "add, %0 \n\t"
             "loadacc, (%1+) \n\t"   /* byte 2 -> bits 23..16       */
             "rol8 \n\t"
             "rol8 \n\t"
             "add, %0 \n\t"
             "loadacc, (%1+) \n\t"   /* byte 3 -> bits 31..24       */
             "rol8 \n\t"
             "rol8 \n\t"
             "rol8 \n\t"
             "add, %0 \n\t"
             : "=r"(v), "+a"(p));
    return v;
}
/**
 * Write a 32-bit value to memory in little-endian byte order.
 *
 * Loads the value into the accumulator once, then alternates
 * post-incrementing byte stores "(%0+)" with "lsr8" right-shifts so
 * the bytes are emitted low-to-high at increasing addresses.
 *
 * @param p destination pointer (byte-wise access, no alignment needed)
 * @param v value to store, least significant byte first
 */
static av_always_inline void AV_WL32(void *p, uint32_t v)
{
    __asm__ volatile ("loadacc, %1 \n\t"      /* acc = v             */
                      "storeacc, (%0+) \n\t"  /* p[0] = byte 0 (lo)  */
                      "lsr8 \n\t"
                      "storeacc, (%0+) \n\t"  /* p[1] = byte 1       */
                      "lsr8 \n\t"
                      "storeacc, (%0+) \n\t"  /* p[2] = byte 2       */
                      "lsr8 \n\t"
                      "storeacc, (%0+) \n\t"  /* p[3] = byte 3 (hi)  */
                      : "+&a"(p) : "r"(v));
}