28#if !defined(_SPANDSP_SATURATED_H_)
29#define _SPANDSP_SATURATED_H_
40#if defined(__cplusplus)
/*! Saturate a 32 bit value to the 16 bit signed range.
    \param amp The input value.
    \return amp clamped to [INT16_MIN, INT16_MAX]. */
static __inline__ int16_t saturate16(int32_t amp)
{
    /* Portable form of the ARM "ssat #16" operation; modern compilers
       recognise this clamp pattern and emit the saturating instruction
       where the target has one. */
    if (amp > INT16_MAX)
        return INT16_MAX;
    if (amp < INT16_MIN)
        return INT16_MIN;
    return (int16_t) amp;
}
/*! Saturate a 32 bit value to the 15 bit signed range.
    \param amp The input value.
    \return amp clamped to [-16384, 16383]. */
static __inline__ int16_t saturate15(int32_t amp)
{
    /* Portable form of the ARM "ssat #15" operation. */
    if (amp > 16383)
        return 16383;
    if (amp < -16384)
        return -16384;
    return (int16_t) amp;
}
/*! Saturate a 32 bit signed value to the 16 bit unsigned range.
    \param amp The input value.
    \return amp clamped to [0, UINT16_MAX]. */
static __inline__ uint16_t saturateu16(int32_t amp)
{
    /* Portable form of the ARM "usat #16" operation. */
    if (amp > UINT16_MAX)
        return UINT16_MAX;
    if (amp < 0)
        return 0;
    return (uint16_t) amp;
}
/*! Saturate a 32 bit signed value to the 8 bit unsigned range.
    \param amp The input value.
    \return amp clamped to [0, UINT8_MAX]. */
static __inline__ uint8_t saturateu8(int32_t amp)
{
    /* Portable form of the ARM "usat #8" operation. */
    if (amp > UINT8_MAX)
        return UINT8_MAX;
    if (amp < 0)
        return 0;
    return (uint8_t) amp;
}
142static __inline__ int16_t fsaturatef(
float famp)
144 if (famp > (
float) INT16_MAX)
146 if (famp < (
float) INT16_MIN)
148 return (int16_t) lrintf(famp);
152static __inline__ int16_t fsaturate(
double damp)
154 if (damp > (
double) INT16_MAX)
156 if (damp < (
double) INT16_MIN)
158 return (int16_t) lrint(damp);
/*! Saturate a floating point value to the 16 bit signed integer range,
    using the fast (possibly less accurate) rounding helper.
    \param famp The input value.
    \return famp clamped to [INT16_MIN, INT16_MAX] and rounded to integer.
    NOTE(review): lfastrintf() is a project helper (fast_convert.h); its
    rounding behaviour is assumed to match lrintf() — confirm. */
static __inline__ int16_t ffastsaturatef(float famp)
{
    /* Clamp before converting so the fast rint never sees an
       out-of-range value. */
    if (famp > (float) INT16_MAX)
        return INT16_MAX;
    if (famp < (float) INT16_MIN)
        return INT16_MIN;
    return (int16_t) lfastrintf(famp);
}
/*! Saturate a double precision value to the 16 bit signed integer range,
    using the fast (possibly less accurate) rounding helper.
    \param damp The input value.
    \return damp clamped to [INT16_MIN, INT16_MAX] and rounded to integer.
    NOTE(review): lfastrint() is a project helper (fast_convert.h); its
    rounding behaviour is assumed to match lrint() — confirm. */
static __inline__ int16_t ffastsaturate(double damp)
{
    /* Clamp before converting so the fast rint never sees an
       out-of-range value. */
    if (damp > (double) INT16_MAX)
        return INT16_MAX;
    if (damp < (double) INT16_MIN)
        return INT16_MIN;
    return (int16_t) lfastrint(damp);
}
/*! Limit a floating point value to the 16 bit signed integer range,
    without converting to integer.
    \param famp The input value.
    \return famp clamped to [INT16_MIN, INT16_MAX], still as a float. */
static __inline__ float ffsaturatef(float famp)
{
    if (famp > (float) INT16_MAX)
        return (float) INT16_MAX;
    if (famp < (float) INT16_MIN)
        return (float) INT16_MIN;
    return famp;
}
/*! Limit a double precision value to the 16 bit signed integer range,
    without converting to integer.
    \param famp The input value.
    \return famp clamped to [INT16_MIN, INT16_MAX], still as a double. */
static __inline__ double ffsaturate(double famp)
{
    if (famp > (double) INT16_MAX)
        return (double) INT16_MAX;
    if (famp < (double) INT16_MIN)
        return (double) INT16_MIN;
    return famp;
}
/*! Saturated 16 bit addition (ARM qadd16 / x86 flag-checked add semantics).
    \param x The first addend.
    \param y The second addend.
    \return x + y, clamped to [INT16_MIN, INT16_MAX]. */
static __inline__ int16_t sat_add16(int16_t x, int16_t y)
{
    /* Widen to 32 bits so the raw sum can never overflow, then clamp.
       Compilers turn this into the native saturating add where one exists. */
    int32_t sum = (int32_t) x + y;

    if (sum > INT16_MAX)
        return INT16_MAX;
    if (sum < INT16_MIN)
        return INT16_MIN;
    return (int16_t) sum;
}
/*! Saturated 32 bit addition (ARM qadd / x86 flag-checked add semantics).
    \param x The first addend.
    \param y The second addend.
    \return x + y, clamped to [INT32_MIN, INT32_MAX]. */
static __inline__ int32_t sat_add32(int32_t x, int32_t y)
{
    /* Widen to 64 bits to avoid signed overflow (undefined behaviour in C),
       then clamp back to the 32 bit range. */
    int64_t sum = (int64_t) x + y;

    if (sum > INT32_MAX)
        return INT32_MAX;
    if (sum < INT32_MIN)
        return INT32_MIN;
    return (int32_t) sum;
}
/*! Saturated 16 bit subtraction (ARM qsub16 semantics).
    \param x The minuend.
    \param y The subtrahend.
    \return x - y, clamped to [INT16_MIN, INT16_MAX]. */
static __inline__ int16_t sat_sub16(int16_t x, int16_t y)
{
    /* Widen to 32 bits so the raw difference can never overflow. */
    int32_t diff = (int32_t) x - y;

    if (diff > INT16_MAX)
        return INT16_MAX;
    if (diff < INT16_MIN)
        return INT16_MIN;
    return (int16_t) diff;
}
/*! Saturated 32 bit subtraction (ARM qsub semantics).
    \param x The minuend.
    \param y The subtrahend.
    \return x - y, clamped to [INT32_MIN, INT32_MAX]. */
static __inline__ int32_t sat_sub32(int32_t x, int32_t y)
{
    /* Widen to 64 bits to avoid signed overflow (undefined behaviour in C). */
    int64_t diff = (int64_t) x - y;

    if (diff > INT32_MAX)
        return INT32_MAX;
    if (diff < INT32_MIN)
        return INT32_MIN;
    return (int32_t) diff;
}
/*! Saturated Q15 fixed point 16x16 multiply.
    \param x The first operand, in Q15 format.
    \param y The second operand, in Q15 format.
    \return (x*y) >> 15, saturated.  The only case that can overflow is
            INT16_MIN*INT16_MIN, which would produce +1.0 (0x8000). */
static __inline__ int16_t sat_mul16(int16_t x, int16_t y)
{
    int32_t z = (int32_t) x*y;

    /* 0x40000000 arises only from (-32768)*(-32768); doubling it (the
       ARM qadd step) would overflow, so saturate to INT16_MAX. */
    if (z == 0x40000000)
        return INT16_MAX;
    /* Arithmetic right shift of a negative value is implementation
       defined, but all supported compilers sign extend, as the original
       code already relied on. */
    return (int16_t) (z >> 15);
}
/*! Saturated 16x16 multiply, producing a doubled (Q31) 32 bit result.
    \param x The first operand, in Q15 format.
    \param y The second operand, in Q15 format.
    \return 2*x*y, saturated to [INT32_MIN, INT32_MAX]. */
static __inline__ int32_t sat_mul32_16(int16_t x, int16_t y)
{
    int32_t z = (int32_t) x*y;

    /* Only (-32768)*(-32768) == 0x40000000 overflows when doubled. */
    if (z == 0x40000000)
        return INT32_MAX;
    /* z*2 instead of z << 1: shifting a negative value left is undefined
       behaviour; the multiply cannot overflow once 0x40000000 is excluded. */
    return z*2;
}
/*! Saturated multiply-accumulate: z + 2*x*y (ARM qdadd semantics).
    \param z The accumulator.
    \param x The first multiplicand, in Q15 format.
    \param y The second multiplicand, in Q15 format.
    \return z + 2*x*y, with both the doubling and the addition saturated. */
static __inline__ int32_t sat_mac32_16(int32_t z, int16_t x, int16_t y)
{
    /* Doubled product, computed in 64 bits so nothing overflows.  Only
       (-32768)*(-32768) can exceed INT32_MAX when doubled; clamp it the
       way qdadd does before accumulating. */
    int64_t p2 = 2*(int64_t) x*y;
    int64_t sum;

    if (p2 > INT32_MAX)
        p2 = INT32_MAX;
    sum = (int64_t) z + p2;
    if (sum > INT32_MAX)
        return INT32_MAX;
    if (sum < INT32_MIN)
        return INT32_MIN;
    return (int32_t) sum;
}
/*! Saturated multiply-subtract: z - 2*x*y (ARM qdsub semantics).
    \param z The accumulator.
    \param x The first multiplicand, in Q15 format.
    \param y The second multiplicand, in Q15 format.
    \return z - 2*x*y, with both the doubling and the subtraction saturated. */
static __inline__ int32_t sat_msu32_16(int32_t z, int16_t x, int16_t y)
{
    /* Doubled product, computed in 64 bits so nothing overflows.  Only
       (-32768)*(-32768) can exceed INT32_MAX when doubled; clamp it the
       way qdsub does before subtracting. */
    int64_t p2 = 2*(int64_t) x*y;
    int64_t diff;

    if (p2 > INT32_MAX)
        p2 = INT32_MAX;
    diff = (int64_t) z - p2;
    if (diff > INT32_MAX)
        return INT32_MAX;
    if (diff < INT32_MIN)
        return INT32_MIN;
    return (int32_t) diff;
}
/*! Saturated 16 bit absolute value.
    \param x The input value.
    \return |x|, with INT16_MIN mapped to INT16_MAX (a bare abs() would
            wrap: abs(-32768) truncated to int16_t is -32768 again). */
static __inline__ int16_t sat_abs16(int16_t x)
{
    if (x == INT16_MIN)
        return INT16_MAX;
    return (int16_t) abs(x);
}
/*! Saturated 32 bit absolute value.
    \param x The input value.
    \return |x|, with INT32_MIN mapped to INT32_MAX (negating INT32_MIN
            is signed overflow, which is undefined behaviour). */
static __inline__ int32_t sat_abs32(int32_t x)
{
    if (x == INT32_MIN)
        return INT32_MAX;
    return (x < 0)  ?  -x  :  x;
}
416#if defined(__cplusplus)