Commit 22ee2f67 authored by sletz

Another Tim Bechmann memops.c optimization patch.

git-svn-id: http://subversion.jackaudio.org/jack/jack2/trunk/jackmp@3574 0c269be4-1314-0410-8aa9-9f06e86f4224
parent 304060be
@@ -25,6 +25,10 @@ Paul Davis
Jackdmp changes log
---------------------------
2009-07-03 Stephane Letz <letz@grame.fr>
* Another Tim Bechmann memops.c optimization patch.
2009-07-01 Stephane Letz <letz@grame.fr>
* Tim Bechmann memops.c optimization patch.
@@ -164,6 +164,32 @@
}
#if defined (__SSE2__) && !defined (__sun__)
/* generates same as _mm_set_ps(1.f, 1.f, 1.f, 1.f) but faster */
static inline __m128 gen_one(void)
{
volatile __m128i x;
__m128i ones = _mm_cmpeq_epi32(x, x);
return (__m128)_mm_slli_epi32 (_mm_srli_epi32(ones, 25), 23);
}
static inline __m128 clip(__m128 s, __m128 min, __m128 max)
{
return _mm_min_ps(max, _mm_max_ps(s, min));
}
static inline __m128i float_24_sse(__m128 s)
{
const __m128 upper_bound = gen_one(); /* NORMALIZED_FLOAT_MAX */
const __m128 lower_bound = _mm_sub_ps(_mm_setzero_ps(), upper_bound);
__m128 clipped = clip(s, lower_bound, upper_bound);
__m128 scaled = _mm_mul_ps(clipped, _mm_set1_ps(SAMPLE_24BIT_SCALING));
return _mm_cvtps_epi32(scaled);
}
#endif
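Note on the new helpers: gen_one() builds the constant 1.0f in all four lanes without a memory load. _mm_cmpeq_epi32(x, x) compares a register with itself, which yields all-ones (0xFFFFFFFF) in every lane whatever the register contains (the volatile only silences the uninitialized-variable warning); shifting each lane right by 25 leaves 0x7F (127, the IEEE-754 single-precision exponent bias), and shifting left by 23 places it in the exponent field, giving 0x3F800000, the bit pattern of 1.0f. clip() and float_24_sse() then reuse that constant as the ±1.0 clipping bounds before scaling to the 24-bit range. A minimal scalar sketch of the same bit arithmetic (standalone illustration, not part of the patch):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        uint32_t ones = 0xFFFFFFFFu;         /* what _mm_cmpeq_epi32(x, x) produces per lane */
        uint32_t bits = (ones >> 25) << 23;  /* 0x7F << 23 == 0x3F800000 */
        float one;
        memcpy(&one, &bits, sizeof one);     /* reinterpret the bit pattern as a float */
        printf("0x%08X -> %f\n", (unsigned)bits, one);  /* prints 0x3F800000 -> 1.000000 */
        return 0;
    }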
/* Linear Congruential noise generator. From the music-dsp list
* less random than rand(), but good enough and 10x faster
*/
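For context, the noise generator referred to in the comment above is of the standard linear congruential shape, one multiply and one add per sample. A generic sketch (the constants here are the well-known Numerical Recipes pair, not necessarily the ones used in memops.c):

    #include <stdint.h>

    static uint32_t lcg_state = 22222;   /* arbitrary seed */

    /* One LCG step: cheap enough to call per sample when dithering. */
    static inline uint32_t fast_rand_sketch(void)
    {
        lcg_state = lcg_state * 1664525u + 1013904223u;
        return lcg_state;
    }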
@@ -254,7 +280,7 @@ void sample_move_d32u24_sS (char *dst, jack_default_audio_sample_t *src, unsigne
while (unrolled--) {
__m128 in = _mm_load_ps(src);
__m128 scaled = _mm_mul_ps(in, factor);
__m128 clipped = _mm_min_ps(int_max, _mm_max_ps(scaled, int_min));
__m128 clipped = clip(scaled, int_min, int_max);
__m128i y = _mm_cvttps_epi32(clipped);
__m128i shifted = _mm_slli_epi32(y, 8);
@@ -264,13 +290,11 @@ void sample_move_d32u24_sS (char *dst, jack_default_audio_sample_t *src, unsigne
__m128i shuffled3 = _mm_shuffle_epi32(shifted, _MM_SHUFFLE(2, 1, 0, 3));
_mm_store_ss((float*)dst, (__m128)shifted);
dst += dst_skip;
_mm_store_ss((float*)dst, (__m128)shuffled1);
dst += dst_skip;
_mm_store_ss((float*)dst, (__m128)shuffled2);
dst += dst_skip;
_mm_store_ss((float*)dst, (__m128)shuffled3);
dst += dst_skip;
_mm_store_ss((float*)(dst+dst_skip), (__m128)shuffled1);
_mm_store_ss((float*)(dst+2*dst_skip), (__m128)shuffled2);
_mm_store_ss((float*)(dst+3*dst_skip), (__m128)shuffled3);
dst += 4*dst_skip;
src+= 4;
}
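Two things change in this hunk: the explicit _mm_min_ps/_mm_max_ps pair becomes a call to the new clip() helper, and the four stores no longer interleave dst += dst_skip updates; they address dst+dst_skip, dst+2*dst_skip and dst+3*dst_skip directly and advance the pointer once, so each store address no longer waits on the previous pointer update. The shuffles exist so that each of the four converted samples ends up in lane 0, the lane _mm_store_ss writes. A scalar model of the _MM_SHUFFLE lane selection (illustration only, not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    /* Model of _mm_shuffle_epi32: result lane k takes source lane ((imm >> 2*k) & 3).
       _MM_SHUFFLE(z, y, x, w) packs the immediate as (z<<6)|(y<<4)|(x<<2)|w. */
    static void shuffle_epi32_model(const uint32_t src[4], uint32_t dst[4], int imm)
    {
        for (int k = 0; k < 4; ++k)
            dst[k] = src[(imm >> (2 * k)) & 3];
    }

    int main(void)
    {
        uint32_t in[4] = {10, 11, 12, 13}, out[4];
        int imm = (0 << 6) | (3 << 4) | (2 << 2) | 1;   /* _MM_SHUFFLE(0, 3, 2, 1) */
        shuffle_epi32_model(in, out, imm);
        printf("%u %u %u %u\n", (unsigned)out[0], (unsigned)out[1],
               (unsigned)out[2], (unsigned)out[3]);     /* 11 12 13 10: lane 1 rotated into lane 0 */
        return 0;
    }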
@@ -389,8 +413,38 @@ void sample_move_d24_sSs (char *dst, jack_default_audio_sample_t *src, unsigned
void sample_move_d24_sS (char *dst, jack_default_audio_sample_t *src, unsigned long nsamples, unsigned long dst_skip, dither_state_t *state)
{
int32_t z;
#if defined (__SSE2__) && !defined (__sun__)
_MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
while (nsamples >= 4) {
int i;
int32_t z[4];
__m128 samples = _mm_loadu_ps(src);
__m128i converted = float_24_sse(samples);
__m128i shuffled1 = _mm_shuffle_epi32(converted, _MM_SHUFFLE(0, 3, 2, 1));
__m128i shuffled2 = _mm_shuffle_epi32(converted, _MM_SHUFFLE(1, 0, 3, 2));
__m128i shuffled3 = _mm_shuffle_epi32(converted, _MM_SHUFFLE(2, 1, 0, 3));
_mm_store_ss((float*)z, (__m128)converted);
_mm_store_ss((float*)z+1, (__m128)shuffled1);
_mm_store_ss((float*)z+2, (__m128)shuffled2);
_mm_store_ss((float*)z+3, (__m128)shuffled3);
for (i = 0; i != 4; ++i) {
#if __BYTE_ORDER == __LITTLE_ENDIAN
memcpy (dst, z+i, 3);
#elif __BYTE_ORDER == __BIG_ENDIAN
memcpy (dst, (float*)((char *)&z + 1)+i, 3);
#endif
dst += dst_skip;
}
nsamples -= 4;
src += 4;
}
#endif
int32_t z;
while (nsamples--) {
float_24 (*src, z);
#if __BYTE_ORDER == __LITTLE_ENDIAN
@@ -401,7 +455,7 @@ void sample_move_d24_sS (char *dst, jack_default_audio_sample_t *src, unsigned l
dst += dst_skip;
src++;
}
}
}
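The new SSE2 block in sample_move_d24_sS converts four samples per iteration: clip to ±1.0 (the gen_one() constant), scale by SAMPLE_24BIT_SCALING, round with _mm_cvtps_epi32 under the round-to-nearest mode set at the top of the block, then copy the low three bytes of each 32-bit result to the strided destination. The original scalar loop after the #endif still handles the remaining nsamples % 4 samples. A rough scalar reference for one such conversion on a little-endian target, assuming SAMPLE_24BIT_SCALING is the 24-bit full-scale factor (the real float_24() macro is defined elsewhere in memops.c):

    #include <math.h>
    #include <stdint.h>
    #include <string.h>

    static void float_to_24bit_le(float s, char *dst)
    {
        const float scale = 8388607.0f;          /* assumed value of SAMPLE_24BIT_SCALING (2^23 - 1) */
        if (s > 1.0f)  s = 1.0f;                 /* clip to the normalized range, as clip() does */
        if (s < -1.0f) s = -1.0f;
        int32_t z = (int32_t) lrintf(s * scale); /* round to nearest, like _mm_cvtps_epi32 above */
        memcpy(dst, &z, 3);                      /* little-endian: the low 3 bytes hold the sample */
    }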
void sample_move_dS_s24s (jack_default_audio_sample_t *dst, char *src, unsigned long nsamples, unsigned long src_skip)
{
@@ -443,7 +497,7 @@ void sample_move_dS_s24 (jack_default_audio_sample_t *dst, char *src, unsigned l
#if defined (__SSE2__) && !defined (__sun__)
const __m128 scaling_block = _mm_set_ps1(scaling);
while (nsamples > 4) {
while (nsamples >= 4) {
int x0, x1, x2, x3;
#if __BYTE_ORDER == __LITTLE_ENDIAN
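The last hunk fixes the vector-loop bound in sample_move_dS_s24: with "nsamples > 4" a remaining block of exactly four samples was left to the scalar tail, while ">= 4" lets the SSE path consume it, leaving only nsamples % 4 scalar iterations. A small illustration with a hypothetical count (not patch code):

    #include <stdio.h>

    int main(void)
    {
        unsigned long nsamples = 8, vector_blocks = 0;
        while (nsamples >= 4) {     /* the old code tested: nsamples > 4 */
            vector_blocks++;        /* one 4-sample SSE iteration */
            nsamples -= 4;
        }
        /* prints "2 blocks, 0 scalar samples"; with "> 4" it would be 1 and 4 */
        printf("%lu blocks, %lu scalar samples\n", vector_blocks, nsamples);
        return 0;
    }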