author    | Daniil Cherednik <dan.cherednik@gmail.com> | 2023-12-12 23:17:35 +0100
committer | Daniil Cherednik <dan.cherednik@gmail.com> | 2023-12-12 23:17:35 +0100
commit    | 0f08bc5487adffde5f3e0aff30ffe41b53a21bf1 (patch)
tree      | dce92f6daa256f17d8e292e1578605af2f2218f9 /lib/src
parent    | 93a9691246574b6d51ddbbdb5be3dceb89ff24e9 (diff)
download  | libfshift-main.tar.gz
- f must be a divisor of 44100 and must be positive
- the 44100 Hz sample rate is hardcoded
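Context for the first point: calc_carrier_size() in dsp.c below sizes the carrier table as 44100 / f samples, i.e. one period of the shift frequency at the hardcoded 44100 Hz rate. If f does not divide 44100, the integer division truncates and the table no longer spans a whole period, so the carrier jumps in phase each time the read position wraps. A minimal sketch of a pre-flight check (hypothetical helper, not part of this commit):

    #include <stdio.h>

    /* Hypothetical validity check mirroring calc_carrier_size() in dsp.c:
     * the carrier table must hold an integral number of samples per period,
     * i.e. 44100 % f == 0, and f must be positive. */
    static int fshift_freq_is_valid(float f)
    {
        int fi = (int)f;
        return f > 0 && (float)fi == f && 44100 % fi == 0;
    }

    int main(void)
    {
        printf("f = 100 Hz: %s\n", fshift_freq_is_valid(100.0f) ? "ok" : "rejected"); /* 441 samples per period */
        printf("f = 440 Hz: %s\n", fshift_freq_is_valid(440.0f) ? "ok" : "rejected"); /* 44100/440 is not integral */
        return 0;
    }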
Diffstat (limited to 'lib/src')
-rw-r--r-- | lib/src/dsp.c               |  256 |
-rw-r--r-- | lib/src/fft/pffft/README.md |  320 |
-rw-r--r-- | lib/src/fft/pffft/pffft.c   | 1884 |
-rw-r--r-- | lib/src/fft/pffft/pffft.h   |  177 |
4 files changed, 2637 insertions, 0 deletions
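For orientation before the diff (a reading of dsp.c, not an authoritative description): the shift is done as single-sideband style mixing. Per block of 2*delay_sz samples, do_hilbert_transform() uses a half-sine window with 50% overlap-add to produce the delayed input x_d[n] (out2) and its Hilbert transform x_h[n] (out1, obtained by zeroing the DC/Nyquist bins and multiplying the remaining FFT bins by -j); do_fshift() then mixes the pair with a tabulated carrier, roughly y[n] = x_h[n]*sin(2*pi*f*n/44100) - x_d[n]*cos(2*pi*f*n/44100), which moves the spectrum by f Hz while suppressing the image sideband that plain amplitude modulation would create.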
diff --git a/lib/src/dsp.c b/lib/src/dsp.c new file mode 100644 index 0000000..45192bf --- /dev/null +++ b/lib/src/dsp.c @@ -0,0 +1,256 @@ +#include <libfshift.h> + +#include <fft/pffft/pffft.h> + +#include <stddef.h> +#include <stdlib.h> +#include <string.h> +#include <math.h> + +#include <malloc.h> + +//del +#include <stdio.h> + +//////////////////////////////////////////////////////////////////////////////// + +struct hilbert_ctx { + PFFFT_Setup* fft_ctx; + float *pcm_in; + float *fft_bins; + float *pcm_out1; + float *pcm_out2; + const float* half_win; +}; + +struct fshift_ctx { + struct hilbert_ctx hilbert_ctx; + float *carrier; + size_t carrier_sz; + size_t carrier_cos_offset; + size_t carrier_cur_pos; + size_t delay_sz; + float fshift; +}; + +static void fill_half_win(float* w, int sz) +{ + int i; + const float n = sz * 2.0 - 1; + for (i = 0; i < sz; i++) + w[i] = sinf(M_PI * (float)i / n); +} + +static size_t fill_carrier(float* s, int sz, float shift) +{ +// fprintf(stderr, "fill car: %d %f\n", sz, shift); + shift /= 44100; + size_t cos_offset = 0; + s[0] = sinf(2 * M_PI * shift * (float)0); + float prev = s[0]; + for (int i = 1; i < sz; i++) { + s[i] = sinf(2 * M_PI * shift * (float)i); + if (!cos_offset) { + if (s[i] < prev) { + cos_offset = i - 1; + //fprintf(stderr, "cos offset: %d\n", (int)cos_offset); + } else { + prev = s[i]; + } + } + //fprintf(stderr, "carrier: %d %f\n", i, s[i]); + } + return cos_offset; +} + +static void do_hilbert_transform(struct hilbert_ctx* ctx, const float* const in, float* out1, float* out2, int sz) +{ + int i, j; + float* cur = ctx->pcm_in + sz; + const float scale = 1.0 / (sz * 2); + + // Fill second half of buffer + for (i = 0, j = sz - 1; i < sz; i++, j--) { + //fprintf(stderr, "%d in: %f , win %f\n", i, in[i], *(ctx->half_win + j)); + *cur++ = in[i] * (*(ctx->half_win + j)); + } + +// for (i = 0; i < sz * 2; i++) +// fprintf(stderr, "in %d: %f\n", i, *(ctx->pcm_in + i)); + + pffft_transform_ordered(ctx->fft_ctx, ctx->pcm_in, ctx->fft_bins, NULL, PFFFT_FORWARD); + + // Fill first half of buffer + cur = ctx->pcm_in; + for (i = 0; i < sz; i++) + *cur++ = in[i] * (*(ctx->half_win + i)); + + // Scale fft result + for (i = 0; i < sz * 2; i++) + *(ctx->fft_bins + i) *= scale; + + // Zero DC + *(ctx->fft_bins) = 0.0; + *(ctx->fft_bins + 1) = 0.0; + + for (i = 0; i < sz; i++) + out2[i] = *(ctx->pcm_out2 + i); + + pffft_transform_ordered(ctx->fft_ctx, ctx->fft_bins, ctx->pcm_out2, NULL, PFFFT_BACKWARD); + + for (i = 0; i < sz; i++) { + out2[i] += (*(ctx->pcm_out2 + i) * (*(ctx->half_win + i))); + *(ctx->pcm_out2 + i) = *(ctx->pcm_out2 + i + sz) * (*(ctx->half_win + sz - 1 - i)); + } + + // Multiply to -j + for (i = 2; i < sz * 2; i+=2) { + //fprintf(stderr, "fft %d: %f\n", i, *(ctx->fft_bins + i)); + float t = *(ctx->fft_bins + i); + *(ctx->fft_bins + i) = *(ctx->fft_bins + i + 1); + *(ctx->fft_bins + i + 1) = -t; + } + +// for (i = 0; i < sz * 2; i++) +// fprintf(stderr, "fft %d: %f\n", i, *(ctx->fft_bins + i)); + + for (i = 0; i < sz; i++) + out1[i] = *(ctx->pcm_out1 + i); + + pffft_transform_ordered(ctx->fft_ctx, ctx->fft_bins, ctx->pcm_out1, NULL, PFFFT_BACKWARD); + +// for (i = 0; i < sz * 2; i++) +// fprintf(stderr, "out %d: %f\n", i, *(ctx->pcm_out1 + i)); + + for (i = 0; i < sz; i++) { + out1[i] += (*(ctx->pcm_out1 + i) * (*(ctx->half_win + i))); + *(ctx->pcm_out1 + i) = *(ctx->pcm_out1 + i + sz) * (*(ctx->half_win + sz - 1 - i)); + } + +// for (i = 0; i < sz; i++) +// fprintf(stderr, "res out %d: %f\n", i, out1[i]); + +} + +static size_t 
calc_delay_sz(uint8_t sz) +{ + return 1 << (sz - 1); +} + +static size_t calc_carrier_size(float shift) +{ + return 44100 / shift; +} + +// n - number of samples to process +static int init_hilbert_ctx(struct hilbert_ctx *ctx, int n) +{ + memset(ctx, 0, sizeof(struct hilbert_ctx)); + int fft_sz = n * 2; + + fprintf(stderr, "fft sz: %d\n", n); + ctx->fft_ctx = pffft_new_setup(fft_sz, PFFFT_REAL); + if (!ctx->fft_ctx) + goto err; + + ctx->pcm_in = pffft_aligned_malloc(sizeof(float) * fft_sz); + if (!ctx->pcm_in) + goto err; + + ctx->fft_bins = pffft_aligned_malloc(sizeof(float) * fft_sz); + if (!ctx->fft_bins) + goto err; + + ctx->pcm_out1 = pffft_aligned_malloc(sizeof(float) * fft_sz); + if (!ctx->pcm_out1) + goto err; + + ctx->pcm_out2 = pffft_aligned_malloc(sizeof(float) * fft_sz); + if (!ctx->pcm_out2) + goto err; + + + float *w = memalign(16, sizeof(float) * n); + if (!w) + goto err; + + fill_half_win(w, n); + ctx->half_win = w; + memset(ctx->pcm_in, 0, sizeof(float) * fft_sz); + memset(ctx->pcm_out1, 0, sizeof(float) * fft_sz); + memset(ctx->pcm_out2, 0, sizeof(float) * fft_sz); + + return 0; + +err: + return -1; +} + +static void free_hilbert_ctx(struct hilbert_ctx *ctx) +{ + if (ctx->half_win) + free((void*)ctx->half_win); + + if (ctx->pcm_out2) + pffft_aligned_free(ctx->pcm_out2); + + if (ctx->pcm_out1) + pffft_aligned_free(ctx->pcm_out1); + + if (ctx->fft_bins) + pffft_aligned_free(ctx->fft_bins); + + if (ctx->pcm_in) + pffft_aligned_free(ctx->pcm_in); + + if (ctx->fft_ctx) + pffft_destroy_setup(ctx->fft_ctx); +} + +fshift_ctx_t fshift_create_ctx(float fshift, uint8_t sz) +{ + const size_t ctx_sz = sizeof(struct fshift_ctx); + const size_t delay_sz = calc_delay_sz(sz); + const size_t carrier_sz = calc_carrier_size(fshift); + size_t cos_offset = 0; + fshift_ctx_t ctx = malloc(ctx_sz + sizeof(float) * carrier_sz); + ctx->carrier = ((void*)ctx) + ctx_sz; + ctx->carrier_sz = carrier_sz; + ctx->delay_sz = delay_sz; + memset(ctx->carrier, 0, (sizeof(float) * carrier_sz)); + cos_offset = fill_carrier(ctx->carrier, carrier_sz, fshift); + ctx->carrier_cos_offset = cos_offset; + ctx->carrier_cur_pos = 0; + ctx->fshift = fshift; + init_hilbert_ctx(&ctx->hilbert_ctx, delay_sz * 2); + return ctx; +} + +void fshift_free_ctx(fshift_ctx_t ctx) +{ + free_hilbert_ctx(&ctx->hilbert_ctx); + free(ctx); +} + +static void do_fshift(fshift_ctx_t ctx, float* out1, const float* out2) +{ + size_t cur = ctx->carrier_cur_pos; + size_t block_sz = ctx->delay_sz * 2; + for (int i = 0; i < block_sz; i++, cur++) { + size_t sin_pos = cur % ctx->carrier_sz; + size_t cos_pos = (cur + ctx->carrier_cos_offset ) % ctx->carrier_sz; + float s = *(ctx->carrier + sin_pos); + float c = *(ctx->carrier + cos_pos); + //fprintf(stderr, "dsp: %d, %f %f\n", sin_pos, s, c); + out1[i] = out1[i] * s - out2[i] * c; + //out1[i] = out1[i] * s + out2[i] * c; + cur = sin_pos; + } + ctx->carrier_cur_pos = cur; +} + +void fshift_run(fshift_ctx_t ctx, const float* const in, float* out1, float* out2) +{ + do_hilbert_transform(&ctx->hilbert_ctx, in, out1, out2, ctx->delay_sz * 2); + do_fshift(ctx, out1, out2); +} diff --git a/lib/src/fft/pffft/README.md b/lib/src/fft/pffft/README.md new file mode 100644 index 0000000..5f741aa --- /dev/null +++ b/lib/src/fft/pffft/README.md @@ -0,0 +1,320 @@ +PFFFT: a pretty fast FFT. + +## TL;DR + +PFFFT does 1D Fast Fourier Transforms, of single precision real and +complex vectors. It tries do it fast, it tries to be correct, and it +tries to be small. 
Computations do take advantage of SSE1 instructions +on x86 cpus, Altivec on powerpc cpus, and NEON on ARM cpus. The +license is BSD-like. + + +## Why does it exist: + +I was in search of a good performing FFT library , preferably very +small and with a very liberal license. + +When one says "fft library", FFTW ("Fastest Fourier Transform in the +West") is probably the first name that comes to mind -- I guess that +99% of open-source projects that need a FFT do use FFTW, and are happy +with it. However, it is quite a large library , which does everything +fft related (2d transforms, 3d transforms, other transformations such +as discrete cosine , or fast hartley). And it is licensed under the +GNU GPL , which means that it cannot be used in non open-source +products. + +An alternative to FFTW that is really small, is the venerable FFTPACK +v4, which is available on NETLIB. A more recent version (v5) exists, +but it is larger as it deals with multi-dimensional transforms. This +is a library that is written in FORTRAN 77, a language that is now +considered as a bit antiquated by many. FFTPACKv4 was written in 1985, +by Dr Paul Swarztrauber of NCAR, more than 25 years ago ! And despite +its age, benchmarks show it that it still a very good performing FFT +library, see for example the 1d single precision benchmarks here: +http://www.fftw.org/speed/opteron-2.2GHz-32bit/ . It is however not +competitive with the fastest ones, such as FFTW, Intel MKL, AMD ACML, +Apple vDSP. The reason for that is that those libraries do take +advantage of the SSE SIMD instructions available on Intel CPUs, +available since the days of the Pentium III. These instructions deal +with small vectors of 4 floats at a time, instead of a single float +for a traditionnal FPU, so when using these instructions one may expect +a 4-fold performance improvement. + +The idea was to take this fortran fftpack v4 code, translate to C, +modify it to deal with those SSE instructions, and check that the +final performance is not completely ridiculous when compared to other +SIMD FFT libraries. Translation to C was performed with f2c ( +http://www.netlib.org/f2c/ ). The resulting file was a bit edited in +order to remove the thousands of gotos that were introduced by +f2c. You will find the fftpack.h and fftpack.c sources in the +repository, this a complete translation of +http://www.netlib.org/fftpack/ , with the discrete cosine transform +and the test program. There is no license information in the netlib +repository, but it was confirmed to me by the fftpack v5 curators that +the same terms do apply to fftpack v4: +http://www.cisl.ucar.edu/css/software/fftpack5/ftpk.html . This is a +"BSD-like" license, it is compatible with proprietary projects. + +Adapting fftpack to deal with the SIMD 4-element vectors instead of +scalar single precision numbers was more complex than I originally +thought, especially with the real transforms, and I ended up writing +more code than I planned.. + + +## The code: + +Only two files, in good old C, pffft.c and pffft.h . The API is very +very simple, just make sure that you read the comments in pffft.h. + + +## Comparison with other FFTs: + +The idea was not to break speed records, but to get a decently fast +fft that is at least 50% as fast as the fastest FFT -- especially on +slowest computers . I'm more focused on getting the best performance +on slow cpus (Atom, Intel Core 1, old Athlons, ARM Cortex-A9...), than +on getting top performance on today fastest cpus. 
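Since the API is that small, here is a minimal sketch of the ordered real-transform round trip, using only the calls that dsp.c in this commit uses (the length N is illustrative; it must satisfy the size constraints documented in pffft.h):

    #include <fft/pffft/pffft.h>
    #include <string.h>

    /* Minimal round trip with the calls dsp.c uses. pffft does not
     * normalize, so the inverse output comes back scaled by N
     * (dsp.c applies 1.0/N itself). */
    static void pffft_roundtrip_sketch(void)
    {
        const int N = 512;                       /* illustrative size */
        PFFFT_Setup *s = pffft_new_setup(N, PFFFT_REAL);
        float *in   = pffft_aligned_malloc(sizeof(float) * N);
        float *bins = pffft_aligned_malloc(sizeof(float) * N);
        float *out  = pffft_aligned_malloc(sizeof(float) * N);

        memset(in, 0, sizeof(float) * N);
        in[0] = 1.0f;                            /* unit impulse */

        /* NULL work pointer: scratch is taken on the stack, as in dsp.c */
        pffft_transform_ordered(s, in, bins, NULL, PFFFT_FORWARD);
        pffft_transform_ordered(s, bins, out, NULL, PFFFT_BACKWARD);

        pffft_aligned_free(out);
        pffft_aligned_free(bins);
        pffft_aligned_free(in);
        pffft_destroy_setup(s);
    }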
+ +It can be used in a real-time context as the fft functions do not +perform any memory allocation -- that is why they accept a 'work' +array in their arguments. + +It is also a bit focused on performing 1D convolutions, that is why it +provides "unordered" FFTs , and a fourier domain convolution +operation. + + +Benchmark results +-- + +The benchmark shows the performance of various fft implementations measured in +MFlops, with the number of floating point operations being defined as 5Nlog2(N) +for a length N complex fft, and 2.5*Nlog2(N) for a real fft. +See http://www.fftw.org/speed/method.html for an explanation of these formulas. + +2021 update: I'm now including Intel MKL, and I'm removing old benchmarks results for cpu that have +not been relevant for a very long time.. Now that most Intel cpus have AVX enabled (most but not +all, Intel is still selling cpus with no AVX), the performance of pffft vs MKL or VDSP is a bit +behind since pffft is only SSE2. + +However, performance of pffft on the Apple M1 cpu is very good (especially for the real fft), so I'll put it first :-) + + +MacOS Big Sur, XCode 12, M1 cpu on a 2020 mac mini. I'm not including fftw results as they are very +bad here, most likely Homebrew did not enable neon when building the lib. + + clang -o test_pffft -DHAVE_VECLIB -O3 -Wall -W pffft.c test_pffft.c fftpack.c -framework Accelerate + +| input len |real FFTPack| real vDSP | real PFFFT |cplx FFTPack| cplx vDSP | cplx PFFFT | +|-----------:|-----------:|-----------:|-----------:|-----------:|-----------:|-----------:| +| 64 | 4835 | 15687 | 23704 | 12935 | 36222 | 33046 | +| 96 | 9539 | n/a | 25957 | 11068 | n/a | 32703 | +| 128 | 11119 | 33087 | 30143 | 12329 | 50866 | 36363 | +| 160 | 11062 | n/a | 32938 | 11641 | n/a | 34945 | +| 192 | 11784 | n/a | 35726 | 12945 | n/a | 39258 | +| 256 | 13370 | 44880 | 40683 | 14214 | 64654 | 42522 | +| 384 | 11059 | n/a | 40038 | 11655 | n/a | 39565 | +| 480 | 10977 | n/a | 40895 | 10802 | n/a | 36943 | +| 512 | 12197 | 34830 | 43353 | 12357 | 78441 | 41450 | +| 640 | 11685 | n/a | 43393 | 11775 | n/a | 39302 | +| 768 | 12314 | n/a | 45836 | 12756 | n/a | 43058 | +| 800 | 11213 | n/a | 41321 | 10842 | n/a | 37382 | +| 1024 | 13354 | 45214 | 50039 | 13759 | 93269 | 45210 | +| 2048 | 12806 | 57047 | 49519 | 12908 | 99361 | 43719 | +| 2400 | 10972 | n/a | 43399 | 10928 | n/a | 37574 | +| 4096 | 13957 | 65233 | 52845 | 13851 | 105734 | 46274 | +| 8192 | 12772 | 70108 | 49830 | 12560 | 85238 | 40252 | +| 9216 | 12281 | n/a | 48929 | 12114 | n/a | 39202 | +| 16384 | 13363 | 62150 | 48260 | 12910 | 79073 | 38742 | +| 32768 | 11622 | 60809 | 32801 | 11145 | 71570 | 35607 | +| 262144 | 11525 | 53651 | 34988 | 10818 | 63198 | 36742 | +| 1048576 | 11167 | 46119 | 34437 | 9202 | 38823 | 31788 | + +So yes, the perf of the M1 cpu on the complex is really impressive... 
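To make the MFlops convention above concrete: a length-1024 complex transform counts as 5 * 1024 * log2(1024) = 51200 floating point operations, so the 45210 MFlops reported for cplx PFFFT on the M1 corresponds to roughly 1.1 microseconds per transform.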
+ +Windows 10, Ryzen 7 4800HS @ 2.9GHz, Visual c++ 2019 and Intel MKL 2018 + +Build with: + + cl /Ox -D_USE_MATH_DEFINES -DHAVE_MKL /arch:SSE test_pffft.c pffft.c fftpack.c /I c:/MKL/include c:/MKL/lib/intel64/mkl_intel_lp64.lib c:/MKL/lib/intel64/mkl_sequential.lib c:/MKL/lib/intel64/mkl_core.lib + +| input len |real FFTPack| real MKL | real PFFFT |cplx FFTPack| cplx MKL | cplx PFFFT | +|-----------:|-----------:|-----------:|-----------:|-----------:|-----------:|-----------:| +| 64 | 3938 | 7877 | 14629 | 7314 | 25600 | 19200 | +| 96 | 5108 | 14984 | 13761 | 7329 | 20128 | 20748 | +| 128 | 5973 | 18379 | 19911 | 7626 | 29257 | 23123 | +| 160 | 6694 | 18287 | 19731 | 7976 | 22720 | 21120 | +| 192 | 6472 | 16525 | 20439 | 6813 | 27252 | 25054 | +| 256 | 7585 | 23406 | 25600 | 8715 | 33437 | 26006 | +| 384 | 6279 | 21441 | 23759 | 7206 | 25855 | 25481 | +| 480 | 6514 | 20267 | 22800 | 7238 | 26435 | 21976 | +| 512 | 6776 | 26332 | 29729 | 6312 | 34777 | 25961 | +| 640 | 7019 | 21695 | 27273 | 7232 | 26889 | 25120 | +| 768 | 6815 | 21809 | 28865 | 7667 | 31658 | 27645 | +| 800 | 7261 | 23513 | 25988 | 6764 | 27056 | 25001 | +| 1024 | 7529 | 30118 | 31030 | 8127 | 38641 | 28055 | +| 2048 | 7411 | 31289 | 33129 | 8533 | 38841 | 27812 | +| 2400 | 7768 | 22993 | 26128 | 7563 | 26429 | 24992 | +| 4096 | 8533 | 33211 | 34134 | 8777 | 38400 | 27007 | +| 8192 | 6525 | 32468 | 30254 | 7924 | 39737 | 28025 | +| 9216 | 7322 | 22835 | 28068 | 7322 | 29939 | 26945 | +| 16384 | 7455 | 31807 | 30453 | 8132 | 37177 | 27525 | +| 32768 | 8157 | 31949 | 30671 | 8334 | 29210 | 26214 | +| 262144 | 7349 | 25255 | 24904 | 6844 | 22413 | 16996 | +| 1048576 | 5115 | 19284 | 8347 | 6079 | 12906 | 9244 | + +(Note: MKL is not using AVX on AMD cpus) + + +MacOS Catalina, Xcode 12, fftw 3.3.9 and MKL 2020.1, cpu is "i7-6920HQ CPU @ 2.90GHz" + +Built with: + + clang -o test_pffft -DHAVE_MKL -I /opt/intel/mkl/include -DHAVE_FFTW -DHAVE_VECLIB -O3 -Wall -W pffft.c test_pffft.c fftpack.c -L/usr/local/lib -I/usr/local/include/ -lfftw3f -framework Accelerate /opt/intel/mkl/lib/libmkl_{intel_lp64,sequential,core}. 
+ +| input len |real FFTPack| real vDSP | real MKL | real FFTW | real PFFFT |cplx FFTPack| cplx vDSP | cplx MKL | cplx FFTW | cplx PFFFT | +|------------|------------|------------|------------|------------|------------|------------|------------|------------|------------|------------| +| 64 | 4528 | 12550 | 18214 | 8237 | 10097 | 5962 | 25748 | 40865 | 32233 | 15807 | +| 96 | 4738 | n/a | 15844 | 10749 | 11026 | 5344 | n/a | 21086 | 34678 | 15493 | +| 128 | 5464 | 20419 | 25739 | 12371 | 13338 | 6060 | 28659 | 42419 | 38868 | 17694 | +| 160 | 5517 | n/a | 18644 | 12361 | 13765 | 6002 | n/a | 21633 | 37726 | 17969 | +| 192 | 5904 | n/a | 18861 | 12480 | 15134 | 6271 | n/a | 26074 | 33216 | 18525 | +| 256 | 6618 | 24944 | 26063 | 14646 | 16895 | 6965 | 34332 | 52182 | 44496 | 20980 | +| 384 | 5685 | n/a | 22307 | 14682 | 16969 | 5853 | n/a | 27363 | 35805 | 19841 | +| 480 | 5757 | n/a | 21122 | 14572 | 16765 | 5836 | n/a | 26259 | 26340 | 18852 | +| 512 | 6245 | 28100 | 27224 | 16546 | 18502 | 6240 | 37098 | 51679 | 43444 | 21519 | +| 640 | 6110 | n/a | 22565 | 14691 | 18573 | 6376 | n/a | 29219 | 34327 | 20708 | +| 768 | 6424 | n/a | 21496 | 15999 | 19900 | 6358 | n/a | 30168 | 36437 | 21657 | +| 800 | 5747 | n/a | 24857 | 15068 | 18842 | 5698 | n/a | 26891 | 20249 | 18497 | +| 1024 | 6397 | 28477 | 27520 | 13399 | 18491 | 5558 | 33632 | 44366 | 35960 | 23421 | +| 2048 | 6563 | 37379 | 34743 | 14204 | 20854 | 5828 | 41758 | 40301 | 36469 | 18504 | +| 2400 | 5594 | n/a | 24631 | 15496 | 16732 | 4128 | n/a | 16997 | 23421 | 16710 | +| 4096 | 6262 | 36417 | 28150 | 17742 | 19356 | 6272 | 33534 | 31632 | 33524 | 16995 | +| 8192 | 4142 | 24923 | 26571 | 17102 | 10104 | 5681 | 29504 | 33221 | 21803 | 15212 | +| 9216 | 5762 | n/a | 17305 | 14870 | 14464 | 5781 | n/a | 21579 | 22174 | 17358 | +| 16384 | 5650 | 29395 | 27201 | 15857 | 11748 | 5932 | 26534 | 31708 | 21161 | 16173 | +| 32768 | 5441 | 23931 | 26261 | 15394 | 10334 | 5679 | 23278 | 31162 | 19966 | 14624 | +| 262144 | 4946 | 19081 | 23591 | 9612 | 9544 | 4958 | 16362 | 20196 | 10419 | 12575 | +| 1048576 | 3752 | 14873 | 15469 | 6673 | 6653 | 4048 | 9563 | 16681 | 4298 | 7852 | + + +MacOS Lion, gcc 4.2, 64-bit, fftw 3.3 on a 3.4 GHz core i7 2600 + +Built with: + + gcc-4.2 -o test_pffft -arch x86_64 -O3 -Wall -W pffft.c test_pffft.c fftpack.c -L/usr/local/lib -I/usr/local/include/ -DHAVE_VECLIB -framework veclib -DHAVE_FFTW -lfftw3f + +| input len |real FFTPack| real vDSP | real FFTW | real PFFFT |cplx FFTPack| cplx vDSP | cplx FFTW | cplx PFFFT | +|----------:|-----------:|-----------:|-----------:|-----------:|-----------:|-----------:|-----------:|-----------:| +| 64 | 2816 | 8596 | 7329 | 8187 | 2887 | 14898 | 14668 | 11108 | +| 96 | 3298 | n/a | 8378 | 7727 | 3953 | n/a | 15680 | 10878 | +| 128 | 3507 | 11575 | 9266 | 10108 | 4233 | 17598 | 16427 | 12000 | +| 160 | 3391 | n/a | 9838 | 10711 | 4220 | n/a | 16653 | 11187 | +| 192 | 3919 | n/a | 9868 | 10956 | 4297 | n/a | 15770 | 12540 | +| 256 | 4283 | 13179 | 10694 | 13128 | 4545 | 19550 | 16350 | 13822 | +| 384 | 3136 | n/a | 10810 | 12061 | 3600 | n/a | 16103 | 13240 | +| 480 | 3477 | n/a | 10632 | 12074 | 3536 | n/a | 11630 | 12522 | +| 512 | 3783 | 15141 | 11267 | 13838 | 3649 | 20002 | 16560 | 13580 | +| 640 | 3639 | n/a | 11164 | 13946 | 3695 | n/a | 15416 | 13890 | +| 768 | 3800 | n/a | 11245 | 13495 | 3590 | n/a | 15802 | 14552 | +| 800 | 3440 | n/a | 10499 | 13301 | 3659 | n/a | 12056 | 13268 | +| 1024 | 3924 | 15605 | 11450 | 15339 | 3769 | 20963 | 13941 | 15467 | +| 2048 | 
4518 | 16195 | 11551 | 15532 | 4258 | 20413 | 13723 | 15042 | +| 2400 | 4294 | n/a | 10685 | 13078 | 4093 | n/a | 12777 | 13119 | +| 4096 | 4750 | 16596 | 11672 | 15817 | 4157 | 19662 | 14316 | 14336 | +| 8192 | 3820 | 16227 | 11084 | 12555 | 3691 | 18132 | 12102 | 13813 | +| 9216 | 3864 | n/a | 10254 | 12870 | 3586 | n/a | 12119 | 13994 | +| 16384 | 3822 | 15123 | 10454 | 12822 | 3613 | 16874 | 12370 | 13881 | +| 32768 | 4175 | 14512 | 10662 | 11095 | 3881 | 14702 | 11619 | 11524 | +| 262144 | 3317 | 11429 | 6269 | 9517 | 2810 | 11729 | 7757 | 10179 | +| 1048576 | 2913 | 10551 | 4730 | 5867 | 2661 | 7881 | 3520 | 5350 | + + +Ubuntu 11.04, gcc 4.5, 32-bit, fftw 3.2 on a 2.66 core 2 quad + +Built with: + + gcc -o test_pffft -DHAVE_FFTW -msse -mfpmath=sse -O3 -Wall -W pffft.c test_pffft.c fftpack.c -L/usr/local/lib -I/usr/local/include/ -lfftw3f -lm + +| input len |real FFTPack| real FFTW | real PFFFT |cplx FFTPack| cplx FFTW | cplx PFFFT | +|----------:|-----------:|-----------:|-----------:|-----------:|-----------:|-----------:| +| 64 | 1920 | 3614 | 5120 | 2194 | 7680 | 6467 | +| 96 | 1873 | 3549 | 5187 | 2107 | 8429 | 5863 | +| 128 | 2240 | 3773 | 5514 | 2560 | 7964 | 6827 | +| 192 | 1765 | 4569 | 7767 | 2284 | 9137 | 7061 | +| 256 | 2048 | 5461 | 7447 | 2731 | 9638 | 7802 | +| 384 | 1998 | 5861 | 6762 | 2313 | 9253 | 7644 | +| 512 | 2095 | 6144 | 7680 | 2194 | 10240 | 7089 | +| 768 | 2230 | 5773 | 7549 | 2045 | 10331 | 7010 | +| 1024 | 2133 | 6400 | 8533 | 2133 | 10779 | 7877 | +| 2048 | 2011 | 7040 | 8665 | 1942 | 10240 | 7768 | +| 4096 | 2194 | 6827 | 8777 | 1755 | 9452 | 6827 | +| 8192 | 1849 | 6656 | 6656 | 1752 | 7831 | 6827 | +| 9216 | 1871 | 5858 | 6416 | 1643 | 6909 | 6266 | +| 16384 | 1883 | 6223 | 6506 | 1664 | 7340 | 6982 | +| 32768 | 1826 | 6390 | 6667 | 1631 | 7481 | 6971 | +| 262144 | 1546 | 4075 | 5977 | 1299 | 3415 | 3551 | +| 1048576 | 1104 | 2071 | 1730 | 1104 | 1149 | 1834 | + + +NVIDIA Jetson TK1 board, gcc-4.8.2. The cpu is a 2.3GHz cortex A15 (Tegra K1). + +Built with: + + gcc -O3 -march=armv7-a -mtune=native -mfloat-abi=hard -mfpu=neon -ffast-math test_pffft.c pffft.c -o test_pffft_arm fftpack.c -lm + +| input len |real FFTPack| real PFFFT |cplx FFTPack| cplx PFFFT | +|----------:|-----------:|-----------:|-----------:|-----------:| +| 64 | 1735 | 3308 | 1994 | 3744 | +| 96 | 1596 | 3448 | 1987 | 3572 | +| 128 | 1807 | 4076 | 2255 | 3960 | +| 160 | 1769 | 4083 | 2071 | 3845 | +| 192 | 1990 | 4233 | 2017 | 3939 | +| 256 | 2191 | 4882 | 2254 | 4346 | +| 384 | 1878 | 4492 | 2073 | 4012 | +| 480 | 1748 | 4398 | 1923 | 3951 | +| 512 | 2030 | 5064 | 2267 | 4195 | +| 640 | 1918 | 4756 | 2094 | 4184 | +| 768 | 2099 | 4907 | 2048 | 4297 | +| 800 | 1822 | 4555 | 1880 | 4063 | +| 1024 | 2232 | 5355 | 2187 | 4420 | +| 2048 | 2176 | 4983 | 2027 | 3602 | +| 2400 | 1741 | 4256 | 1710 | 3344 | +| 4096 | 1816 | 3914 | 1851 | 3349 | +| 8192 | 1716 | 3481 | 1700 | 3255 | +| 9216 | 1735 | 3589 | 1653 | 3094 | +| 16384 | 1567 | 3483 | 1637 | 3244 | +| 32768 | 1624 | 3240 | 1655 | 3156 | +| 262144 | 1012 | 1898 | 983 | 1503 | +| 1048576 | 876 | 1154 | 868 | 1341 | + + +iPad Air 2 with iOS9, xcode 8.0, arm64. The cpu is an Apple A8X, supposedly running at 1.5GHz. 
+ +| input len |real FFTPack| real vDSP | real PFFFT |cplx FFTPack| cplx vDSP | cplx PFFFT | +|----------:|-----------:|-----------:|-----------:|-----------:|-----------:|-----------:| +| 64 | 2517 | 7995 | 6086 | 2725 | 13006 | 8495 | +| 96 | 2442 | n/a | 6691 | 2256 | n/a | 7991 | +| 128 | 2664 | 10186 | 7877 | 2575 | 15115 | 9115 | +| 160 | 2638 | n/a | 8283 | 2682 | n/a | 8806 | +| 192 | 2903 | n/a | 9083 | 2634 | n/a | 8980 | +| 256 | 3184 | 11452 | 10039 | 3026 | 15410 | 10199 | +| 384 | 2665 | n/a | 10100 | 2275 | n/a | 9247 | +| 480 | 2546 | n/a | 9863 | 2341 | n/a | 8892 | +| 512 | 2832 | 12197 | 10989 | 2547 | 16768 | 10154 | +| 640 | 2755 | n/a | 10461 | 2569 | n/a | 9666 | +| 768 | 2998 | n/a | 11355 | 2585 | n/a | 9813 | +| 800 | 2516 | n/a | 10332 | 2433 | n/a | 9164 | +| 1024 | 3109 | 12965 | 12114 | 2869 | 16448 | 10519 | +| 2048 | 3027 | 12996 | 12023 | 2648 | 17304 | 10307 | +| 2400 | 2515 | n/a | 10372 | 2355 | n/a | 8443 | +| 4096 | 3204 | 13603 | 12359 | 2814 | 16570 | 9780 | +| 8192 | 2759 | 13422 | 10824 | 2153 | 15652 | 7884 | +| 9216 | 2700 | n/a | 9938 | 2241 | n/a | 7900 | +| 16384 | 2280 | 13057 | 7976 | 593 | 4272 | 2534 | +| 32768 | 768 | 4269 | 2882 | 606 | 4405 | 2604 | +| 262144 | 724 | 3527 | 2630 | 534 | 2418 | 2157 | +| 1048576 | 674 | 1467 | 2135 | 530 | 1621 | 2055 | + + + diff --git a/lib/src/fft/pffft/pffft.c b/lib/src/fft/pffft/pffft.c new file mode 100644 index 0000000..d6cbaaf --- /dev/null +++ b/lib/src/fft/pffft/pffft.c @@ -0,0 +1,1884 @@ +/* Copyright (c) 2013 Julien Pommier ( pommier@modartt.com ) + + Based on original fortran 77 code from FFTPACKv4 from NETLIB + (http://www.netlib.org/fftpack), authored by Dr Paul Swarztrauber + of NCAR, in 1985. + + As confirmed by the NCAR fftpack software curators, the following + FFTPACKv5 license applies to FFTPACKv4 sources. My changes are + released under the same terms. + + FFTPACK license: + + http://www.cisl.ucar.edu/css/software/fftpack5/ftpk.html + + Copyright (c) 2004 the University Corporation for Atmospheric + Research ("UCAR"). All rights reserved. Developed by NCAR's + Computational and Information Systems Laboratory, UCAR, + www.cisl.ucar.edu. + + Redistribution and use of the Software in source and binary forms, + with or without modification, is permitted provided that the + following conditions are met: + + - Neither the names of NCAR's Computational and Information Systems + Laboratory, the University Corporation for Atmospheric Research, + nor the names of its sponsors or contributors may be used to + endorse or promote products derived from this Software without + specific prior written permission. + + - Redistributions of source code must retain the above copyright + notices, this list of conditions, and the disclaimer below. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions, and the disclaimer below in the + documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, INDIRECT, INCIDENTAL, SPECIAL, + EXEMPLARY, OR CONSEQUENTIAL DAMAGES OR OTHER LIABILITY, WHETHER IN AN + ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE + SOFTWARE. 
+ + + PFFFT : a Pretty Fast FFT. + + This file is largerly based on the original FFTPACK implementation, modified in + order to take advantage of SIMD instructions of modern CPUs. +*/ + +/* + ChangeLog: + - 2011/10/02, version 1: This is the very first release of this file. +*/ + +#include "pffft.h" +#include <stdlib.h> +#include <stdio.h> +#include <math.h> +#include <assert.h> + +/* detect compiler flavour */ +#if defined(_MSC_VER) +# define COMPILER_MSVC +#elif defined(__GNUC__) +# define COMPILER_GCC +#endif + +#if defined(COMPILER_GCC) +# define ALWAYS_INLINE(return_type) inline return_type __attribute__ ((always_inline)) +# define NEVER_INLINE(return_type) return_type __attribute__ ((noinline)) +# define RESTRICT __restrict +# define VLA_ARRAY_ON_STACK(type__, varname__, size__) type__ varname__[size__]; +#elif defined(COMPILER_MSVC) +# define ALWAYS_INLINE(return_type) __forceinline return_type +# define NEVER_INLINE(return_type) __declspec(noinline) return_type +# define RESTRICT __restrict +# define VLA_ARRAY_ON_STACK(type__, varname__, size__) type__ *varname__ = (type__*)_alloca(size__ * sizeof(type__)) +#endif + + +/* + vector support macros: the rest of the code is independant of + SSE/Altivec/NEON -- adding support for other platforms with 4-element + vectors should be limited to these macros +*/ + + +// define PFFFT_SIMD_DISABLE if you want to use scalar code instead of simd code +//#define PFFFT_SIMD_DISABLE + +/* + Altivec support macros +*/ +#if !defined(PFFFT_SIMD_DISABLE) && (defined(__ppc__) || defined(__ppc64__) || defined(__powerpc__) || defined(__powerpc64__)) +#include <altivec.h> +typedef vector float v4sf; +# define SIMD_SZ 4 +# define VZERO() ((vector float) vec_splat_u8(0)) +# define VMUL(a,b) vec_madd(a,b, VZERO()) +# define VADD(a,b) vec_add(a,b) +# define VMADD(a,b,c) vec_madd(a,b,c) +# define VSUB(a,b) vec_sub(a,b) +inline v4sf ld_ps1(const float *p) { v4sf v=vec_lde(0,p); return vec_splat(vec_perm(v, v, vec_lvsl(0, p)), 0); } +# define LD_PS1(p) ld_ps1(&p) +# define INTERLEAVE2(in1, in2, out1, out2) { v4sf tmp__ = vec_mergeh(in1, in2); out2 = vec_mergel(in1, in2); out1 = tmp__; } +# define UNINTERLEAVE2(in1, in2, out1, out2) { \ + vector unsigned char vperm1 = (vector unsigned char){0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27}; \ + vector unsigned char vperm2 = (vector unsigned char){4,5,6,7,12,13,14,15,20,21,22,23,28,29,30,31}; \ + v4sf tmp__ = vec_perm(in1, in2, vperm1); out2 = vec_perm(in1, in2, vperm2); out1 = tmp__; \ + } +# define VTRANSPOSE4(x0,x1,x2,x3) { \ + v4sf y0 = vec_mergeh(x0, x2); \ + v4sf y1 = vec_mergel(x0, x2); \ + v4sf y2 = vec_mergeh(x1, x3); \ + v4sf y3 = vec_mergel(x1, x3); \ + x0 = vec_mergeh(y0, y2); \ + x1 = vec_mergel(y0, y2); \ + x2 = vec_mergeh(y1, y3); \ + x3 = vec_mergel(y1, y3); \ + } +# define VSWAPHL(a,b) vec_perm(a,b, (vector unsigned char){16,17,18,19,20,21,22,23,8,9,10,11,12,13,14,15}) +# define VALIGNED(ptr) ((((long long)(ptr)) & 0xF) == 0) + +/* + SSE1 support macros +*/ +#elif !defined(PFFFT_SIMD_DISABLE) && (defined(__x86_64__) || defined(_M_X64) || defined(__i386__) || defined(i386) || defined(_M_IX86)) + +#include <xmmintrin.h> +typedef __m128 v4sf; +# define SIMD_SZ 4 // 4 floats by simd vector -- this is pretty much hardcoded in the preprocess/finalize functions anyway so you will have to work if you want to enable AVX with its 256-bit vectors. 
+# define VZERO() _mm_setzero_ps() +# define VMUL(a,b) _mm_mul_ps(a,b) +# define VADD(a,b) _mm_add_ps(a,b) +# define VMADD(a,b,c) _mm_add_ps(_mm_mul_ps(a,b), c) +# define VSUB(a,b) _mm_sub_ps(a,b) +# define LD_PS1(p) _mm_set1_ps(p) +# define INTERLEAVE2(in1, in2, out1, out2) { v4sf tmp__ = _mm_unpacklo_ps(in1, in2); out2 = _mm_unpackhi_ps(in1, in2); out1 = tmp__; } +# define UNINTERLEAVE2(in1, in2, out1, out2) { v4sf tmp__ = _mm_shuffle_ps(in1, in2, _MM_SHUFFLE(2,0,2,0)); out2 = _mm_shuffle_ps(in1, in2, _MM_SHUFFLE(3,1,3,1)); out1 = tmp__; } +# define VTRANSPOSE4(x0,x1,x2,x3) _MM_TRANSPOSE4_PS(x0,x1,x2,x3) +# define VSWAPHL(a,b) _mm_shuffle_ps(b, a, _MM_SHUFFLE(3,2,1,0)) +# define VALIGNED(ptr) ((((long long)(ptr)) & 0xF) == 0) + +/* + ARM NEON support macros +*/ +#elif !defined(PFFFT_SIMD_DISABLE) && (defined(__arm__) || defined(__aarch64__) || defined(__arm64__)) +# include <arm_neon.h> +typedef float32x4_t v4sf; +# define SIMD_SZ 4 +# define VZERO() vdupq_n_f32(0) +# define VMUL(a,b) vmulq_f32(a,b) +# define VADD(a,b) vaddq_f32(a,b) +# define VMADD(a,b,c) vmlaq_f32(c,a,b) +# define VSUB(a,b) vsubq_f32(a,b) +# define LD_PS1(p) vld1q_dup_f32(&(p)) +# define INTERLEAVE2(in1, in2, out1, out2) { float32x4x2_t tmp__ = vzipq_f32(in1,in2); out1=tmp__.val[0]; out2=tmp__.val[1]; } +# define UNINTERLEAVE2(in1, in2, out1, out2) { float32x4x2_t tmp__ = vuzpq_f32(in1,in2); out1=tmp__.val[0]; out2=tmp__.val[1]; } +# define VTRANSPOSE4(x0,x1,x2,x3) { \ + float32x4x2_t t0_ = vzipq_f32(x0, x2); \ + float32x4x2_t t1_ = vzipq_f32(x1, x3); \ + float32x4x2_t u0_ = vzipq_f32(t0_.val[0], t1_.val[0]); \ + float32x4x2_t u1_ = vzipq_f32(t0_.val[1], t1_.val[1]); \ + x0 = u0_.val[0]; x1 = u0_.val[1]; x2 = u1_.val[0]; x3 = u1_.val[1]; \ + } +// marginally faster version +//# define VTRANSPOSE4(x0,x1,x2,x3) { asm("vtrn.32 %q0, %q1;\n vtrn.32 %q2,%q3\n vswp %f0,%e2\n vswp %f1,%e3" : "+w"(x0), "+w"(x1), "+w"(x2), "+w"(x3)::); } +# define VSWAPHL(a,b) vcombine_f32(vget_low_f32(b), vget_high_f32(a)) +# define VALIGNED(ptr) ((((long long)(ptr)) & 0x3) == 0) +#else +# if !defined(PFFFT_SIMD_DISABLE) +# warning "building with simd disabled !\n"; +# define PFFFT_SIMD_DISABLE // fallback to scalar code +# endif +#endif + +// fallback mode for situations where SSE/Altivec are not available, use scalar mode instead +#ifdef PFFFT_SIMD_DISABLE +typedef float v4sf; +# define SIMD_SZ 1 +# define VZERO() 0.f +# define VMUL(a,b) ((a)*(b)) +# define VADD(a,b) ((a)+(b)) +# define VMADD(a,b,c) ((a)*(b)+(c)) +# define VSUB(a,b) ((a)-(b)) +# define LD_PS1(p) (p) +# define VALIGNED(ptr) ((((long long)(ptr)) & 0x3) == 0) +#endif + +// shortcuts for complex multiplcations +#define VCPLXMUL(ar,ai,br,bi) { v4sf tmp; tmp=VMUL(ar,bi); ar=VMUL(ar,br); ar=VSUB(ar,VMUL(ai,bi)); ai=VMUL(ai,br); ai=VADD(ai,tmp); } +#define VCPLXMULCONJ(ar,ai,br,bi) { v4sf tmp; tmp=VMUL(ar,bi); ar=VMUL(ar,br); ar=VADD(ar,VMUL(ai,bi)); ai=VMUL(ai,br); ai=VSUB(ai,tmp); } +#ifndef SVMUL +// multiply a scalar with a vector +#define SVMUL(f,v) VMUL(LD_PS1(f),v) +#endif + +#if !defined(PFFFT_SIMD_DISABLE) +typedef union v4sf_union { + v4sf v; + float f[4]; +} v4sf_union; + +#include <string.h> + +#define assertv4(v,f0,f1,f2,f3) assert(v.f[0] == (f0) && v.f[1] == (f1) && v.f[2] == (f2) && v.f[3] == (f3)) + +/* detect bugs with the vector support macros */ +void validate_pffft_simd(void) { + float f[16] = { 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 }; + v4sf_union a0, a1, a2, a3, t, u; + memcpy(a0.f, f, 4*sizeof(float)); + memcpy(a1.f, f+4, 4*sizeof(float)); + memcpy(a2.f, f+8, 
4*sizeof(float)); + memcpy(a3.f, f+12, 4*sizeof(float)); + + t = a0; u = a1; t.v = VZERO(); + printf("VZERO=[%2g %2g %2g %2g]\n", t.f[0], t.f[1], t.f[2], t.f[3]); assertv4(t, 0, 0, 0, 0); + t.v = VADD(a1.v, a2.v); + printf("VADD(4:7,8:11)=[%2g %2g %2g %2g]\n", t.f[0], t.f[1], t.f[2], t.f[3]); assertv4(t, 12, 14, 16, 18); + t.v = VMUL(a1.v, a2.v); + printf("VMUL(4:7,8:11)=[%2g %2g %2g %2g]\n", t.f[0], t.f[1], t.f[2], t.f[3]); assertv4(t, 32, 45, 60, 77); + t.v = VMADD(a1.v, a2.v,a0.v); + printf("VMADD(4:7,8:11,0:3)=[%2g %2g %2g %2g]\n", t.f[0], t.f[1], t.f[2], t.f[3]); assertv4(t, 32, 46, 62, 80); + + INTERLEAVE2(a1.v,a2.v,t.v,u.v); + printf("INTERLEAVE2(4:7,8:11)=[%2g %2g %2g %2g] [%2g %2g %2g %2g]\n", t.f[0], t.f[1], t.f[2], t.f[3], u.f[0], u.f[1], u.f[2], u.f[3]); + assertv4(t, 4, 8, 5, 9); assertv4(u, 6, 10, 7, 11); + UNINTERLEAVE2(a1.v,a2.v,t.v,u.v); + printf("UNINTERLEAVE2(4:7,8:11)=[%2g %2g %2g %2g] [%2g %2g %2g %2g]\n", t.f[0], t.f[1], t.f[2], t.f[3], u.f[0], u.f[1], u.f[2], u.f[3]); + assertv4(t, 4, 6, 8, 10); assertv4(u, 5, 7, 9, 11); + + t.v=LD_PS1(f[15]); + printf("LD_PS1(15)=[%2g %2g %2g %2g]\n", t.f[0], t.f[1], t.f[2], t.f[3]); + assertv4(t, 15, 15, 15, 15); + t.v = VSWAPHL(a1.v, a2.v); + printf("VSWAPHL(4:7,8:11)=[%2g %2g %2g %2g]\n", t.f[0], t.f[1], t.f[2], t.f[3]); + assertv4(t, 8, 9, 6, 7); + VTRANSPOSE4(a0.v, a1.v, a2.v, a3.v); + printf("VTRANSPOSE4(0:3,4:7,8:11,12:15)=[%2g %2g %2g %2g] [%2g %2g %2g %2g] [%2g %2g %2g %2g] [%2g %2g %2g %2g]\n", + a0.f[0], a0.f[1], a0.f[2], a0.f[3], a1.f[0], a1.f[1], a1.f[2], a1.f[3], + a2.f[0], a2.f[1], a2.f[2], a2.f[3], a3.f[0], a3.f[1], a3.f[2], a3.f[3]); + assertv4(a0, 0, 4, 8, 12); assertv4(a1, 1, 5, 9, 13); assertv4(a2, 2, 6, 10, 14); assertv4(a3, 3, 7, 11, 15); +} +#else +void validate_pffft_simd() {} // allow test_pffft.c to call this function even when simd is not available.. +#endif //!PFFFT_SIMD_DISABLE + +/* SSE and co like 16-bytes aligned pointers */ +#define MALLOC_V4SF_ALIGNMENT 64 // with a 64-byte alignment, we are even aligned on L2 cache lines... 
+void *pffft_aligned_malloc(size_t nb_bytes) { + void *p, *p0 = malloc(nb_bytes + MALLOC_V4SF_ALIGNMENT); + if (!p0) return (void *) 0; + p = (void *) (((size_t) p0 + MALLOC_V4SF_ALIGNMENT) & (~((size_t) (MALLOC_V4SF_ALIGNMENT-1)))); + *((void **) p - 1) = p0; + return p; +} + +void pffft_aligned_free(void *p) { + if (p) free(*((void **) p - 1)); +} + +int pffft_simd_size(void) { return SIMD_SZ; } + +/* + passf2 and passb2 has been merged here, fsign = -1 for passf2, +1 for passb2 +*/ +static NEVER_INLINE(void) passf2_ps(int ido, int l1, const v4sf *cc, v4sf *ch, const float *wa1, float fsign) { + int k, i; + int l1ido = l1*ido; + if (ido <= 2) { + for (k=0; k < l1ido; k += ido, ch += ido, cc+= 2*ido) { + ch[0] = VADD(cc[0], cc[ido+0]); + ch[l1ido] = VSUB(cc[0], cc[ido+0]); + ch[1] = VADD(cc[1], cc[ido+1]); + ch[l1ido + 1] = VSUB(cc[1], cc[ido+1]); + } + } else { + for (k=0; k < l1ido; k += ido, ch += ido, cc += 2*ido) { + for (i=0; i<ido-1; i+=2) { + v4sf tr2 = VSUB(cc[i+0], cc[i+ido+0]); + v4sf ti2 = VSUB(cc[i+1], cc[i+ido+1]); + v4sf wr = LD_PS1(wa1[i]), wi = VMUL(LD_PS1(fsign), LD_PS1(wa1[i+1])); + ch[i] = VADD(cc[i+0], cc[i+ido+0]); + ch[i+1] = VADD(cc[i+1], cc[i+ido+1]); + VCPLXMUL(tr2, ti2, wr, wi); + ch[i+l1ido] = tr2; + ch[i+l1ido+1] = ti2; + } + } + } +} + +/* + passf3 and passb3 has been merged here, fsign = -1 for passf3, +1 for passb3 +*/ +static NEVER_INLINE(void) passf3_ps(int ido, int l1, const v4sf *cc, v4sf *ch, + const float *wa1, const float *wa2, float fsign) { + static const float taur = -0.5f; + float taui = 0.866025403784439f*fsign; + int i, k; + v4sf tr2, ti2, cr2, ci2, cr3, ci3, dr2, di2, dr3, di3; + int l1ido = l1*ido; + float wr1, wi1, wr2, wi2; + assert(ido > 2); + for (k=0; k< l1ido; k += ido, cc+= 3*ido, ch +=ido) { + for (i=0; i<ido-1; i+=2) { + tr2 = VADD(cc[i+ido], cc[i+2*ido]); + cr2 = VADD(cc[i], SVMUL(taur,tr2)); + ch[i] = VADD(cc[i], tr2); + ti2 = VADD(cc[i+ido+1], cc[i+2*ido+1]); + ci2 = VADD(cc[i +1], SVMUL(taur,ti2)); + ch[i+1] = VADD(cc[i+1], ti2); + cr3 = SVMUL(taui, VSUB(cc[i+ido], cc[i+2*ido])); + ci3 = SVMUL(taui, VSUB(cc[i+ido+1], cc[i+2*ido+1])); + dr2 = VSUB(cr2, ci3); + dr3 = VADD(cr2, ci3); + di2 = VADD(ci2, cr3); + di3 = VSUB(ci2, cr3); + wr1=wa1[i]; wi1=fsign*wa1[i+1]; wr2=wa2[i]; wi2=fsign*wa2[i+1]; + VCPLXMUL(dr2, di2, LD_PS1(wr1), LD_PS1(wi1)); + ch[i+l1ido] = dr2; + ch[i+l1ido + 1] = di2; + VCPLXMUL(dr3, di3, LD_PS1(wr2), LD_PS1(wi2)); + ch[i+2*l1ido] = dr3; + ch[i+2*l1ido+1] = di3; + } + } +} /* passf3 */ + +static NEVER_INLINE(void) passf4_ps(int ido, int l1, const v4sf *cc, v4sf *ch, + const float *wa1, const float *wa2, const float *wa3, float fsign) { + /* isign == -1 for forward transform and +1 for backward transform */ + + int i, k; + v4sf ci2, ci3, ci4, cr2, cr3, cr4, ti1, ti2, ti3, ti4, tr1, tr2, tr3, tr4; + int l1ido = l1*ido; + if (ido == 2) { + for (k=0; k < l1ido; k += ido, ch += ido, cc += 4*ido) { + tr1 = VSUB(cc[0], cc[2*ido + 0]); + tr2 = VADD(cc[0], cc[2*ido + 0]); + ti1 = VSUB(cc[1], cc[2*ido + 1]); + ti2 = VADD(cc[1], cc[2*ido + 1]); + ti4 = VMUL(VSUB(cc[1*ido + 0], cc[3*ido + 0]), LD_PS1(fsign)); + tr4 = VMUL(VSUB(cc[3*ido + 1], cc[1*ido + 1]), LD_PS1(fsign)); + tr3 = VADD(cc[ido + 0], cc[3*ido + 0]); + ti3 = VADD(cc[ido + 1], cc[3*ido + 1]); + + ch[0*l1ido + 0] = VADD(tr2, tr3); + ch[0*l1ido + 1] = VADD(ti2, ti3); + ch[1*l1ido + 0] = VADD(tr1, tr4); + ch[1*l1ido + 1] = VADD(ti1, ti4); + ch[2*l1ido + 0] = VSUB(tr2, tr3); + ch[2*l1ido + 1] = VSUB(ti2, ti3); + ch[3*l1ido + 0] = VSUB(tr1, tr4); + ch[3*l1ido + 1] = 
VSUB(ti1, ti4); + } + } else { + for (k=0; k < l1ido; k += ido, ch+=ido, cc += 4*ido) { + for (i=0; i<ido-1; i+=2) { + float wr1, wi1, wr2, wi2, wr3, wi3; + tr1 = VSUB(cc[i + 0], cc[i + 2*ido + 0]); + tr2 = VADD(cc[i + 0], cc[i + 2*ido + 0]); + ti1 = VSUB(cc[i + 1], cc[i + 2*ido + 1]); + ti2 = VADD(cc[i + 1], cc[i + 2*ido + 1]); + tr4 = VMUL(VSUB(cc[i + 3*ido + 1], cc[i + 1*ido + 1]), LD_PS1(fsign)); + ti4 = VMUL(VSUB(cc[i + 1*ido + 0], cc[i + 3*ido + 0]), LD_PS1(fsign)); + tr3 = VADD(cc[i + ido + 0], cc[i + 3*ido + 0]); + ti3 = VADD(cc[i + ido + 1], cc[i + 3*ido + 1]); + + ch[i] = VADD(tr2, tr3); + cr3 = VSUB(tr2, tr3); + ch[i + 1] = VADD(ti2, ti3); + ci3 = VSUB(ti2, ti3); + + cr2 = VADD(tr1, tr4); + cr4 = VSUB(tr1, tr4); + ci2 = VADD(ti1, ti4); + ci4 = VSUB(ti1, ti4); + wr1=wa1[i]; wi1=fsign*wa1[i+1]; + VCPLXMUL(cr2, ci2, LD_PS1(wr1), LD_PS1(wi1)); + wr2=wa2[i]; wi2=fsign*wa2[i+1]; + ch[i + l1ido] = cr2; + ch[i + l1ido + 1] = ci2; + + VCPLXMUL(cr3, ci3, LD_PS1(wr2), LD_PS1(wi2)); + wr3=wa3[i]; wi3=fsign*wa3[i+1]; + ch[i + 2*l1ido] = cr3; + ch[i + 2*l1ido + 1] = ci3; + + VCPLXMUL(cr4, ci4, LD_PS1(wr3), LD_PS1(wi3)); + ch[i + 3*l1ido] = cr4; + ch[i + 3*l1ido + 1] = ci4; + } + } + } +} /* passf4 */ + +/* + passf5 and passb5 has been merged here, fsign = -1 for passf5, +1 for passb5 +*/ +static NEVER_INLINE(void) passf5_ps(int ido, int l1, const v4sf *cc, v4sf *ch, + const float *wa1, const float *wa2, + const float *wa3, const float *wa4, float fsign) { + static const float tr11 = .309016994374947f; + const float ti11 = .951056516295154f*fsign; + static const float tr12 = -.809016994374947f; + const float ti12 = .587785252292473f*fsign; + + /* Local variables */ + int i, k; + v4sf ci2, ci3, ci4, ci5, di3, di4, di5, di2, cr2, cr3, cr5, cr4, ti2, ti3, + ti4, ti5, dr3, dr4, dr5, dr2, tr2, tr3, tr4, tr5; + + float wr1, wi1, wr2, wi2, wr3, wi3, wr4, wi4; + +#define cc_ref(a_1,a_2) cc[(a_2-1)*ido + a_1 + 1] +#define ch_ref(a_1,a_3) ch[(a_3-1)*l1*ido + a_1 + 1] + + assert(ido > 2); + for (k = 0; k < l1; ++k, cc += 5*ido, ch += ido) { + for (i = 0; i < ido-1; i += 2) { + ti5 = VSUB(cc_ref(i , 2), cc_ref(i , 5)); + ti2 = VADD(cc_ref(i , 2), cc_ref(i , 5)); + ti4 = VSUB(cc_ref(i , 3), cc_ref(i , 4)); + ti3 = VADD(cc_ref(i , 3), cc_ref(i , 4)); + tr5 = VSUB(cc_ref(i-1, 2), cc_ref(i-1, 5)); + tr2 = VADD(cc_ref(i-1, 2), cc_ref(i-1, 5)); + tr4 = VSUB(cc_ref(i-1, 3), cc_ref(i-1, 4)); + tr3 = VADD(cc_ref(i-1, 3), cc_ref(i-1, 4)); + ch_ref(i-1, 1) = VADD(cc_ref(i-1, 1), VADD(tr2, tr3)); + ch_ref(i , 1) = VADD(cc_ref(i , 1), VADD(ti2, ti3)); + cr2 = VADD(cc_ref(i-1, 1), VADD(SVMUL(tr11, tr2),SVMUL(tr12, tr3))); + ci2 = VADD(cc_ref(i , 1), VADD(SVMUL(tr11, ti2),SVMUL(tr12, ti3))); + cr3 = VADD(cc_ref(i-1, 1), VADD(SVMUL(tr12, tr2),SVMUL(tr11, tr3))); + ci3 = VADD(cc_ref(i , 1), VADD(SVMUL(tr12, ti2),SVMUL(tr11, ti3))); + cr5 = VADD(SVMUL(ti11, tr5), SVMUL(ti12, tr4)); + ci5 = VADD(SVMUL(ti11, ti5), SVMUL(ti12, ti4)); + cr4 = VSUB(SVMUL(ti12, tr5), SVMUL(ti11, tr4)); + ci4 = VSUB(SVMUL(ti12, ti5), SVMUL(ti11, ti4)); + dr3 = VSUB(cr3, ci4); + dr4 = VADD(cr3, ci4); + di3 = VADD(ci3, cr4); + di4 = VSUB(ci3, cr4); + dr5 = VADD(cr2, ci5); + dr2 = VSUB(cr2, ci5); + di5 = VSUB(ci2, cr5); + di2 = VADD(ci2, cr5); + wr1=wa1[i]; wi1=fsign*wa1[i+1]; wr2=wa2[i]; wi2=fsign*wa2[i+1]; + wr3=wa3[i]; wi3=fsign*wa3[i+1]; wr4=wa4[i]; wi4=fsign*wa4[i+1]; + VCPLXMUL(dr2, di2, LD_PS1(wr1), LD_PS1(wi1)); + ch_ref(i - 1, 2) = dr2; + ch_ref(i, 2) = di2; + VCPLXMUL(dr3, di3, LD_PS1(wr2), LD_PS1(wi2)); + ch_ref(i - 1, 3) = dr3; + 
ch_ref(i, 3) = di3; + VCPLXMUL(dr4, di4, LD_PS1(wr3), LD_PS1(wi3)); + ch_ref(i - 1, 4) = dr4; + ch_ref(i, 4) = di4; + VCPLXMUL(dr5, di5, LD_PS1(wr4), LD_PS1(wi4)); + ch_ref(i - 1, 5) = dr5; + ch_ref(i, 5) = di5; + } + } +#undef ch_ref +#undef cc_ref +} + +static NEVER_INLINE(void) radf2_ps(int ido, int l1, const v4sf * RESTRICT cc, v4sf * RESTRICT ch, const float *wa1) { + static const float minus_one = -1.f; + int i, k, l1ido = l1*ido; + for (k=0; k < l1ido; k += ido) { + v4sf a = cc[k], b = cc[k + l1ido]; + ch[2*k] = VADD(a, b); + ch[2*(k+ido)-1] = VSUB(a, b); + } + if (ido < 2) return; + if (ido != 2) { + for (k=0; k < l1ido; k += ido) { + for (i=2; i<ido; i+=2) { + v4sf tr2 = cc[i - 1 + k + l1ido], ti2 = cc[i + k + l1ido]; + v4sf br = cc[i - 1 + k], bi = cc[i + k]; + VCPLXMULCONJ(tr2, ti2, LD_PS1(wa1[i - 2]), LD_PS1(wa1[i - 1])); + ch[i + 2*k] = VADD(bi, ti2); + ch[2*(k+ido) - i] = VSUB(ti2, bi); + ch[i - 1 + 2*k] = VADD(br, tr2); + ch[2*(k+ido) - i -1] = VSUB(br, tr2); + } + } + if (ido % 2 == 1) return; + } + for (k=0; k < l1ido; k += ido) { + ch[2*k + ido] = SVMUL(minus_one, cc[ido-1 + k + l1ido]); + ch[2*k + ido-1] = cc[k + ido-1]; + } +} /* radf2 */ + + +static NEVER_INLINE(void) radb2_ps(int ido, int l1, const v4sf *cc, v4sf *ch, const float *wa1) { + static const float minus_two=-2; + int i, k, l1ido = l1*ido; + v4sf a,b,c,d, tr2, ti2; + for (k=0; k < l1ido; k += ido) { + a = cc[2*k]; b = cc[2*(k+ido) - 1]; + ch[k] = VADD(a, b); + ch[k + l1ido] =VSUB(a, b); + } + if (ido < 2) return; + if (ido != 2) { + for (k = 0; k < l1ido; k += ido) { + for (i = 2; i < ido; i += 2) { + a = cc[i-1 + 2*k]; b = cc[2*(k + ido) - i - 1]; + c = cc[i+0 + 2*k]; d = cc[2*(k + ido) - i + 0]; + ch[i-1 + k] = VADD(a, b); + tr2 = VSUB(a, b); + ch[i+0 + k] = VSUB(c, d); + ti2 = VADD(c, d); + VCPLXMUL(tr2, ti2, LD_PS1(wa1[i - 2]), LD_PS1(wa1[i - 1])); + ch[i-1 + k + l1ido] = tr2; + ch[i+0 + k + l1ido] = ti2; + } + } + if (ido % 2 == 1) return; + } + for (k = 0; k < l1ido; k += ido) { + a = cc[2*k + ido-1]; b = cc[2*k + ido]; + ch[k + ido-1] = VADD(a,a); + ch[k + ido-1 + l1ido] = SVMUL(minus_two, b); + } +} /* radb2 */ + +static void radf3_ps(int ido, int l1, const v4sf * RESTRICT cc, v4sf * RESTRICT ch, + const float *wa1, const float *wa2) { + static const float taur = -0.5f; + static const float taui = 0.866025403784439f; + int i, k, ic; + v4sf ci2, di2, di3, cr2, dr2, dr3, ti2, ti3, tr2, tr3, wr1, wi1, wr2, wi2; + for (k=0; k<l1; k++) { + cr2 = VADD(cc[(k + l1)*ido], cc[(k + 2*l1)*ido]); + ch[3*k*ido] = VADD(cc[k*ido], cr2); + ch[(3*k+2)*ido] = SVMUL(taui, VSUB(cc[(k + l1*2)*ido], cc[(k + l1)*ido])); + ch[ido-1 + (3*k + 1)*ido] = VADD(cc[k*ido], SVMUL(taur, cr2)); + } + if (ido == 1) return; + for (k=0; k<l1; k++) { + for (i=2; i<ido; i+=2) { + ic = ido - i; + wr1 = LD_PS1(wa1[i - 2]); wi1 = LD_PS1(wa1[i - 1]); + dr2 = cc[i - 1 + (k + l1)*ido]; di2 = cc[i + (k + l1)*ido]; + VCPLXMULCONJ(dr2, di2, wr1, wi1); + + wr2 = LD_PS1(wa2[i - 2]); wi2 = LD_PS1(wa2[i - 1]); + dr3 = cc[i - 1 + (k + l1*2)*ido]; di3 = cc[i + (k + l1*2)*ido]; + VCPLXMULCONJ(dr3, di3, wr2, wi2); + + cr2 = VADD(dr2, dr3); + ci2 = VADD(di2, di3); + ch[i - 1 + 3*k*ido] = VADD(cc[i - 1 + k*ido], cr2); + ch[i + 3*k*ido] = VADD(cc[i + k*ido], ci2); + tr2 = VADD(cc[i - 1 + k*ido], SVMUL(taur, cr2)); + ti2 = VADD(cc[i + k*ido], SVMUL(taur, ci2)); + tr3 = SVMUL(taui, VSUB(di2, di3)); + ti3 = SVMUL(taui, VSUB(dr3, dr2)); + ch[i - 1 + (3*k + 2)*ido] = VADD(tr2, tr3); + ch[ic - 1 + (3*k + 1)*ido] = VSUB(tr2, tr3); + ch[i + (3*k + 2)*ido] = VADD(ti2, 
ti3); + ch[ic + (3*k + 1)*ido] = VSUB(ti3, ti2); + } + } +} /* radf3 */ + + +static void radb3_ps(int ido, int l1, const v4sf *RESTRICT cc, v4sf *RESTRICT ch, + const float *wa1, const float *wa2) +{ + static const float taur = -0.5f; + static const float taui = 0.866025403784439f; + static const float taui_2 = 0.866025403784439f*2; + int i, k, ic; + v4sf ci2, ci3, di2, di3, cr2, cr3, dr2, dr3, ti2, tr2; + for (k=0; k<l1; k++) { + tr2 = cc[ido-1 + (3*k + 1)*ido]; tr2 = VADD(tr2,tr2); + cr2 = VMADD(LD_PS1(taur), tr2, cc[3*k*ido]); + ch[k*ido] = VADD(cc[3*k*ido], tr2); + ci3 = SVMUL(taui_2, cc[(3*k + 2)*ido]); + ch[(k + l1)*ido] = VSUB(cr2, ci3); + ch[(k + 2*l1)*ido] = VADD(cr2, ci3); + } + if (ido == 1) return; + for (k=0; k<l1; k++) { + for (i=2; i<ido; i+=2) { + ic = ido - i; + tr2 = VADD(cc[i - 1 + (3*k + 2)*ido], cc[ic - 1 + (3*k + 1)*ido]); + cr2 = VMADD(LD_PS1(taur), tr2, cc[i - 1 + 3*k*ido]); + ch[i - 1 + k*ido] = VADD(cc[i - 1 + 3*k*ido], tr2); + ti2 = VSUB(cc[i + (3*k + 2)*ido], cc[ic + (3*k + 1)*ido]); + ci2 = VMADD(LD_PS1(taur), ti2, cc[i + 3*k*ido]); + ch[i + k*ido] = VADD(cc[i + 3*k*ido], ti2); + cr3 = SVMUL(taui, VSUB(cc[i - 1 + (3*k + 2)*ido], cc[ic - 1 + (3*k + 1)*ido])); + ci3 = SVMUL(taui, VADD(cc[i + (3*k + 2)*ido], cc[ic + (3*k + 1)*ido])); + dr2 = VSUB(cr2, ci3); + dr3 = VADD(cr2, ci3); + di2 = VADD(ci2, cr3); + di3 = VSUB(ci2, cr3); + VCPLXMUL(dr2, di2, LD_PS1(wa1[i-2]), LD_PS1(wa1[i-1])); + ch[i - 1 + (k + l1)*ido] = dr2; + ch[i + (k + l1)*ido] = di2; + VCPLXMUL(dr3, di3, LD_PS1(wa2[i-2]), LD_PS1(wa2[i-1])); + ch[i - 1 + (k + 2*l1)*ido] = dr3; + ch[i + (k + 2*l1)*ido] = di3; + } + } +} /* radb3 */ + +static NEVER_INLINE(void) radf4_ps(int ido, int l1, const v4sf *RESTRICT cc, v4sf * RESTRICT ch, + const float * RESTRICT wa1, const float * RESTRICT wa2, const float * RESTRICT wa3) +{ + static const float minus_hsqt2 = (float)-0.7071067811865475; + int i, k, l1ido = l1*ido; + { + const v4sf *RESTRICT cc_ = cc, * RESTRICT cc_end = cc + l1ido; + v4sf * RESTRICT ch_ = ch; + while (cc < cc_end) { + // this loop represents between 25% and 40% of total radf4_ps cost ! 
+ v4sf a0 = cc[0], a1 = cc[l1ido]; + v4sf a2 = cc[2*l1ido], a3 = cc[3*l1ido]; + v4sf tr1 = VADD(a1, a3); + v4sf tr2 = VADD(a0, a2); + ch[2*ido-1] = VSUB(a0, a2); + ch[2*ido ] = VSUB(a3, a1); + ch[0 ] = VADD(tr1, tr2); + ch[4*ido-1] = VSUB(tr2, tr1); + cc += ido; ch += 4*ido; + } + cc = cc_; ch = ch_; + } + if (ido < 2) return; + if (ido != 2) { + for (k = 0; k < l1ido; k += ido) { + const v4sf * RESTRICT pc = (v4sf*)(cc + 1 + k); + for (i=2; i<ido; i += 2, pc += 2) { + int ic = ido - i; + v4sf wr, wi, cr2, ci2, cr3, ci3, cr4, ci4; + v4sf tr1, ti1, tr2, ti2, tr3, ti3, tr4, ti4; + + cr2 = pc[1*l1ido+0]; + ci2 = pc[1*l1ido+1]; + wr=LD_PS1(wa1[i - 2]); + wi=LD_PS1(wa1[i - 1]); + VCPLXMULCONJ(cr2,ci2,wr,wi); + + cr3 = pc[2*l1ido+0]; + ci3 = pc[2*l1ido+1]; + wr = LD_PS1(wa2[i-2]); + wi = LD_PS1(wa2[i-1]); + VCPLXMULCONJ(cr3, ci3, wr, wi); + + cr4 = pc[3*l1ido]; + ci4 = pc[3*l1ido+1]; + wr = LD_PS1(wa3[i-2]); + wi = LD_PS1(wa3[i-1]); + VCPLXMULCONJ(cr4, ci4, wr, wi); + + /* at this point, on SSE, five of "cr2 cr3 cr4 ci2 ci3 ci4" should be loaded in registers */ + + tr1 = VADD(cr2,cr4); + tr4 = VSUB(cr4,cr2); + tr2 = VADD(pc[0],cr3); + tr3 = VSUB(pc[0],cr3); + ch[i - 1 + 4*k] = VADD(tr1,tr2); + ch[ic - 1 + 4*k + 3*ido] = VSUB(tr2,tr1); // at this point tr1 and tr2 can be disposed + ti1 = VADD(ci2,ci4); + ti4 = VSUB(ci2,ci4); + ch[i - 1 + 4*k + 2*ido] = VADD(ti4,tr3); + ch[ic - 1 + 4*k + 1*ido] = VSUB(tr3,ti4); // dispose tr3, ti4 + ti2 = VADD(pc[1],ci3); + ti3 = VSUB(pc[1],ci3); + ch[i + 4*k] = VADD(ti1, ti2); + ch[ic + 4*k + 3*ido] = VSUB(ti1, ti2); + ch[i + 4*k + 2*ido] = VADD(tr4, ti3); + ch[ic + 4*k + 1*ido] = VSUB(tr4, ti3); + } + } + if (ido % 2 == 1) return; + } + for (k=0; k<l1ido; k += ido) { + v4sf a = cc[ido-1 + k + l1ido], b = cc[ido-1 + k + 3*l1ido]; + v4sf c = cc[ido-1 + k], d = cc[ido-1 + k + 2*l1ido]; + v4sf ti1 = SVMUL(minus_hsqt2, VADD(a, b)); + v4sf tr1 = SVMUL(minus_hsqt2, VSUB(b, a)); + ch[ido-1 + 4*k] = VADD(tr1, c); + ch[ido-1 + 4*k + 2*ido] = VSUB(c, tr1); + ch[4*k + 1*ido] = VSUB(ti1, d); + ch[4*k + 3*ido] = VADD(ti1, d); + } +} /* radf4 */ + + +static NEVER_INLINE(void) radb4_ps(int ido, int l1, const v4sf * RESTRICT cc, v4sf * RESTRICT ch, + const float * RESTRICT wa1, const float * RESTRICT wa2, const float *RESTRICT wa3) +{ + static const float minus_sqrt2 = (float)-1.414213562373095; + static const float two = 2.f; + int i, k, l1ido = l1*ido; + v4sf ci2, ci3, ci4, cr2, cr3, cr4, ti1, ti2, ti3, ti4, tr1, tr2, tr3, tr4; + { + const v4sf *RESTRICT cc_ = cc, * RESTRICT ch_end = ch + l1ido; + v4sf *ch_ = ch; + while (ch < ch_end) { + v4sf a = cc[0], b = cc[4*ido-1]; + v4sf c = cc[2*ido], d = cc[2*ido-1]; + tr3 = SVMUL(two,d); + tr2 = VADD(a,b); + tr1 = VSUB(a,b); + tr4 = SVMUL(two,c); + ch[0*l1ido] = VADD(tr2, tr3); + ch[2*l1ido] = VSUB(tr2, tr3); + ch[1*l1ido] = VSUB(tr1, tr4); + ch[3*l1ido] = VADD(tr1, tr4); + + cc += 4*ido; ch += ido; + } + cc = cc_; ch = ch_; + } + if (ido < 2) return; + if (ido != 2) { + for (k = 0; k < l1ido; k += ido) { + const v4sf * RESTRICT pc = (v4sf*)(cc - 1 + 4*k); + v4sf * RESTRICT ph = (v4sf*)(ch + k + 1); + for (i = 2; i < ido; i += 2) { + + tr1 = VSUB(pc[i], pc[4*ido - i]); + tr2 = VADD(pc[i], pc[4*ido - i]); + ti4 = VSUB(pc[2*ido + i], pc[2*ido - i]); + tr3 = VADD(pc[2*ido + i], pc[2*ido - i]); + ph[0] = VADD(tr2, tr3); + cr3 = VSUB(tr2, tr3); + + ti3 = VSUB(pc[2*ido + i + 1], pc[2*ido - i + 1]); + tr4 = VADD(pc[2*ido + i + 1], pc[2*ido - i + 1]); + cr2 = VSUB(tr1, tr4); + cr4 = VADD(tr1, tr4); + + ti1 = VADD(pc[i + 1], pc[4*ido - i + 
1]); + ti2 = VSUB(pc[i + 1], pc[4*ido - i + 1]); + + ph[1] = VADD(ti2, ti3); ph += l1ido; + ci3 = VSUB(ti2, ti3); + ci2 = VADD(ti1, ti4); + ci4 = VSUB(ti1, ti4); + VCPLXMUL(cr2, ci2, LD_PS1(wa1[i-2]), LD_PS1(wa1[i-1])); + ph[0] = cr2; + ph[1] = ci2; ph += l1ido; + VCPLXMUL(cr3, ci3, LD_PS1(wa2[i-2]), LD_PS1(wa2[i-1])); + ph[0] = cr3; + ph[1] = ci3; ph += l1ido; + VCPLXMUL(cr4, ci4, LD_PS1(wa3[i-2]), LD_PS1(wa3[i-1])); + ph[0] = cr4; + ph[1] = ci4; ph = ph - 3*l1ido + 2; + } + } + if (ido % 2 == 1) return; + } + for (k=0; k < l1ido; k+=ido) { + int i0 = 4*k + ido; + v4sf c = cc[i0-1], d = cc[i0 + 2*ido-1]; + v4sf a = cc[i0+0], b = cc[i0 + 2*ido+0]; + tr1 = VSUB(c,d); + tr2 = VADD(c,d); + ti1 = VADD(b,a); + ti2 = VSUB(b,a); + ch[ido-1 + k + 0*l1ido] = VADD(tr2,tr2); + ch[ido-1 + k + 1*l1ido] = SVMUL(minus_sqrt2, VSUB(ti1, tr1)); + ch[ido-1 + k + 2*l1ido] = VADD(ti2, ti2); + ch[ido-1 + k + 3*l1ido] = SVMUL(minus_sqrt2, VADD(ti1, tr1)); + } +} /* radb4 */ + +static void radf5_ps(int ido, int l1, const v4sf * RESTRICT cc, v4sf * RESTRICT ch, + const float *wa1, const float *wa2, const float *wa3, const float *wa4) +{ + static const float tr11 = .309016994374947f; + static const float ti11 = .951056516295154f; + static const float tr12 = -.809016994374947f; + static const float ti12 = .587785252292473f; + + /* System generated locals */ + int cc_offset, ch_offset; + + /* Local variables */ + int i, k, ic; + v4sf ci2, di2, ci4, ci5, di3, di4, di5, ci3, cr2, cr3, dr2, dr3, dr4, dr5, + cr5, cr4, ti2, ti3, ti5, ti4, tr2, tr3, tr4, tr5; + int idp2; + + +#define cc_ref(a_1,a_2,a_3) cc[((a_3)*l1 + (a_2))*ido + a_1] +#define ch_ref(a_1,a_2,a_3) ch[((a_3)*5 + (a_2))*ido + a_1] + + /* Parameter adjustments */ + ch_offset = 1 + ido * 6; + ch -= ch_offset; + cc_offset = 1 + ido * (1 + l1); + cc -= cc_offset; + + /* Function Body */ + for (k = 1; k <= l1; ++k) { + cr2 = VADD(cc_ref(1, k, 5), cc_ref(1, k, 2)); + ci5 = VSUB(cc_ref(1, k, 5), cc_ref(1, k, 2)); + cr3 = VADD(cc_ref(1, k, 4), cc_ref(1, k, 3)); + ci4 = VSUB(cc_ref(1, k, 4), cc_ref(1, k, 3)); + ch_ref(1, 1, k) = VADD(cc_ref(1, k, 1), VADD(cr2, cr3)); + ch_ref(ido, 2, k) = VADD(cc_ref(1, k, 1), VADD(SVMUL(tr11, cr2), SVMUL(tr12, cr3))); + ch_ref(1, 3, k) = VADD(SVMUL(ti11, ci5), SVMUL(ti12, ci4)); + ch_ref(ido, 4, k) = VADD(cc_ref(1, k, 1), VADD(SVMUL(tr12, cr2), SVMUL(tr11, cr3))); + ch_ref(1, 5, k) = VSUB(SVMUL(ti12, ci5), SVMUL(ti11, ci4)); + //printf("pffft: radf5, k=%d ch_ref=%f, ci4=%f\n", k, ch_ref(1, 5, k), ci4); + } + if (ido == 1) { + return; + } + idp2 = ido + 2; + for (k = 1; k <= l1; ++k) { + for (i = 3; i <= ido; i += 2) { + ic = idp2 - i; + dr2 = LD_PS1(wa1[i-3]); di2 = LD_PS1(wa1[i-2]); + dr3 = LD_PS1(wa2[i-3]); di3 = LD_PS1(wa2[i-2]); + dr4 = LD_PS1(wa3[i-3]); di4 = LD_PS1(wa3[i-2]); + dr5 = LD_PS1(wa4[i-3]); di5 = LD_PS1(wa4[i-2]); + VCPLXMULCONJ(dr2, di2, cc_ref(i-1, k, 2), cc_ref(i, k, 2)); + VCPLXMULCONJ(dr3, di3, cc_ref(i-1, k, 3), cc_ref(i, k, 3)); + VCPLXMULCONJ(dr4, di4, cc_ref(i-1, k, 4), cc_ref(i, k, 4)); + VCPLXMULCONJ(dr5, di5, cc_ref(i-1, k, 5), cc_ref(i, k, 5)); + cr2 = VADD(dr2, dr5); + ci5 = VSUB(dr5, dr2); + cr5 = VSUB(di2, di5); + ci2 = VADD(di2, di5); + cr3 = VADD(dr3, dr4); + ci4 = VSUB(dr4, dr3); + cr4 = VSUB(di3, di4); + ci3 = VADD(di3, di4); + ch_ref(i - 1, 1, k) = VADD(cc_ref(i - 1, k, 1), VADD(cr2, cr3)); + ch_ref(i, 1, k) = VSUB(cc_ref(i, k, 1), VADD(ci2, ci3));// + tr2 = VADD(cc_ref(i - 1, k, 1), VADD(SVMUL(tr11, cr2), SVMUL(tr12, cr3))); + ti2 = VSUB(cc_ref(i, k, 1), VADD(SVMUL(tr11, ci2), SVMUL(tr12, 
ci3)));// + tr3 = VADD(cc_ref(i - 1, k, 1), VADD(SVMUL(tr12, cr2), SVMUL(tr11, cr3))); + ti3 = VSUB(cc_ref(i, k, 1), VADD(SVMUL(tr12, ci2), SVMUL(tr11, ci3)));// + tr5 = VADD(SVMUL(ti11, cr5), SVMUL(ti12, cr4)); + ti5 = VADD(SVMUL(ti11, ci5), SVMUL(ti12, ci4)); + tr4 = VSUB(SVMUL(ti12, cr5), SVMUL(ti11, cr4)); + ti4 = VSUB(SVMUL(ti12, ci5), SVMUL(ti11, ci4)); + ch_ref(i - 1, 3, k) = VSUB(tr2, tr5); + ch_ref(ic - 1, 2, k) = VADD(tr2, tr5); + ch_ref(i, 3, k) = VADD(ti2, ti5); + ch_ref(ic, 2, k) = VSUB(ti5, ti2); + ch_ref(i - 1, 5, k) = VSUB(tr3, tr4); + ch_ref(ic - 1, 4, k) = VADD(tr3, tr4); + ch_ref(i, 5, k) = VADD(ti3, ti4); + ch_ref(ic, 4, k) = VSUB(ti4, ti3); + } + } +#undef cc_ref +#undef ch_ref +} /* radf5 */ + +static void radb5_ps(int ido, int l1, const v4sf *RESTRICT cc, v4sf *RESTRICT ch, + const float *wa1, const float *wa2, const float *wa3, const float *wa4) +{ + static const float tr11 = .309016994374947f; + static const float ti11 = .951056516295154f; + static const float tr12 = -.809016994374947f; + static const float ti12 = .587785252292473f; + + int cc_offset, ch_offset; + + /* Local variables */ + int i, k, ic; + v4sf ci2, ci3, ci4, ci5, di3, di4, di5, di2, cr2, cr3, cr5, cr4, ti2, ti3, + ti4, ti5, dr3, dr4, dr5, dr2, tr2, tr3, tr4, tr5; + int idp2; + +#define cc_ref(a_1,a_2,a_3) cc[((a_3)*5 + (a_2))*ido + a_1] +#define ch_ref(a_1,a_2,a_3) ch[((a_3)*l1 + (a_2))*ido + a_1] + + /* Parameter adjustments */ + ch_offset = 1 + ido * (1 + l1); + ch -= ch_offset; + cc_offset = 1 + ido * 6; + cc -= cc_offset; + + /* Function Body */ + for (k = 1; k <= l1; ++k) { + ti5 = VADD(cc_ref(1, 3, k), cc_ref(1, 3, k)); + ti4 = VADD(cc_ref(1, 5, k), cc_ref(1, 5, k)); + tr2 = VADD(cc_ref(ido, 2, k), cc_ref(ido, 2, k)); + tr3 = VADD(cc_ref(ido, 4, k), cc_ref(ido, 4, k)); + ch_ref(1, k, 1) = VADD(cc_ref(1, 1, k), VADD(tr2, tr3)); + cr2 = VADD(cc_ref(1, 1, k), VADD(SVMUL(tr11, tr2), SVMUL(tr12, tr3))); + cr3 = VADD(cc_ref(1, 1, k), VADD(SVMUL(tr12, tr2), SVMUL(tr11, tr3))); + ci5 = VADD(SVMUL(ti11, ti5), SVMUL(ti12, ti4)); + ci4 = VSUB(SVMUL(ti12, ti5), SVMUL(ti11, ti4)); + ch_ref(1, k, 2) = VSUB(cr2, ci5); + ch_ref(1, k, 3) = VSUB(cr3, ci4); + ch_ref(1, k, 4) = VADD(cr3, ci4); + ch_ref(1, k, 5) = VADD(cr2, ci5); + } + if (ido == 1) { + return; + } + idp2 = ido + 2; + for (k = 1; k <= l1; ++k) { + for (i = 3; i <= ido; i += 2) { + ic = idp2 - i; + ti5 = VADD(cc_ref(i , 3, k), cc_ref(ic , 2, k)); + ti2 = VSUB(cc_ref(i , 3, k), cc_ref(ic , 2, k)); + ti4 = VADD(cc_ref(i , 5, k), cc_ref(ic , 4, k)); + ti3 = VSUB(cc_ref(i , 5, k), cc_ref(ic , 4, k)); + tr5 = VSUB(cc_ref(i-1, 3, k), cc_ref(ic-1, 2, k)); + tr2 = VADD(cc_ref(i-1, 3, k), cc_ref(ic-1, 2, k)); + tr4 = VSUB(cc_ref(i-1, 5, k), cc_ref(ic-1, 4, k)); + tr3 = VADD(cc_ref(i-1, 5, k), cc_ref(ic-1, 4, k)); + ch_ref(i - 1, k, 1) = VADD(cc_ref(i-1, 1, k), VADD(tr2, tr3)); + ch_ref(i, k, 1) = VADD(cc_ref(i, 1, k), VADD(ti2, ti3)); + cr2 = VADD(cc_ref(i-1, 1, k), VADD(SVMUL(tr11, tr2), SVMUL(tr12, tr3))); + ci2 = VADD(cc_ref(i , 1, k), VADD(SVMUL(tr11, ti2), SVMUL(tr12, ti3))); + cr3 = VADD(cc_ref(i-1, 1, k), VADD(SVMUL(tr12, tr2), SVMUL(tr11, tr3))); + ci3 = VADD(cc_ref(i , 1, k), VADD(SVMUL(tr12, ti2), SVMUL(tr11, ti3))); + cr5 = VADD(SVMUL(ti11, tr5), SVMUL(ti12, tr4)); + ci5 = VADD(SVMUL(ti11, ti5), SVMUL(ti12, ti4)); + cr4 = VSUB(SVMUL(ti12, tr5), SVMUL(ti11, tr4)); + ci4 = VSUB(SVMUL(ti12, ti5), SVMUL(ti11, ti4)); + dr3 = VSUB(cr3, ci4); + dr4 = VADD(cr3, ci4); + di3 = VADD(ci3, cr4); + di4 = VSUB(ci3, cr4); + dr5 = VADD(cr2, ci5); + dr2 = 
VSUB(cr2, ci5); + di5 = VSUB(ci2, cr5); + di2 = VADD(ci2, cr5); + VCPLXMUL(dr2, di2, LD_PS1(wa1[i-3]), LD_PS1(wa1[i-2])); + VCPLXMUL(dr3, di3, LD_PS1(wa2[i-3]), LD_PS1(wa2[i-2])); + VCPLXMUL(dr4, di4, LD_PS1(wa3[i-3]), LD_PS1(wa3[i-2])); + VCPLXMUL(dr5, di5, LD_PS1(wa4[i-3]), LD_PS1(wa4[i-2])); + + ch_ref(i-1, k, 2) = dr2; ch_ref(i, k, 2) = di2; + ch_ref(i-1, k, 3) = dr3; ch_ref(i, k, 3) = di3; + ch_ref(i-1, k, 4) = dr4; ch_ref(i, k, 4) = di4; + ch_ref(i-1, k, 5) = dr5; ch_ref(i, k, 5) = di5; + } + } +#undef cc_ref +#undef ch_ref +} /* radb5 */ + +static NEVER_INLINE(v4sf *) rfftf1_ps(int n, const v4sf *input_readonly, v4sf *work1, v4sf *work2, + const float *wa, const int *ifac) { + v4sf *in = (v4sf*)input_readonly; + v4sf *out = (in == work2 ? work1 : work2); + int nf = ifac[1], k1; + int l2 = n; + int iw = n-1; + assert(in != out && work1 != work2); + for (k1 = 1; k1 <= nf; ++k1) { + int kh = nf - k1; + int ip = ifac[kh + 2]; + int l1 = l2 / ip; + int ido = n / l2; + iw -= (ip - 1)*ido; + switch (ip) { + case 5: { + int ix2 = iw + ido; + int ix3 = ix2 + ido; + int ix4 = ix3 + ido; + radf5_ps(ido, l1, in, out, &wa[iw], &wa[ix2], &wa[ix3], &wa[ix4]); + } break; + case 4: { + int ix2 = iw + ido; + int ix3 = ix2 + ido; + radf4_ps(ido, l1, in, out, &wa[iw], &wa[ix2], &wa[ix3]); + } break; + case 3: { + int ix2 = iw + ido; + radf3_ps(ido, l1, in, out, &wa[iw], &wa[ix2]); + } break; + case 2: + radf2_ps(ido, l1, in, out, &wa[iw]); + break; + default: + assert(0); + break; + } + l2 = l1; + if (out == work2) { + out = work1; in = work2; + } else { + out = work2; in = work1; + } + } + return in; /* this is in fact the output .. */ +} /* rfftf1 */ + +static NEVER_INLINE(v4sf *) rfftb1_ps(int n, const v4sf *input_readonly, v4sf *work1, v4sf *work2, + const float *wa, const int *ifac) { + v4sf *in = (v4sf*)input_readonly; + v4sf *out = (in == work2 ? work1 : work2); + int nf = ifac[1], k1; + int l1 = 1; + int iw = 0; + assert(in != out); + for (k1=1; k1<=nf; k1++) { + int ip = ifac[k1 + 1]; + int l2 = ip*l1; + int ido = n / l2; + switch (ip) { + case 5: { + int ix2 = iw + ido; + int ix3 = ix2 + ido; + int ix4 = ix3 + ido; + radb5_ps(ido, l1, in, out, &wa[iw], &wa[ix2], &wa[ix3], &wa[ix4]); + } break; + case 4: { + int ix2 = iw + ido; + int ix3 = ix2 + ido; + radb4_ps(ido, l1, in, out, &wa[iw], &wa[ix2], &wa[ix3]); + } break; + case 3: { + int ix2 = iw + ido; + radb3_ps(ido, l1, in, out, &wa[iw], &wa[ix2]); + } break; + case 2: + radb2_ps(ido, l1, in, out, &wa[iw]); + break; + default: + assert(0); + break; + } + l1 = l2; + iw += (ip - 1)*ido; + + if (out == work2) { + out = work1; in = work2; + } else { + out = work2; in = work1; + } + } + return in; /* this is in fact the output .. 
*/ +} + +static int decompose(int n, int *ifac, const int *ntryh) { + int nl = n, nf = 0, i, j = 0; + for (j=0; ntryh[j]; ++j) { + int ntry = ntryh[j]; + while (nl != 1) { + int nq = nl / ntry; + int nr = nl - ntry * nq; + if (nr == 0) { + ifac[2+nf++] = ntry; + nl = nq; + if (ntry == 2 && nf != 1) { + for (i = 2; i <= nf; ++i) { + int ib = nf - i + 2; + ifac[ib + 1] = ifac[ib]; + } + ifac[2] = 2; + } + } else break; + } + } + ifac[0] = n; + ifac[1] = nf; + return nf; +} + + + +static void rffti1_ps(int n, float *wa, int *ifac) +{ + static const int ntryh[] = { 4,2,3,5,0 }; + int k1, j, ii; + + int nf = decompose(n,ifac,ntryh); + float argh = (2*M_PI) / n; + int is = 0; + int nfm1 = nf - 1; + int l1 = 1; + for (k1 = 1; k1 <= nfm1; k1++) { + int ip = ifac[k1 + 1]; + int ld = 0; + int l2 = l1*ip; + int ido = n / l2; + int ipm = ip - 1; + for (j = 1; j <= ipm; ++j) { + float argld; + int i = is, fi=0; + ld += l1; + argld = ld*argh; + for (ii = 3; ii <= ido; ii += 2) { + i += 2; + fi += 1; + wa[i - 2] = cos(fi*argld); + wa[i - 1] = sin(fi*argld); + } + is += ido; + } + l1 = l2; + } +} /* rffti1 */ + +void cffti1_ps(int n, float *wa, int *ifac) +{ + static const int ntryh[] = { 5,3,4,2,0 }; + int k1, j, ii; + + int nf = decompose(n,ifac,ntryh); + float argh = (2*M_PI)/(float)n; + int i = 1; + int l1 = 1; + for (k1=1; k1<=nf; k1++) { + int ip = ifac[k1+1]; + int ld = 0; + int l2 = l1*ip; + int ido = n / l2; + int idot = ido + ido + 2; + int ipm = ip - 1; + for (j=1; j<=ipm; j++) { + float argld; + int i1 = i, fi = 0; + wa[i-1] = 1; + wa[i] = 0; + ld += l1; + argld = ld*argh; + for (ii = 4; ii <= idot; ii += 2) { + i += 2; + fi += 1; + wa[i-1] = cos(fi*argld); + wa[i] = sin(fi*argld); + } + if (ip > 5) { + wa[i1-1] = wa[i-1]; + wa[i1] = wa[i]; + } + } + l1 = l2; + } +} /* cffti1 */ + + +v4sf *cfftf1_ps(int n, const v4sf *input_readonly, v4sf *work1, v4sf *work2, const float *wa, const int *ifac, int isign) { + v4sf *in = (v4sf*)input_readonly; + v4sf *out = (in == work2 ? work1 : work2); + int nf = ifac[1], k1; + int l1 = 1; + int iw = 0; + assert(in != out && work1 != work2); + for (k1=2; k1<=nf+1; k1++) { + int ip = ifac[k1]; + int l2 = ip*l1; + int ido = n / l2; + int idot = ido + ido; + switch (ip) { + case 5: { + int ix2 = iw + idot; + int ix3 = ix2 + idot; + int ix4 = ix3 + idot; + passf5_ps(idot, l1, in, out, &wa[iw], &wa[ix2], &wa[ix3], &wa[ix4], isign); + } break; + case 4: { + int ix2 = iw + idot; + int ix3 = ix2 + idot; + passf4_ps(idot, l1, in, out, &wa[iw], &wa[ix2], &wa[ix3], isign); + } break; + case 2: { + passf2_ps(idot, l1, in, out, &wa[iw], isign); + } break; + case 3: { + int ix2 = iw + idot; + passf3_ps(idot, l1, in, out, &wa[iw], &wa[ix2], isign); + } break; + default: + assert(0); + } + l1 = l2; + iw += (ip - 1)*idot; + if (out == work2) { + out = work1; in = work2; + } else { + out = work2; in = work1; + } + } + + return in; /* this is in fact the output .. 
*/ +} + + +struct PFFFT_Setup { + int N; + int Ncvec; // nb of complex simd vectors (N/4 if PFFFT_COMPLEX, N/8 if PFFFT_REAL) + int ifac[15]; + pffft_transform_t transform; + v4sf *data; // allocated room for twiddle coefs + float *e; // points into 'data' , N/4*3 elements + float *twiddle; // points into 'data', N/4 elements +}; + +PFFFT_Setup *pffft_new_setup(int N, pffft_transform_t transform) { + PFFFT_Setup *s = (PFFFT_Setup*)malloc(sizeof(PFFFT_Setup)); + int k, m; + /* unfortunately, the fft size must be a multiple of 16 for complex FFTs + and 32 for real FFTs -- a lot of stuff would need to be rewritten to + handle other cases (or maybe just switch to a scalar fft, I don't know..) */ + if (transform == PFFFT_REAL) { assert((N%(2*SIMD_SZ*SIMD_SZ))==0 && N>0); } + if (transform == PFFFT_COMPLEX) { assert((N%(SIMD_SZ*SIMD_SZ))==0 && N>0); } + //assert((N % 32) == 0); + s->N = N; + s->transform = transform; + /* nb of complex simd vectors */ + s->Ncvec = (transform == PFFFT_REAL ? N/2 : N)/SIMD_SZ; + s->data = (v4sf*)pffft_aligned_malloc(2*s->Ncvec * sizeof(v4sf)); + s->e = (float*)s->data; + s->twiddle = (float*)(s->data + (2*s->Ncvec*(SIMD_SZ-1))/SIMD_SZ); + + if (transform == PFFFT_REAL) { + for (k=0; k < s->Ncvec; ++k) { + int i = k/SIMD_SZ; + int j = k%SIMD_SZ; + for (m=0; m < SIMD_SZ-1; ++m) { + float A = -2*M_PI*(m+1)*k / N; + s->e[(2*(i*3 + m) + 0) * SIMD_SZ + j] = cos(A); + s->e[(2*(i*3 + m) + 1) * SIMD_SZ + j] = sin(A); + } + } + rffti1_ps(N/SIMD_SZ, s->twiddle, s->ifac); + } else { + for (k=0; k < s->Ncvec; ++k) { + int i = k/SIMD_SZ; + int j = k%SIMD_SZ; + for (m=0; m < SIMD_SZ-1; ++m) { + float A = -2*M_PI*(m+1)*k / N; + s->e[(2*(i*3 + m) + 0)*SIMD_SZ + j] = cos(A); + s->e[(2*(i*3 + m) + 1)*SIMD_SZ + j] = sin(A); + } + } + cffti1_ps(N/SIMD_SZ, s->twiddle, s->ifac); + } + + /* check that N is decomposable with allowed prime factors */ + for (k=0, m=1; k < s->ifac[1]; ++k) { m *= s->ifac[2+k]; } + if (m != N/SIMD_SZ) { + pffft_destroy_setup(s); s = 0; + } + + return s; +} + + +void pffft_destroy_setup(PFFFT_Setup *s) { + pffft_aligned_free(s->data); + free(s); +} + +#if !defined(PFFFT_SIMD_DISABLE) + +/* [0 0 1 2 3 4 5 6 7 8] -> [0 8 7 6 5 4 3 2 1] */ +static void reversed_copy(int N, const v4sf *in, int in_stride, v4sf *out) { + v4sf g0, g1; + int k; + INTERLEAVE2(in[0], in[1], g0, g1); in += in_stride; + + *--out = VSWAPHL(g0, g1); // [g0l, g0h], [g1l g1h] -> [g1l, g0h] + for (k=1; k < N; ++k) { + v4sf h0, h1; + INTERLEAVE2(in[0], in[1], h0, h1); in += in_stride; + *--out = VSWAPHL(g1, h0); + *--out = VSWAPHL(h0, h1); + g1 = h1; + } + *--out = VSWAPHL(g1, g0); +} + +static void unreversed_copy(int N, const v4sf *in, v4sf *out, int out_stride) { + v4sf g0, g1, h0, h1; + int k; + g0 = g1 = in[0]; ++in; + for (k=1; k < N; ++k) { + h0 = *in++; h1 = *in++; + g1 = VSWAPHL(g1, h0); + h0 = VSWAPHL(h0, h1); + UNINTERLEAVE2(h0, g1, out[0], out[1]); out += out_stride; + g1 = h1; + } + h0 = *in++; h1 = g0; + g1 = VSWAPHL(g1, h0); + h0 = VSWAPHL(h0, h1); + UNINTERLEAVE2(h0, g1, out[0], out[1]); +} + +void pffft_zreorder(PFFFT_Setup *setup, const float *in, float *out, pffft_direction_t direction) { + int k, N = setup->N, Ncvec = setup->Ncvec; + const v4sf *vin = (const v4sf*)in; + v4sf *vout = (v4sf*)out; + assert(in != out); + if (setup->transform == PFFFT_REAL) { + int dk = N/32; + if (direction == PFFFT_FORWARD) { + for (k=0; k < dk; ++k) { + INTERLEAVE2(vin[k*8 + 0], vin[k*8 + 1], vout[2*(0*dk + k) + 0], vout[2*(0*dk + k) + 1]); + INTERLEAVE2(vin[k*8 + 4], vin[k*8 + 5], 
vout[2*(2*dk + k) + 0], vout[2*(2*dk + k) + 1]); + } + reversed_copy(dk, vin+2, 8, (v4sf*)(out + N/2)); + reversed_copy(dk, vin+6, 8, (v4sf*)(out + N)); + } else { + for (k=0; k < dk; ++k) { + UNINTERLEAVE2(vin[2*(0*dk + k) + 0], vin[2*(0*dk + k) + 1], vout[k*8 + 0], vout[k*8 + 1]); + UNINTERLEAVE2(vin[2*(2*dk + k) + 0], vin[2*(2*dk + k) + 1], vout[k*8 + 4], vout[k*8 + 5]); + } + unreversed_copy(dk, (v4sf*)(in + N/4), (v4sf*)(out + N - 6*SIMD_SZ), -8); + unreversed_copy(dk, (v4sf*)(in + 3*N/4), (v4sf*)(out + N - 2*SIMD_SZ), -8); + } + } else { + if (direction == PFFFT_FORWARD) { + for (k=0; k < Ncvec; ++k) { + int kk = (k/4) + (k%4)*(Ncvec/4); + INTERLEAVE2(vin[k*2], vin[k*2+1], vout[kk*2], vout[kk*2+1]); + } + } else { + for (k=0; k < Ncvec; ++k) { + int kk = (k/4) + (k%4)*(Ncvec/4); + UNINTERLEAVE2(vin[kk*2], vin[kk*2+1], vout[k*2], vout[k*2+1]); + } + } + } +} + +void pffft_cplx_finalize(int Ncvec, const v4sf *in, v4sf *out, const v4sf *e) { + int k, dk = Ncvec/SIMD_SZ; // number of 4x4 matrix blocks + v4sf r0, i0, r1, i1, r2, i2, r3, i3; + v4sf sr0, dr0, sr1, dr1, si0, di0, si1, di1; + assert(in != out); + for (k=0; k < dk; ++k) { + r0 = in[8*k+0]; i0 = in[8*k+1]; + r1 = in[8*k+2]; i1 = in[8*k+3]; + r2 = in[8*k+4]; i2 = in[8*k+5]; + r3 = in[8*k+6]; i3 = in[8*k+7]; + VTRANSPOSE4(r0,r1,r2,r3); + VTRANSPOSE4(i0,i1,i2,i3); + VCPLXMUL(r1,i1,e[k*6+0],e[k*6+1]); + VCPLXMUL(r2,i2,e[k*6+2],e[k*6+3]); + VCPLXMUL(r3,i3,e[k*6+4],e[k*6+5]); + + sr0 = VADD(r0,r2); dr0 = VSUB(r0, r2); + sr1 = VADD(r1,r3); dr1 = VSUB(r1, r3); + si0 = VADD(i0,i2); di0 = VSUB(i0, i2); + si1 = VADD(i1,i3); di1 = VSUB(i1, i3); + + /* + transformation for each column is: + + [1 1 1 1 0 0 0 0] [r0] + [1 0 -1 0 0 -1 0 1] [r1] + [1 -1 1 -1 0 0 0 0] [r2] + [1 0 -1 0 0 1 0 -1] [r3] + [0 0 0 0 1 1 1 1] * [i0] + [0 1 0 -1 1 0 -1 0] [i1] + [0 0 0 0 1 -1 1 -1] [i2] + [0 -1 0 1 1 0 -1 0] [i3] + */ + + r0 = VADD(sr0, sr1); i0 = VADD(si0, si1); + r1 = VADD(dr0, di1); i1 = VSUB(di0, dr1); + r2 = VSUB(sr0, sr1); i2 = VSUB(si0, si1); + r3 = VSUB(dr0, di1); i3 = VADD(di0, dr1); + + *out++ = r0; *out++ = i0; *out++ = r1; *out++ = i1; + *out++ = r2; *out++ = i2; *out++ = r3; *out++ = i3; + } +} + +void pffft_cplx_preprocess(int Ncvec, const v4sf *in, v4sf *out, const v4sf *e) { + int k, dk = Ncvec/SIMD_SZ; // number of 4x4 matrix blocks + v4sf r0, i0, r1, i1, r2, i2, r3, i3; + v4sf sr0, dr0, sr1, dr1, si0, di0, si1, di1; + assert(in != out); + for (k=0; k < dk; ++k) { + r0 = in[8*k+0]; i0 = in[8*k+1]; + r1 = in[8*k+2]; i1 = in[8*k+3]; + r2 = in[8*k+4]; i2 = in[8*k+5]; + r3 = in[8*k+6]; i3 = in[8*k+7]; + + sr0 = VADD(r0,r2); dr0 = VSUB(r0, r2); + sr1 = VADD(r1,r3); dr1 = VSUB(r1, r3); + si0 = VADD(i0,i2); di0 = VSUB(i0, i2); + si1 = VADD(i1,i3); di1 = VSUB(i1, i3); + + r0 = VADD(sr0, sr1); i0 = VADD(si0, si1); + r1 = VSUB(dr0, di1); i1 = VADD(di0, dr1); + r2 = VSUB(sr0, sr1); i2 = VSUB(si0, si1); + r3 = VADD(dr0, di1); i3 = VSUB(di0, dr1); + + VCPLXMULCONJ(r1,i1,e[k*6+0],e[k*6+1]); + VCPLXMULCONJ(r2,i2,e[k*6+2],e[k*6+3]); + VCPLXMULCONJ(r3,i3,e[k*6+4],e[k*6+5]); + + VTRANSPOSE4(r0,r1,r2,r3); + VTRANSPOSE4(i0,i1,i2,i3); + + *out++ = r0; *out++ = i0; *out++ = r1; *out++ = i1; + *out++ = r2; *out++ = i2; *out++ = r3; *out++ = i3; + } +} + + +static ALWAYS_INLINE(void) pffft_real_finalize_4x4(const v4sf *in0, const v4sf *in1, const v4sf *in, + const v4sf *e, v4sf *out) { + v4sf r0, i0, r1, i1, r2, i2, r3, i3; + v4sf sr0, dr0, sr1, dr1, si0, di0, si1, di1; + r0 = *in0; i0 = *in1; + r1 = *in++; i1 = *in++; r2 = *in++; i2 = *in++; r3 = *in++; i3 = 
*in++; + VTRANSPOSE4(r0,r1,r2,r3); + VTRANSPOSE4(i0,i1,i2,i3); + + /* + transformation for each column is: + + [1 1 1 1 0 0 0 0] [r0] + [1 0 -1 0 0 -1 0 1] [r1] + [1 0 -1 0 0 1 0 -1] [r2] + [1 -1 1 -1 0 0 0 0] [r3] + [0 0 0 0 1 1 1 1] * [i0] + [0 -1 0 1 -1 0 1 0] [i1] + [0 -1 0 1 1 0 -1 0] [i2] + [0 0 0 0 -1 1 -1 1] [i3] + */ + + //cerr << "matrix initial, before e , REAL:\n 1: " << r0 << "\n 1: " << r1 << "\n 1: " << r2 << "\n 1: " << r3 << "\n"; + //cerr << "matrix initial, before e, IMAG :\n 1: " << i0 << "\n 1: " << i1 << "\n 1: " << i2 << "\n 1: " << i3 << "\n"; + + VCPLXMUL(r1,i1,e[0],e[1]); + VCPLXMUL(r2,i2,e[2],e[3]); + VCPLXMUL(r3,i3,e[4],e[5]); + + //cerr << "matrix initial, real part:\n 1: " << r0 << "\n 1: " << r1 << "\n 1: " << r2 << "\n 1: " << r3 << "\n"; + //cerr << "matrix initial, imag part:\n 1: " << i0 << "\n 1: " << i1 << "\n 1: " << i2 << "\n 1: " << i3 << "\n"; + + sr0 = VADD(r0,r2); dr0 = VSUB(r0,r2); + sr1 = VADD(r1,r3); dr1 = VSUB(r3,r1); + si0 = VADD(i0,i2); di0 = VSUB(i0,i2); + si1 = VADD(i1,i3); di1 = VSUB(i3,i1); + + r0 = VADD(sr0, sr1); + r3 = VSUB(sr0, sr1); + i0 = VADD(si0, si1); + i3 = VSUB(si1, si0); + r1 = VADD(dr0, di1); + r2 = VSUB(dr0, di1); + i1 = VSUB(dr1, di0); + i2 = VADD(dr1, di0); + + *out++ = r0; + *out++ = i0; + *out++ = r1; + *out++ = i1; + *out++ = r2; + *out++ = i2; + *out++ = r3; + *out++ = i3; + +} + +static NEVER_INLINE(void) pffft_real_finalize(int Ncvec, const v4sf *in, v4sf *out, const v4sf *e) { + int k, dk = Ncvec/SIMD_SZ; // number of 4x4 matrix blocks + /* fftpack order is f0r f1r f1i f2r f2i ... f(n-1)r f(n-1)i f(n)r */ + + v4sf_union cr, ci, *uout = (v4sf_union*)out; + v4sf save = in[7], zero=VZERO(); + float xr0, xi0, xr1, xi1, xr2, xi2, xr3, xi3; + static const float s = M_SQRT2/2; + + cr.v = in[0]; ci.v = in[Ncvec*2-1]; + assert(in != out); + pffft_real_finalize_4x4(&zero, &zero, in+1, e, out); + + /* + [cr0 cr1 cr2 cr3 ci0 ci1 ci2 ci3] + + [Xr(1)] ] [1 1 1 1 0 0 0 0] + [Xr(N/4) ] [0 0 0 0 1 s 0 -s] + [Xr(N/2) ] [1 0 -1 0 0 0 0 0] + [Xr(3N/4)] [0 0 0 0 1 -s 0 s] + [Xi(1) ] [1 -1 1 -1 0 0 0 0] + [Xi(N/4) ] [0 0 0 0 0 -s -1 -s] + [Xi(N/2) ] [0 -1 0 1 0 0 0 0] + [Xi(3N/4)] [0 0 0 0 0 -s 1 -s] + */ + + xr0=(cr.f[0]+cr.f[2]) + (cr.f[1]+cr.f[3]); uout[0].f[0] = xr0; + xi0=(cr.f[0]+cr.f[2]) - (cr.f[1]+cr.f[3]); uout[1].f[0] = xi0; + xr2=(cr.f[0]-cr.f[2]); uout[4].f[0] = xr2; + xi2=(cr.f[3]-cr.f[1]); uout[5].f[0] = xi2; + xr1= ci.f[0] + s*(ci.f[1]-ci.f[3]); uout[2].f[0] = xr1; + xi1=-ci.f[2] - s*(ci.f[1]+ci.f[3]); uout[3].f[0] = xi1; + xr3= ci.f[0] - s*(ci.f[1]-ci.f[3]); uout[6].f[0] = xr3; + xi3= ci.f[2] - s*(ci.f[1]+ci.f[3]); uout[7].f[0] = xi3; + + for (k=1; k < dk; ++k) { + v4sf save_next = in[8*k+7]; + pffft_real_finalize_4x4(&save, &in[8*k+0], in + 8*k+1, + e + k*6, out + k*8); + save = save_next; + } + +} + +static ALWAYS_INLINE(void) pffft_real_preprocess_4x4(const v4sf *in, + const v4sf *e, v4sf *out, int first) { + v4sf r0=in[0], i0=in[1], r1=in[2], i1=in[3], r2=in[4], i2=in[5], r3=in[6], i3=in[7]; + /* + transformation for each column is: + + [1 1 1 1 0 0 0 0] [r0] + [1 0 0 -1 0 -1 -1 0] [r1] + [1 -1 -1 1 0 0 0 0] [r2] + [1 0 0 -1 0 1 1 0] [r3] + [0 0 0 0 1 -1 1 -1] * [i0] + [0 -1 1 0 1 0 0 1] [i1] + [0 0 0 0 1 1 -1 -1] [i2] + [0 1 -1 0 1 0 0 1] [i3] + */ + + v4sf sr0 = VADD(r0,r3), dr0 = VSUB(r0,r3); + v4sf sr1 = VADD(r1,r2), dr1 = VSUB(r1,r2); + v4sf si0 = VADD(i0,i3), di0 = VSUB(i0,i3); + v4sf si1 = VADD(i1,i2), di1 = VSUB(i1,i2); + + r0 = VADD(sr0, sr1); + r2 = VSUB(sr0, sr1); + r1 = VSUB(dr0, si1); + r3 = VADD(dr0, 
si1); + i0 = VSUB(di0, di1); + i2 = VADD(di0, di1); + i1 = VSUB(si0, dr1); + i3 = VADD(si0, dr1); + + VCPLXMULCONJ(r1,i1,e[0],e[1]); + VCPLXMULCONJ(r2,i2,e[2],e[3]); + VCPLXMULCONJ(r3,i3,e[4],e[5]); + + VTRANSPOSE4(r0,r1,r2,r3); + VTRANSPOSE4(i0,i1,i2,i3); + + if (!first) { + *out++ = r0; + *out++ = i0; + } + *out++ = r1; + *out++ = i1; + *out++ = r2; + *out++ = i2; + *out++ = r3; + *out++ = i3; +} + +static NEVER_INLINE(void) pffft_real_preprocess(int Ncvec, const v4sf *in, v4sf *out, const v4sf *e) { + int k, dk = Ncvec/SIMD_SZ; // number of 4x4 matrix blocks + /* fftpack order is f0r f1r f1i f2r f2i ... f(n-1)r f(n-1)i f(n)r */ + + v4sf_union Xr, Xi, *uout = (v4sf_union*)out; + float cr0, ci0, cr1, ci1, cr2, ci2, cr3, ci3; + static const float s = M_SQRT2; + assert(in != out); + for (k=0; k < 4; ++k) { + Xr.f[k] = ((float*)in)[8*k]; + Xi.f[k] = ((float*)in)[8*k+4]; + } + + pffft_real_preprocess_4x4(in, e, out+1, 1); // will write only 6 values + + /* + [Xr0 Xr1 Xr2 Xr3 Xi0 Xi1 Xi2 Xi3] + + [cr0] [1 0 2 0 1 0 0 0] + [cr1] [1 0 0 0 -1 0 -2 0] + [cr2] [1 0 -2 0 1 0 0 0] + [cr3] [1 0 0 0 -1 0 2 0] + [ci0] [0 2 0 2 0 0 0 0] + [ci1] [0 s 0 -s 0 -s 0 -s] + [ci2] [0 0 0 0 0 -2 0 2] + [ci3] [0 -s 0 s 0 -s 0 -s] + */ + for (k=1; k < dk; ++k) { + pffft_real_preprocess_4x4(in+8*k, e + k*6, out-1+k*8, 0); + } + + cr0=(Xr.f[0]+Xi.f[0]) + 2*Xr.f[2]; uout[0].f[0] = cr0; + cr1=(Xr.f[0]-Xi.f[0]) - 2*Xi.f[2]; uout[0].f[1] = cr1; + cr2=(Xr.f[0]+Xi.f[0]) - 2*Xr.f[2]; uout[0].f[2] = cr2; + cr3=(Xr.f[0]-Xi.f[0]) + 2*Xi.f[2]; uout[0].f[3] = cr3; + ci0= 2*(Xr.f[1]+Xr.f[3]); uout[2*Ncvec-1].f[0] = ci0; + ci1= s*(Xr.f[1]-Xr.f[3]) - s*(Xi.f[1]+Xi.f[3]); uout[2*Ncvec-1].f[1] = ci1; + ci2= 2*(Xi.f[3]-Xi.f[1]); uout[2*Ncvec-1].f[2] = ci2; + ci3=-s*(Xr.f[1]-Xr.f[3]) - s*(Xi.f[1]+Xi.f[3]); uout[2*Ncvec-1].f[3] = ci3; +} + + +void pffft_transform_internal(PFFFT_Setup *setup, const float *finput, float *foutput, v4sf *scratch, + pffft_direction_t direction, int ordered) { + int k, Ncvec = setup->Ncvec; + int nf_odd = (setup->ifac[1] & 1); + + // temporary buffer is allocated on the stack if the scratch pointer is NULL + int stack_allocate = (scratch == 0 ? Ncvec*2 : 1); + VLA_ARRAY_ON_STACK(v4sf, scratch_on_stack, stack_allocate); + + const v4sf *vinput = (const v4sf*)finput; + v4sf *voutput = (v4sf*)foutput; + v4sf *buff[2] = { voutput, scratch ? scratch : scratch_on_stack }; + int ib = (nf_odd ^ ordered ? 1 : 0); + + assert(VALIGNED(finput) && VALIGNED(foutput)); + + //assert(finput != foutput); + if (direction == PFFFT_FORWARD) { + ib = !ib; + if (setup->transform == PFFFT_REAL) { + ib = (rfftf1_ps(Ncvec*2, vinput, buff[ib], buff[!ib], + setup->twiddle, &setup->ifac[0]) == buff[0] ? 0 : 1); + pffft_real_finalize(Ncvec, buff[ib], buff[!ib], (v4sf*)setup->e); + } else { + v4sf *tmp = buff[ib]; + for (k=0; k < Ncvec; ++k) { + UNINTERLEAVE2(vinput[k*2], vinput[k*2+1], tmp[k*2], tmp[k*2+1]); + } + ib = (cfftf1_ps(Ncvec, buff[ib], buff[!ib], buff[ib], + setup->twiddle, &setup->ifac[0], -1) == buff[0] ? 
0 : 1); + pffft_cplx_finalize(Ncvec, buff[ib], buff[!ib], (v4sf*)setup->e); + } + if (ordered) { + pffft_zreorder(setup, (float*)buff[!ib], (float*)buff[ib], PFFFT_FORWARD); + } else ib = !ib; + } else { + if (vinput == buff[ib]) { + ib = !ib; // may happen when finput == foutput + } + if (ordered) { + pffft_zreorder(setup, (float*)vinput, (float*)buff[ib], PFFFT_BACKWARD); + vinput = buff[ib]; ib = !ib; + } + if (setup->transform == PFFFT_REAL) { + pffft_real_preprocess(Ncvec, vinput, buff[ib], (v4sf*)setup->e); + ib = (rfftb1_ps(Ncvec*2, buff[ib], buff[0], buff[1], + setup->twiddle, &setup->ifac[0]) == buff[0] ? 0 : 1); + } else { + pffft_cplx_preprocess(Ncvec, vinput, buff[ib], (v4sf*)setup->e); + ib = (cfftf1_ps(Ncvec, buff[ib], buff[0], buff[1], + setup->twiddle, &setup->ifac[0], +1) == buff[0] ? 0 : 1); + for (k=0; k < Ncvec; ++k) { + INTERLEAVE2(buff[ib][k*2], buff[ib][k*2+1], buff[ib][k*2], buff[ib][k*2+1]); + } + } + } + + if (buff[ib] != voutput) { + /* extra copy required -- this situation should only happen when finput == foutput */ + assert(finput==foutput); + for (k=0; k < Ncvec; ++k) { + v4sf a = buff[ib][2*k], b = buff[ib][2*k+1]; + voutput[2*k] = a; voutput[2*k+1] = b; + } + ib = !ib; + } + assert(buff[ib] == voutput); +} + +void pffft_zconvolve_accumulate(PFFFT_Setup *s, const float *a, const float *b, float *ab, float scaling) { + int Ncvec = s->Ncvec; + const v4sf * RESTRICT va = (const v4sf*)a; + const v4sf * RESTRICT vb = (const v4sf*)b; + v4sf * RESTRICT vab = (v4sf*)ab; + +#ifdef __arm__ + __builtin_prefetch(va); + __builtin_prefetch(vb); + __builtin_prefetch(vab); + __builtin_prefetch(va+2); + __builtin_prefetch(vb+2); + __builtin_prefetch(vab+2); + __builtin_prefetch(va+4); + __builtin_prefetch(vb+4); + __builtin_prefetch(vab+4); + __builtin_prefetch(va+6); + __builtin_prefetch(vb+6); + __builtin_prefetch(vab+6); +# ifndef __clang__ +# define ZCONVOLVE_USING_INLINE_NEON_ASM +# endif +#endif + + float ar, ai, br, bi, abr, abi; +#ifndef ZCONVOLVE_USING_INLINE_ASM + v4sf vscal = LD_PS1(scaling); + int i; +#endif + + assert(VALIGNED(a) && VALIGNED(b) && VALIGNED(ab)); + ar = ((v4sf_union*)va)[0].f[0]; + ai = ((v4sf_union*)va)[1].f[0]; + br = ((v4sf_union*)vb)[0].f[0]; + bi = ((v4sf_union*)vb)[1].f[0]; + abr = ((v4sf_union*)vab)[0].f[0]; + abi = ((v4sf_union*)vab)[1].f[0]; + +#ifdef ZCONVOLVE_USING_INLINE_ASM // inline asm version, unfortunately miscompiled by clang 3.2, at least on ubuntu.. so this will be restricted to gcc + const float *a_ = a, *b_ = b; float *ab_ = ab; + int N = Ncvec; + asm volatile("mov r8, %2 \n" + "vdup.f32 q15, %4 \n" + "1: \n" + "pld [%0,#64] \n" + "pld [%1,#64] \n" + "pld [%2,#64] \n" + "pld [%0,#96] \n" + "pld [%1,#96] \n" + "pld [%2,#96] \n" + "vld1.f32 {q0,q1}, [%0,:128]! \n" + "vld1.f32 {q4,q5}, [%1,:128]! \n" + "vld1.f32 {q2,q3}, [%0,:128]! \n" + "vld1.f32 {q6,q7}, [%1,:128]! \n" + "vld1.f32 {q8,q9}, [r8,:128]! \n" + + "vmul.f32 q10, q0, q4 \n" + "vmul.f32 q11, q0, q5 \n" + "vmul.f32 q12, q2, q6 \n" + "vmul.f32 q13, q2, q7 \n" + "vmls.f32 q10, q1, q5 \n" + "vmla.f32 q11, q1, q4 \n" + "vld1.f32 {q0,q1}, [r8,:128]! \n" + "vmls.f32 q12, q3, q7 \n" + "vmla.f32 q13, q3, q6 \n" + "vmla.f32 q8, q10, q15 \n" + "vmla.f32 q9, q11, q15 \n" + "vmla.f32 q0, q12, q15 \n" + "vmla.f32 q1, q13, q15 \n" + "vst1.f32 {q8,q9},[%2,:128]! \n" + "vst1.f32 {q0,q1},[%2,:128]! 
\n" + "subs %3, #2 \n" + "bne 1b \n" + : "+r"(a_), "+r"(b_), "+r"(ab_), "+r"(N) : "r"(scaling) : "r8", "q0","q1","q2","q3","q4","q5","q6","q7","q8","q9", "q10","q11","q12","q13","q15","memory"); +#else // default routine, works fine for non-arm cpus with current compilers + for (i=0; i < Ncvec; i += 2) { + v4sf ar, ai, br, bi; + ar = va[2*i+0]; ai = va[2*i+1]; + br = vb[2*i+0]; bi = vb[2*i+1]; + VCPLXMUL(ar, ai, br, bi); + vab[2*i+0] = VMADD(ar, vscal, vab[2*i+0]); + vab[2*i+1] = VMADD(ai, vscal, vab[2*i+1]); + ar = va[2*i+2]; ai = va[2*i+3]; + br = vb[2*i+2]; bi = vb[2*i+3]; + VCPLXMUL(ar, ai, br, bi); + vab[2*i+2] = VMADD(ar, vscal, vab[2*i+2]); + vab[2*i+3] = VMADD(ai, vscal, vab[2*i+3]); + } +#endif + if (s->transform == PFFFT_REAL) { + ((v4sf_union*)vab)[0].f[0] = abr + ar*br*scaling; + ((v4sf_union*)vab)[1].f[0] = abi + ai*bi*scaling; + } +} + + +#else // defined(PFFFT_SIMD_DISABLE) + +// standard routine using scalar floats, without SIMD stuff. + +#define pffft_zreorder_nosimd pffft_zreorder +void pffft_zreorder_nosimd(PFFFT_Setup *setup, const float *in, float *out, pffft_direction_t direction) { + int k, N = setup->N; + if (setup->transform == PFFFT_COMPLEX) { + for (k=0; k < 2*N; ++k) out[k] = in[k]; + return; + } + else if (direction == PFFFT_FORWARD) { + float x_N = in[N-1]; + for (k=N-1; k > 1; --k) out[k] = in[k-1]; + out[0] = in[0]; + out[1] = x_N; + } else { + float x_N = in[1]; + for (k=1; k < N-1; ++k) out[k] = in[k+1]; + out[0] = in[0]; + out[N-1] = x_N; + } +} + +#define pffft_transform_internal_nosimd pffft_transform_internal +void pffft_transform_internal_nosimd(PFFFT_Setup *setup, const float *input, float *output, float *scratch, + pffft_direction_t direction, int ordered) { + int Ncvec = setup->Ncvec; + int nf_odd = (setup->ifac[1] & 1); + + // temporary buffer is allocated on the stack if the scratch pointer is NULL + int stack_allocate = (scratch == 0 ? Ncvec*2 : 1); + VLA_ARRAY_ON_STACK(v4sf, scratch_on_stack, stack_allocate); + float *buff[2]; + int ib; + if (scratch == 0) scratch = scratch_on_stack; + buff[0] = output; buff[1] = scratch; + + if (setup->transform == PFFFT_COMPLEX) ordered = 0; // it is always ordered. + ib = (nf_odd ^ ordered ? 1 : 0); + + if (direction == PFFFT_FORWARD) { + if (setup->transform == PFFFT_REAL) { + ib = (rfftf1_ps(Ncvec*2, input, buff[ib], buff[!ib], + setup->twiddle, &setup->ifac[0]) == buff[0] ? 0 : 1); + } else { + ib = (cfftf1_ps(Ncvec, input, buff[ib], buff[!ib], + setup->twiddle, &setup->ifac[0], -1) == buff[0] ? 0 : 1); + } + if (ordered) { + pffft_zreorder(setup, buff[ib], buff[!ib], PFFFT_FORWARD); ib = !ib; + } + } else { + if (input == buff[ib]) { + ib = !ib; // may happen when finput == foutput + } + if (ordered) { + pffft_zreorder(setup, input, buff[!ib], PFFFT_BACKWARD); + input = buff[!ib]; + } + if (setup->transform == PFFFT_REAL) { + ib = (rfftb1_ps(Ncvec*2, input, buff[ib], buff[!ib], + setup->twiddle, &setup->ifac[0]) == buff[0] ? 0 : 1); + } else { + ib = (cfftf1_ps(Ncvec, input, buff[ib], buff[!ib], + setup->twiddle, &setup->ifac[0], +1) == buff[0] ? 
0 : 1); + } + } + if (buff[ib] != output) { + int k; + // extra copy required -- this situation should happens only when finput == foutput + assert(input==output); + for (k=0; k < Ncvec; ++k) { + float a = buff[ib][2*k], b = buff[ib][2*k+1]; + output[2*k] = a; output[2*k+1] = b; + } + ib = !ib; + } + assert(buff[ib] == output); +} + +#define pffft_zconvolve_accumulate_nosimd pffft_zconvolve_accumulate +void pffft_zconvolve_accumulate_nosimd(PFFFT_Setup *s, const float *a, const float *b, + float *ab, float scaling) { + int i, Ncvec = s->Ncvec; + + if (s->transform == PFFFT_REAL) { + // take care of the fftpack ordering + ab[0] += a[0]*b[0]*scaling; + ab[2*Ncvec-1] += a[2*Ncvec-1]*b[2*Ncvec-1]*scaling; + ++ab; ++a; ++b; --Ncvec; + } + for (i=0; i < Ncvec; ++i) { + float ar, ai, br, bi; + ar = a[2*i+0]; ai = a[2*i+1]; + br = b[2*i+0]; bi = b[2*i+1]; + VCPLXMUL(ar, ai, br, bi); + ab[2*i+0] += ar*scaling; + ab[2*i+1] += ai*scaling; + } +} + +#endif // defined(PFFFT_SIMD_DISABLE) + +void pffft_transform(PFFFT_Setup *setup, const float *input, float *output, float *work, pffft_direction_t direction) { + pffft_transform_internal(setup, input, output, (v4sf*)work, direction, 0); +} + +void pffft_transform_ordered(PFFFT_Setup *setup, const float *input, float *output, float *work, pffft_direction_t direction) { + pffft_transform_internal(setup, input, output, (v4sf*)work, direction, 1); +} diff --git a/lib/src/fft/pffft/pffft.h b/lib/src/fft/pffft/pffft.h new file mode 100644 index 0000000..5db3d11 --- /dev/null +++ b/lib/src/fft/pffft/pffft.h @@ -0,0 +1,177 @@ +/* Copyright (c) 2013 Julien Pommier ( pommier@modartt.com ) + + Based on original fortran 77 code from FFTPACKv4 from NETLIB, + authored by Dr Paul Swarztrauber of NCAR, in 1985. + + As confirmed by the NCAR fftpack software curators, the following + FFTPACKv5 license applies to FFTPACKv4 sources. My changes are + released under the same terms. + + FFTPACK license: + + http://www.cisl.ucar.edu/css/software/fftpack5/ftpk.html + + Copyright (c) 2004 the University Corporation for Atmospheric + Research ("UCAR"). All rights reserved. Developed by NCAR's + Computational and Information Systems Laboratory, UCAR, + www.cisl.ucar.edu. + + Redistribution and use of the Software in source and binary forms, + with or without modification, is permitted provided that the + following conditions are met: + + - Neither the names of NCAR's Computational and Information Systems + Laboratory, the University Corporation for Atmospheric Research, + nor the names of its sponsors or contributors may be used to + endorse or promote products derived from this Software without + specific prior written permission. + + - Redistributions of source code must retain the above copyright + notices, this list of conditions, and the disclaimer below. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions, and the disclaimer below in the + documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. 
IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, INDIRECT, INCIDENTAL, SPECIAL, + EXEMPLARY, OR CONSEQUENTIAL DAMAGES OR OTHER LIABILITY, WHETHER IN AN + ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE + SOFTWARE. +*/ + +/* + PFFFT : a Pretty Fast FFT. + + This is basically an adaptation of the single precision fftpack + (v4) as found on netlib taking advantage of SIMD instruction found + on cpus such as intel x86 (SSE1), powerpc (Altivec), and arm (NEON). + + For architectures where no SIMD instruction is available, the code + falls back to a scalar version. + + Restrictions: + + - 1D transforms only, with 32-bit single precision. + + - supports only transforms for inputs of length N of the form + N=(2^a)*(3^b)*(5^c), a >= 5, b >=0, c >= 0 (32, 48, 64, 96, 128, + 144, 160, etc are all acceptable lengths). Performance is best for + 128<=N<=8192. + + - all (float*) pointers in the functions below are expected to + have an "simd-compatible" alignment, that is 16 bytes on x86 and + powerpc CPUs. + + You can allocate such buffers with the functions + pffft_aligned_malloc / pffft_aligned_free (or with stuff like + posix_memalign..) + +*/ + +#ifndef PFFFT_H +#define PFFFT_H + +#include <stddef.h> // for size_t + +#ifdef __cplusplus +extern "C" { +#endif + + /* opaque struct holding internal stuff (precomputed twiddle factors) + this struct can be shared by many threads as it contains only + read-only data. + */ + typedef struct PFFFT_Setup PFFFT_Setup; + + /* direction of the transform */ + typedef enum { PFFFT_FORWARD, PFFFT_BACKWARD } pffft_direction_t; + + /* type of transform */ + typedef enum { PFFFT_REAL, PFFFT_COMPLEX } pffft_transform_t; + + /* + prepare for performing transforms of size N -- the returned + PFFFT_Setup structure is read-only so it can safely be shared by + multiple concurrent threads. + */ + PFFFT_Setup *pffft_new_setup(int N, pffft_transform_t transform); + void pffft_destroy_setup(PFFFT_Setup *); + /* + Perform a Fourier transform , The z-domain data is stored in the + most efficient order for transforming it back, or using it for + convolution. If you need to have its content sorted in the + "usual" way, that is as an array of interleaved complex numbers, + either use pffft_transform_ordered , or call pffft_zreorder after + the forward fft, and before the backward fft. + + Transforms are not scaled: PFFFT_BACKWARD(PFFFT_FORWARD(x)) = N*x. + Typically you will want to scale the backward transform by 1/N. + + The 'work' pointer should point to an area of N (2*N for complex + fft) floats, properly aligned. If 'work' is NULL, then stack will + be used instead (this is probably the best strategy for small + FFTs, say for N < 16384). + + input and output may alias. + */ + void pffft_transform(PFFFT_Setup *setup, const float *input, float *output, float *work, pffft_direction_t direction); + + /* + Similar to pffft_transform, but makes sure that the output is + ordered as expected (interleaved complex numbers). This is + similar to calling pffft_transform and then pffft_zreorder. + + input and output may alias. + */ + void pffft_transform_ordered(PFFFT_Setup *setup, const float *input, float *output, float *work, pffft_direction_t direction); + + /* + call pffft_zreorder(.., PFFFT_FORWARD) after pffft_transform(..., + PFFFT_FORWARD) if you want to have the frequency components in + the correct "canonical" order, as interleaved complex numbers. 
+ + (for real transforms, both 0-frequency and half frequency + components, which are real, are assembled in the first entry as + F(0)+i*F(n/2+1). Note that the original fftpack did place + F(n/2+1) at the end of the arrays). + + input and output should not alias. + */ + void pffft_zreorder(PFFFT_Setup *setup, const float *input, float *output, pffft_direction_t direction); + + /* + Perform a multiplication of the frequency components of dft_a and + dft_b and accumulate them into dft_ab. The arrays should have + been obtained with pffft_transform(.., PFFFT_FORWARD) and should + *not* have been reordered with pffft_zreorder (otherwise just + perform the operation yourself as the dft coefs are stored as + interleaved complex numbers). + + The operation performed is: dft_ab += (dft_a * dft_b)*scaling + + The dft_a, dft_b and dft_ab pointers may alias. + */ + void pffft_zconvolve_accumulate(PFFFT_Setup *setup, const float *dft_a, const float *dft_b, float *dft_ab, float scaling); + + /* + The float buffers must have the correct alignment (16-byte boundary + on intel and powerpc). This function may be used to obtain such + correctly aligned buffers. + */ + void *pffft_aligned_malloc(size_t nb_bytes); + void pffft_aligned_free(void *); + + /* returns 4 or 1 depending on whether SSE/Altivec support was enabled when building pffft.c */ + int pffft_simd_size(void); + +#ifdef __cplusplus +} +#endif + +#endif // PFFFT_H |
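To make the calling convention documented in the header above concrete, here is a minimal sketch of a real-valued round trip, assuming only what the comments state: N must be of the form 2^a*3^b*5^c with a >= 5, all buffers need 16-byte alignment, the work area holds N floats for a real FFT, and the backward transform returns N times the input, hence the final 1/N scaling. The function name example_roundtrip and the size N = 4096 are illustrative choices, not part of the library.

#include "pffft.h"

static void example_roundtrip(void)
{
    const int N = 4096;                      /* 4096 = 2^12, an acceptable real-FFT size */
    PFFFT_Setup *s = pffft_new_setup(N, PFFFT_REAL);
    if (!s) return;                          /* N was not decomposable into 2/3/5 factors */

    float *in   = pffft_aligned_malloc(N * sizeof(float));
    float *freq = pffft_aligned_malloc(N * sizeof(float));
    float *out  = pffft_aligned_malloc(N * sizeof(float));
    float *work = pffft_aligned_malloc(N * sizeof(float)); /* NULL would use the stack instead */

    for (int i = 0; i < N; i++)
        in[i] = (float)i / N;                /* arbitrary test signal */

    pffft_transform_ordered(s, in, freq, work, PFFFT_FORWARD);  /* canonical interleaved spectrum */
    pffft_transform_ordered(s, freq, out, work, PFFFT_BACKWARD);

    for (int i = 0; i < N; i++)
        out[i] /= N;                         /* undo the N-fold gain; out now matches in */

    pffft_aligned_free(work);
    pffft_aligned_free(out);
    pffft_aligned_free(freq);
    pffft_aligned_free(in);
    pffft_destroy_setup(s);
}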
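The pffft_zconvolve_accumulate comment above likewise implies a complete recipe for frequency-domain convolution: forward-transform both signals with the unordered pffft_transform, multiply-accumulate in the z-domain, then transform back. A sketch under those assumptions follows; the setup s is presumed to come from pffft_new_setup(N, PFFFT_REAL), and example_convolve together with its buffer names is purely illustrative.

#include <string.h>
#include "pffft.h"

/* Circular convolution of two N-point, 16-byte aligned signals x and h,
   written into y. Uses the unordered transforms, whose z-domain layout is
   exactly what pffft_zconvolve_accumulate expects (no pffft_zreorder). */
static void example_convolve(PFFFT_Setup *s, int N,
                             const float *x, const float *h, float *y)
{
    float *X    = pffft_aligned_malloc(N * sizeof(float));
    float *H    = pffft_aligned_malloc(N * sizeof(float));
    float *Y    = pffft_aligned_malloc(N * sizeof(float));
    float *work = pffft_aligned_malloc(N * sizeof(float));

    pffft_transform(s, x, X, work, PFFFT_FORWARD);
    pffft_transform(s, h, H, work, PFFFT_FORWARD);

    memset(Y, 0, N * sizeof(float));                   /* the function accumulates into Y */
    pffft_zconvolve_accumulate(s, X, H, Y, 1.0f / N);  /* fold the 1/N scaling in here */

    pffft_transform(s, Y, y, work, PFFFT_BACKWARD);

    pffft_aligned_free(work);
    pffft_aligned_free(Y);
    pffft_aligned_free(H);
    pffft_aligned_free(X);
}

Accumulation (rather than plain assignment) is why Y is zeroed first; it also allows several partial spectra to be summed into the same output buffer before the single backward transform.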