It's really hard (if possible at all) to write a "highly optimized AVX2" version of complex abs, because the way complex numbers are defined in the standard prevents a lot of optimization, specifically due to all the inf/NaN corner cases.
However, if you don't care about strict correctness in those corner cases, you can just use -ffast-math and some compilers will vectorize the code for you. See the gcc output: https://godbolt.org/z/QbZlBI
You can also take this output and create your own abs function with inline assembly.
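For reference, this is roughly the kind of plain loop I mean (a minimal sketch; the function name is mine and the exact snippet behind the godbolt link may differ slightly). With something like -O3 -ffast-math -mavx2, gcc can turn the body into packed multiplies, adds and a vector sqrt, because fast-math allows it to skip the overflow/NaN-safe handling of std::abs(std::complex):

#include <complex>

// Naive scalar loop: with -O3 -ffast-math and an AVX target, gcc can
// vectorize this because fast-math drops the hypot-style overflow/NaN
// handling that std::abs(std::complex) otherwise requires.
void computeAbsoluteNaive(const std::complex<float>* src,
                          float* dst,
                          int vecLength)
{
    for (int i = 0; i < vecLength; ++i)
        dst[i] = std::abs(src[i]);
}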
But yes, as was already mentioned, if you really need performance, you probably want to swap std::complex for something else.
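For example (purely illustrative, the names are mine), if you can store the real and imaginary parts in two separate arrays (a structure-of-arrays layout), the compiler can vectorize the loop directly, with no deinterleaving shuffles at all:

#include <cmath>

// Hypothetical split (SoA) layout: separate arrays for real and imaginary
// parts. The loop body is a straight sqrt(x*x + y*y), which vectorizes
// cleanly (you may still need -fno-math-errno or -ffast-math so sqrt
// isn't treated as a call that sets errno).
void computeAbsoluteSoA(const float* re, const float* im,
                        float* dst, int vecLength)
{
    for (int i = 0; i < vecLength; ++i)
        dst[i] = std::sqrt(re[i] * re[i] + im[i] * im[i]);
}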
I was able to get a decent output for your specific case with all the required shuffles by manually filling small re and im arrays. See: https://godbolt.org/z/sWAAXo
This could be trivially extended to ymm registers (there is a sketch of such an extension at the end of this answer).
Anyway, here is the ultimate solution, adapted from this SO answer, which uses intrinsics in combination with clever compiler optimizations:
#include <complex>
#include <immintrin.h>

// Computes sqrt(re^2 + im^2) for 4 floats at a time.
static inline void cabs_soa4(const float *re, const float *im, float *b) {
    __m128 x4 = _mm_loadu_ps(re);
    __m128 y4 = _mm_loadu_ps(im);
    __m128 b4 = _mm_sqrt_ps(_mm_add_ps(_mm_mul_ps(x4, x4), _mm_mul_ps(y4, y4)));
    _mm_storeu_ps(b, b4);
}

// Assumes vecLength is a multiple of 4.
void computeAbsolute (const std::complex<float>* src,
                      float* realValuedDestinationVec,
                      int vecLength)
{
    for (int i = 0; i < vecLength; i += 4) {
        float re[4] = {src[i].real(), src[i + 1].real(), src[i + 2].real(), src[i + 3].real()};
        float im[4] = {src[i].imag(), src[i + 1].imag(), src[i + 2].imag(), src[i + 3].imag()};
        cabs_soa4(re, im, realValuedDestinationVec + i);
    }
}
which compiles down to a simple loop (listing from the godbolt link below):
_Z15computeAbsolutePKSt7complexIfEPfi:
        test    edx, edx
        jle     .L5
        lea     eax, [rdx-1]
        shr     eax, 2
        sal     rax, 5
        lea     rax, [rdi+32+rax]
.L3:
        vmovups xmm0, XMMWORD PTR [rdi]
        vmovups xmm2, XMMWORD PTR [rdi+16]
        add     rdi, 32
        vshufps xmm1, xmm0, xmm2, 136
        vmulps  xmm1, xmm1, xmm1
        vshufps xmm0, xmm0, xmm2, 221
        vfmadd132ps     xmm0, xmm1, xmm0
        vsqrtps xmm0, xmm0
        vmovups XMMWORD PTR [rsi], xmm0
        cmp     rax, rdi
        jne     .L3
.L5:
        ret
https://godbolt.org/z/Yu64Wg
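And since the question is about AVX2, here is a sketch of the ymm extension mentioned above: the same idea, just 8 floats per iteration. The function names are mine, and it assumes vecLength is a multiple of 8 and that you compile with AVX enabled (e.g. -mavx2):

#include <complex>
#include <immintrin.h>

// 256-bit version of cabs_soa4: computes sqrt(re^2 + im^2) for 8 floats.
static inline void cabs_soa8(const float *re, const float *im, float *b) {
    __m256 x8 = _mm256_loadu_ps(re);
    __m256 y8 = _mm256_loadu_ps(im);
    __m256 b8 = _mm256_sqrt_ps(_mm256_add_ps(_mm256_mul_ps(x8, x8),
                                             _mm256_mul_ps(y8, y8)));
    _mm256_storeu_ps(b, b8);
}

// Assumes vecLength is a multiple of 8.
void computeAbsoluteYmm(const std::complex<float>* src,
                        float* realValuedDestinationVec,
                        int vecLength)
{
    for (int i = 0; i < vecLength; i += 8) {
        float re[8], im[8];
        for (int j = 0; j < 8; ++j) {
            re[j] = src[i + j].real();
            im[j] = src[i + j].imag();
        }
        cabs_soa8(re, im, realValuedDestinationVec + i);
    }
}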