I'm compiling a bit of code using the following settings in VC++2010: /O2 /Ob2 /Oi /Ot

However, I'm having some trouble understanding parts of the generated assembly; I've put my questions in the code as comments.

Also, what prefetching distance is generally recommended on modern CPUs? I can of course test on my own CPU, but I was hoping for a value that works well across a wider range of CPUs. Maybe one could use dynamic prefetching distances?
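To illustrate what I mean by that last idea, here is a rough sketch of the copy loop with the prefetch distance as a runtime parameter (the parameter name and any suggested values are hypothetical, not something I've measured):

    #include <emmintrin.h>

    // Sketch: prefetch distance passed in (in 16-byte blocks) so a caller
    // could calibrate it per CPU at startup. Same preconditions as the
    // original (aligned pointers, size % 128 == 0). Two prefetches per
    // iteration cover 128 bytes, assuming 64-byte cache lines.
    void memcpy_aligned_pf(void* dest, const void* source, size_t size,
                           size_t prefetch_distance /* e.g. 8, 16, 32 */)
    {
        __m128i* dest_128 = reinterpret_cast<__m128i*>(dest);
        const __m128i* source_128 = reinterpret_cast<const __m128i*>(source);

        for(size_t n = 0; n < size/16; n += 8)
        {
            _mm_prefetch(reinterpret_cast<const char*>(source_128 + prefetch_distance), _MM_HINT_NTA);
            _mm_prefetch(reinterpret_cast<const char*>(source_128 + prefetch_distance + 4), _MM_HINT_NTA);

            for(int i = 0; i < 8; ++i)
                _mm_stream_si128(dest_128++, _mm_load_si128(source_128++));
        }
    }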

EDIT:

Another thing that surprises me is that the compiler does not interleave the movdqa and movntdq instructions in some form, since, as I understand it, these instructions are in some sense asynchronous.
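For clarity, the interleaving I have in mind would look something like this (just a sketch of the pattern, not code I've measured):

    // Hypothetical interleaving: pair each load with its store.
    xmm0 = _mm_load_si128(source_128++);
    _mm_stream_si128(dest_128++, xmm0);
    xmm1 = _mm_load_si128(source_128++);
    _mm_stream_si128(dest_128++, xmm1);
    // ... and so on for xmm2 through xmm7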

This code also assumes 32-byte cache lines when prefetching; however, modern high-end CPUs have 64-byte cache lines, so two of the prefetches could probably be removed, as sketched below.

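Assuming 64-byte cache lines (an assumption; some older CPUs used 32-byte lines), one prefetch per line would cover the whole 128-byte block:

    // Two prefetches per iteration instead of four, one per 64-byte line.
    _mm_prefetch(reinterpret_cast<const char*>(source_128 + 8),  _MM_HINT_NTA);
    _mm_prefetch(reinterpret_cast<const char*>(source_128 + 12), _MM_HINT_NTA);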

void memcpy_aligned_x86(void* dest, const void* source, size_t size)
{ 
0052AC20  push        ebp  
0052AC21  mov         ebp,esp  
 const __m128i* source_128 = reinterpret_cast<const __m128i*>(source);

 for(size_t n = 0; n < size/16; n += 8) 
0052AC23  mov         edx,dword ptr [size]  
0052AC26  mov         ecx,dword ptr [dest]  
0052AC29  mov         eax,dword ptr [source]  
0052AC2C  shr         edx,4  
0052AC2F  test        edx,edx  
0052AC31  je          copy+9Eh (52ACBEh)  
 __m128i xmm0 = _mm_setzero_si128();
 __m128i xmm1 = _mm_setzero_si128();
 __m128i xmm2 = _mm_setzero_si128();
 __m128i xmm3 = _mm_setzero_si128();
 __m128i xmm4 = _mm_setzero_si128();
 __m128i xmm5 = _mm_setzero_si128();
 __m128i xmm6 = _mm_setzero_si128();
 __m128i xmm7 = _mm_setzero_si128();

 __m128i* dest_128 = reinterpret_cast<__m128i*>(dest);
0052AC37  push        esi  
0052AC38  push        edi  
0052AC39  lea         edi,[edx-1]  
0052AC3C  shr         edi,3  
0052AC3F  inc         edi  
 {
  _mm_prefetch(reinterpret_cast<const char*>(source_128+8), _MM_HINT_NTA);
  _mm_prefetch(reinterpret_cast<const char*>(source_128+10), _MM_HINT_NTA);
  _mm_prefetch(reinterpret_cast<const char*>(source_128+12), _MM_HINT_NTA);
  _mm_prefetch(reinterpret_cast<const char*>(source_128+14), _MM_HINT_NTA);

  xmm0 = _mm_load_si128(source_128++);
  xmm1 = _mm_load_si128(source_128++);
  xmm2 = _mm_load_si128(source_128++);
  xmm3 = _mm_load_si128(source_128++);
  xmm4 = _mm_load_si128(source_128++);
  xmm5 = _mm_load_si128(source_128++);
  xmm6 = _mm_load_si128(source_128++);
  xmm7 = _mm_load_si128(source_128++);
0052AC40  movdqa      xmm6,xmmword ptr [eax+70h]  // 1. Why is this moved before the prefetches?
0052AC45  prefetchnta [eax+80h]  
0052AC4C  prefetchnta [eax+0A0h]  
0052AC53  prefetchnta [eax+0C0h]  
0052AC5A  prefetchnta [eax+0E0h]  
0052AC61  movdqa      xmm0,xmmword ptr [eax+10h]  
0052AC66  movdqa      xmm1,xmmword ptr [eax+20h]  
0052AC6B  movdqa      xmm2,xmmword ptr [eax+30h]  
0052AC70  movdqa      xmm3,xmmword ptr [eax+40h]  
0052AC75  movdqa      xmm4,xmmword ptr [eax+50h]  
0052AC7A  movdqa      xmm5,xmmword ptr [eax+60h]  
0052AC7F  lea         esi,[eax+70h]  // 2. What is happening in these 2 lines?
0052AC82  mov         edx,eax        //
0052AC84  movdqa      xmm7,xmmword ptr [edx]  // 3. Why edx? and not simply eax?

  _mm_stream_si128(dest_128++, xmm0);
0052AC88  mov         esi,ecx  // 4. Is esi never used?
0052AC8A  movntdq     xmmword ptr [esi],xmm7  
  _mm_stream_si128(dest_128++, xmm1);
0052AC8E  movntdq     xmmword ptr [ecx+10h],xmm0  
  _mm_stream_si128(dest_128++, xmm2);
0052AC93  movntdq     xmmword ptr [ecx+20h],xmm1  
  _mm_stream_si128(dest_128++, xmm3);
0052AC98  movntdq     xmmword ptr [ecx+30h],xmm2  
  _mm_stream_si128(dest_128++, xmm4);
0052AC9D  movntdq     xmmword ptr [ecx+40h],xmm3  
  _mm_stream_si128(dest_128++, xmm5);
0052ACA2  movntdq     xmmword ptr [ecx+50h],xmm4  
  _mm_stream_si128(dest_128++, xmm6);
0052ACA7  movntdq     xmmword ptr [ecx+60h],xmm5  
  _mm_stream_si128(dest_128++, xmm7);
0052ACAC  lea         edx,[ecx+70h]  
0052ACAF  sub         eax,0FFFFFF80h  
0052ACB2  sub         ecx,0FFFFFF80h  
0052ACB5  dec         edi  
0052ACB6  movntdq     xmmword ptr [edx],xmm6  // 5. Why not simply ecx?
0052ACBA  jne         copy+20h (52AC40h)  
0052ACBC  pop         edi  
0052ACBD  pop         esi  
 }
}

original code:

#include <cassert>
#include <emmintrin.h>

void memcpy_aligned_x86(void* dest, const void* source, size_t size)
{ 
 assert(dest != nullptr);
 assert(source != nullptr);
 assert(source != dest);
 assert(size % 128 == 0);

 __m128i xmm0 = _mm_setzero_si128();
 __m128i xmm1 = _mm_setzero_si128();
 __m128i xmm2 = _mm_setzero_si128();
 __m128i xmm3 = _mm_setzero_si128();
 __m128i xmm4 = _mm_setzero_si128();
 __m128i xmm5 = _mm_setzero_si128();
 __m128i xmm6 = _mm_setzero_si128();
 __m128i xmm7 = _mm_setzero_si128();

 __m128i* dest_128 = reinterpret_cast<__m128i*>(dest);
 const __m128i* source_128 = reinterpret_cast<const __m128i*>(source);

 for(size_t n = 0; n < size/16; n += 8) 
 {
  _mm_prefetch(reinterpret_cast<const char*>(source_128+8), _MM_HINT_NTA);
  _mm_prefetch(reinterpret_cast<const char*>(source_128+10), _MM_HINT_NTA);
  _mm_prefetch(reinterpret_cast<const char*>(source_128+12), _MM_HINT_NTA);
  _mm_prefetch(reinterpret_cast<const char*>(source_128+14), _MM_HINT_NTA);

  xmm0 = _mm_load_si128(source_128++);
  xmm1 = _mm_load_si128(source_128++);
  xmm2 = _mm_load_si128(source_128++);
  xmm3 = _mm_load_si128(source_128++);
  xmm4 = _mm_load_si128(source_128++);
  xmm5 = _mm_load_si128(source_128++);
  xmm6 = _mm_load_si128(source_128++);
  xmm7 = _mm_load_si128(source_128++);

  _mm_stream_si128(dest_128++, xmm0);
  _mm_stream_si128(dest_128++, xmm1);
  _mm_stream_si128(dest_128++, xmm2);
  _mm_stream_si128(dest_128++, xmm3);
  _mm_stream_si128(dest_128++, xmm4);
  _mm_stream_si128(dest_128++, xmm5);
  _mm_stream_si128(dest_128++, xmm6);
  _mm_stream_si128(dest_128++, xmm7);
 }
}
+2  A: 

The eax+70h read is moved up because eax+70h is in a different cache line from eax, and the compiler probably wants the hardware prefetcher to start fetching that line as soon as possible.
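To make that concrete (assuming 64-byte cache lines and a 128-byte-aligned source; with other alignments the line boundaries shift):

    ; line 0: [eax+00h .. eax+3Fh] - covered by the loads at +00h..+30h
    ; line 1: [eax+40h .. eax+7Fh] - covered by the loads at +40h..+70h
    ; hoisting movdqa xmm6,[eax+70h] issues the miss on line 1 as early as possible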

It does not interleave them either because it wants to maximize performance by avoiding load-to-store dependencies (even though the AMD optimization guide explicitly says to interleave), or simply because it is not sure that the stores won't overwrite the loads. Does the behavior change if you add __restrict keywords to source and dest?
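For reference, the suggested change would just be the following (MSVC spells the keyword __restrict; whether it actually helps here is the open question):

    void memcpy_aligned_x86(void* __restrict dest,
                            const void* __restrict source,
                            size_t size);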

The purpose of the rest of it eludes me too. It could be some obscure instruction-decoding or hardware-prefetcher consideration, either for AMD or Intel, but I can't find any justification for it. I wonder whether the code gets faster or slower if you remove those instructions?

The recommended prefetching distance depends on the loop size. It needs to be far enough ahead that the data has time to arrive from memory by the time it is needed; I think you usually need to give it at least 100 clock ticks.
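As a back-of-the-envelope sketch (both numbers are illustrative assumptions, not measurements): if one 128-byte iteration takes about 30 cycles and you want at least 100 cycles of lead time, you need to prefetch roughly ceil(100 / 30) = 4 iterations, i.e. 512 bytes, ahead:

    // Illustrative helper: how many loop iterations ahead to prefetch.
    size_t prefetch_iterations(size_t lead_cycles, size_t cycles_per_iter)
    {
        return (lead_cycles + cycles_per_iter - 1) / cycles_per_iter;
    }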

__restrict makes the assembly horrible. It only uses one SSE register and increments the pointers after each operation.
ronag
@ronag: that's interesting. I can't imagine why `restrict` would ever result in *slower* code. Might be worth submitting to MS Connect.
jalf
A: 

I haven't figured out what the compiler is doing, but I thought I'd share some of my testing results. I've rewritten the function in assembly.

System: Xeon W3520

4.55 GB/s : regular memcpy

5.52 GB/s : memcpy in question

5.58 GB/s : memcpy below

7.48 GB/s : memcpy below multithreaded

void* memcpy_SSE2_3(void* dest, const void* source, size_t num)
{   
    __asm
    {
        mov esi, source;    
        mov edi, dest;   

        mov ebx, num;       // total bytes to copy
        shr ebx, 7;         // /128: loop count in 128-byte blocks (assumes num % 128 == 0)

        cpy:
            prefetchnta [esi+80h];   // prefetch the next 128-byte block,
            prefetchnta [esi+0C0h];  // one prefetch per 64-byte cache line

            movdqa xmm0, [esi+00h];
            movdqa xmm1, [esi+10h];
            movdqa xmm2, [esi+20h];
            movdqa xmm3, [esi+30h];

            movntdq [edi+00h], xmm0;
            movntdq [edi+10h], xmm1;
            movntdq [edi+20h], xmm2;
            movntdq [edi+30h], xmm3;

            movdqa xmm4, [esi+40h];
            movdqa xmm5, [esi+50h];
            movdqa xmm6, [esi+60h];
            movdqa xmm7, [esi+70h];

            movntdq [edi+40h], xmm4;
            movntdq [edi+50h], xmm5;
            movntdq [edi+60h], xmm6;
            movntdq [edi+70h], xmm7;

            lea edi, [edi+80h];
            lea esi, [esi+80h];
            dec ebx;

        jnz cpy;
    }
    return dest;
}

#include <tbb/parallel_for.h>
#include <tbb/blocked_range.h>

void* memcpy_tbb(void* dest, const void* source, size_t num)
{   
    // affinity_partitioner is taken by non-const reference and only pays off
    // when the same instance is reused across calls, hence the named static.
    static tbb::affinity_partitioner ap;

    tbb::parallel_for(tbb::blocked_range<size_t>(0, num/128), [&](const tbb::blocked_range<size_t>& r)
    {
        memcpy_SSE2_3(reinterpret_cast<char*>(dest) + r.begin()*128,
                      reinterpret_cast<const char*>(source) + r.begin()*128,
                      r.size()*128);
    }, ap);

    return dest;
}
ronag