Optimal ordering of instructions for reading and writing memory

I am wondering what the optimal order is for a sequence of instructions like the one below on Intel processors between Core 2 and Westmere. The code is in AT&T syntax, so the pxor instructions read from memory and the movdqa instructions write to memory:

    movdqa  %xmm0, -128+64(%rbx)
    movdqa  %xmm1, -128+80(%rbx)
    movdqa  %xmm2, -128+96(%rbx)
    movdqa  %xmm3, -128+112(%rbx)
    pxor    -128(%rsp), %xmm0
    pxor    -112(%rsp), %xmm1
    pxor    -96(%rsp), %xmm2
    pxor    -80(%rsp), %xmm3
    movdqa  %xmm8, 64(%rbx)
    movdqa  %xmm9, 80(%rbx)
    movdqa  %xmm10, 96(%rbx)
    movdqa  %xmm11, 112(%rbx)
    pxor    -128(%r14), %xmm8
    pxor    -112(%r14), %xmm9
    pxor    -96(%r14), %xmm10
    pxor    -80(%r14), %xmm11
    movdqa  %xmm12, 64(%rdx)
    movdqa  %xmm13, 80(%rdx)
    movdqa  %xmm14, 96(%rdx)
    movdqa  %xmm15, 112(%rdx)
    pxor    0(%r14), %xmm12
    pxor    16(%r14), %xmm13
    pxor    32(%r14), %xmm14
    pxor    48(%r14), %xmm15

%r14, %rsp, %rdx and %rbx differ from one another by various multiples of 256. In other words, the instructions above contain no non-obvious aliasing, and the accesses are part of sequential passes over large blocks of data. All of the cache lines touched are already in the L1 cache.
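For readers more comfortable with intrinsics, here is a rough C sketch of what one group of four movdqa/pxor pairs does (the function and variable names are mine, not from the real code, and the real code keeps the values in xmm registers across a larger unrolled sequence):

    #include <emmintrin.h>   /* SSE2 intrinsics */

    /* Sketch only: store the current accumulator values, then XOR fresh
       16-byte blocks from memory into them.  'dst', 'src' and 'acc' are
       hypothetical names; the assembly keeps acc[] in xmm registers. */
    static void store_then_xor(__m128i *dst, const __m128i *src, __m128i acc[4])
    {
        for (int i = 0; i < 4; i++) {
            _mm_store_si128(&dst[i], acc[i]);                        /* movdqa %xmmN, off(%rbx) */
            acc[i] = _mm_xor_si128(acc[i], _mm_load_si128(&src[i])); /* pxor off(%rsp), %xmmN   */
        }
    }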

Would it be better to interleave the writes and the reads, along the lines of what Agner Fog recommends, for example like this:

    movdqa  %xmm0, -128+64(%rbx)
    movdqa  %xmm1, -128+80(%rbx)
    pxor    -128(%rsp), %xmm0
    movdqa  %xmm2, -128+96(%rbx)
    pxor    -112(%rsp), %xmm1
    movdqa  %xmm3, -128+112(%rbx)
    pxor    -96(%rsp), %xmm2
    movdqa  %xmm8, 64(%rbx)
    pxor    -80(%rsp), %xmm3
    movdqa  %xmm9, 80(%rbx)
    pxor    -128(%r14), %xmm8
    movdqa  %xmm10, 96(%rbx)
    pxor    -112(%r14), %xmm9
    movdqa  %xmm11, 112(%rbx)
    pxor    -96(%r14), %xmm10
    movdqa  %xmm12, 64(%rdx)
    pxor    -80(%r14), %xmm11
    movdqa  %xmm13, 80(%rdx)
    pxor    0(%r14), %xmm12
    movdqa  %xmm14, 96(%rdx)
    pxor    16(%r14), %xmm13
    movdqa  %xmm15, 112(%rdx)
    pxor    32(%r14), %xmm14
    pxor    48(%r14), %xmm15

" ", Microachitecture.pdf Agner Fog, .

Or is it counterproductive to try to schedule these instructions by hand, since the out-of-order engine will reorder them anyway? If the order does matter, I would like to understand what makes one arrangement faster than another.

EDIT: to clarify, the values in the xmm registers were produced by earlier computation; each one has to be stored to memory and also XORed with data that is already in memory. rbx, rsp, r14 and rdx point to distinct, non-overlapping buffers.

Answer:

, , , , :

#ifdef M    
    push    %rdx
    push    %rax
    push    %rbx
    push    %rcx    
    xorq    %rax, %rax
    cpuid                               # serializing: nothing later starts before this retires
    rdtsc                               # read time-stamp counter into edx:eax
    movl    %eax, 256+32+UNUSED_64b     # save start TSC, low 32 bits
    movl    %edx, 256+32+4+UNUSED_64b   # save start TSC, high 32 bits
    pop     %rcx        
    pop     %rbx
    pop %rax
    pop %rdx
#endif  
    movdqa  %xmm0, -128+64(%rbx)
    movdqa  %xmm1, -128+80(%rbx)
    movdqa  %xmm2, -128+96(%rbx)
    movdqa  %xmm3, -128+112(%rbx)

    movdqa  %xmm8, 64(%rbx)
    movdqa  %xmm9, 80(%rbx)
    movdqa  %xmm10, 96(%rbx)
    movdqa  %xmm11, 112(%rbx)

    pxor    -128(%rsp), %xmm0   
    pxor    -112(%rsp), %xmm1
    pxor    -96(%rsp), %xmm2    
    pxor    -80(%rsp), %xmm3

    movdqa  %xmm12, 64(%rdx)
    movdqa  %xmm13, 80(%rdx)
    movdqa  %xmm14, 96(%rdx)
    movdqa  %xmm15, 112(%rdx)

    pxor    -128(%r14), %xmm8   
    pxor    -112(%r14), %xmm9
    pxor    -96(%r14), %xmm10
    pxor    -80(%r14), %xmm11

    movdqa  %xmm0, -128+0(%rbx)
    movdqa  %xmm1, -128+16(%rbx)
    movdqa  %xmm2, -128+32(%rbx)
    movdqa  %xmm3, -128+48(%rbx)

    pxor    0(%r14), %xmm12
    pxor    16(%r14), %xmm13
    pxor    32(%r14), %xmm14
    pxor    48(%r14), %xmm15

    movdqa  %xmm8, 0(%rbx)
    movdqa  %xmm9, 16(%rbx)
    movdqa  %xmm10, 32(%rbx)
    movdqa  %xmm11, 48(%rbx)
    movdqa  %xmm12, 0(%rdx)
    movdqa  %xmm13, 16(%rdx)
    movdqa  %xmm14, 32(%rdx)
    movdqa  %xmm15, 48(%rdx)

#ifdef M        
    push    %rdx
    push    %rax
    push    %rbx
    push    %rcx    
    xorq    %rax, %rax
    cpuid                               # serialize again before reading the end timestamp
    rdtsc
    shlq    $32, %rdx
    orq     %rdx, %rax                  # combine edx:eax into a 64-bit end timestamp
    subq    256+32+UNUSED_64b, %rax     # subtract the saved start value
    movq    %rax, 256+32+UNUSED_64b     # store the elapsed cycle count
    pop     %rcx        
    pop     %rbx    
    pop %rax
    pop %rdx
#endif
// safe place
    call do_debug
#ifdef M
    .cstring
measure:
        .ascii "%15lu\12\0"

        .section        __DATA,__data
    .align 2

count:
    .word 30000

    .text
do_measure:
    decb    count(%rip)                 # only report when the byte counter hits zero
    jnz     done_measure
    pushq   %rax
    pushq   %rax    
    pushq   %rbx
    pushq   %rcx
    pushq   %rdx
    pushq   %rsi
    pushq   %rdi
    pushq   %rbp    
    pushq   %r9
    pushq   %r10
    pushq   %r11
    pushq   %r12
    pushq   %r13
    pushq   %r14
    pushq   %r15

        movq    16*8+UNUSED_64b, %rsi
        leaq    measure(%rip), %rdi
        xorl    %eax, %eax
        call    _applog

    popq    %r15
    popq    %r14
    popq    %r13    
    popq    %r12
    popq    %r11
    popq    %r10    
    popq    %r9
    popq    %rbp    
    popq    %rdi
    popq    %rsi
    popq    %rdx
    popq    %rcx
    popq    %rbx
    popq    %rax
    popq    %rax    
done_measure:
    ret
#endif
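For comparison, the serialized time-stamp read that the two #ifdef M blocks perform can be written in C with GCC/Clang inline assembly roughly like this (a sketch; the function name is mine):

    #include <stdint.h>

    /* Serialized TSC read: cpuid acts as a barrier so that rdtsc is not
       reordered around the code being measured, as in the blocks above. */
    static inline uint64_t serialized_rdtsc(void)
    {
        uint32_t lo, hi;
        __asm__ __volatile__(
            "xorl  %%eax, %%eax\n\t"
            "cpuid\n\t"
            "rdtsc"
            : "=a"(lo), "=d"(hi)
            :
            : "%rbx", "%rcx", "memory");
        return ((uint64_t)hi << 32) | lo;
    }

One would call this once before and once after the block under test and subtract, exactly as the assembly does with its scratch slot on the stack.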

The measurements were taken on a Westmere Xeon W3680. The two timestamp blocks bracket the block of xmm stores and pxors, so the number that gets printed is the cycle count for that block alone.
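The reporting side, do_measure, boils down to something like the following C (again just a sketch with names of my own; the original prints through _applog and relies on the byte counter wrapping around):

    #include <stdint.h>
    #include <stdio.h>

    /* Print the measured cycle count only once every many calls, so that
       the cost of formatting and I/O stays off the timed path most of the time. */
    static void report_cycles(uint64_t cycles)
    {
        static unsigned char counter;        /* wraps modulo 256, like decb */
        if (--counter != 0)
            return;                          /* fast path: nothing to do */
        printf("%15lu\n", (unsigned long)cycles);
    }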

UNUSED_64b is a 64-bit scratch slot on the stack that the surrounding code does not otherwise use; it is defined as:

#define UNUSED_64b         16(%rsp) 

The 256+32 in front of it is just a constant offset from that base slot; the assembler folds 256+32+16(%rsp) down to 304(%rsp).

This is on Mac OS X; other Unix-like systems should be similar (the .cstring and __DATA,__data section directives are Mach-O specific).
