#include <emmintrin.h>
#include <immintrin.h>
#include <pmmintrin.h>
#include <xmmintrin.h>

#define MC  384
#define KC  384
#define NC  4096

#define MR  4
#define NR  4

//
//  Local buffers for storing panels from A, B and C
//
static double _A[MC*KC] __attribute__ ((aligned (16)));
static double _B[KC*NC] __attribute__ ((aligned (16)));
static double _C[MR*NR] __attribute__ ((aligned (16)));

//
//  Packing complete panels from A (i.e. without padding)
//
static void
pack_MRxk(int k, const double *A, int incRowA, int incColA,
          double *buffer)
{
    int i, j;

    for (j=0; j<k; ++j) {
        for (i=0; i<MR; ++i) {
            buffer[i] = A[i*incRowA];
        }
        buffer += MR;
        A      += incColA;
    }
}

//
//  Packing panels from A with padding if required
//
static void
pack_A(int mc, int kc, const double *A, int incRowA, int incColA,
       double *buffer)
{
    int mp  = mc / MR;
    int _mr = mc % MR;

    int i, j;

    for (i=0; i<mp; ++i) {
        pack_MRxk(kc, A, incRowA, incColA, buffer);
        buffer += kc*MR;
        A      += MR*incRowA;
    }
    if (_mr>0) {
        for (j=0; j<kc; ++j) {
            for (i=0; i<_mr; ++i) {
                buffer[i] = A[i*incRowA];
            }
            for (i=_mr; i<MR; ++i) {
                buffer[i] = 0.0;
            }
            buffer += MR;
            A      += incColA;
        }
    }
}

//
//  Packing complete panels from B (i.e. without padding)
//
static void
pack_kxNR(int k, const double *B, int incRowB, int incColB,
          double *buffer)
{
    int i, j;

    for (i=0; i<k; ++i) {
        for (j=0; j<NR; ++j) {
            buffer[j] = B[j*incColB];
        }
        buffer += NR;
        B      += incRowB;
    }
}

//
//  Packing panels from B with padding if required
//
static void
pack_B(int kc, int nc, const double *B, int incRowB, int incColB,
       double *buffer)
{
    int np  = nc / NR;
    int _nr = nc % NR;

    int i, j;

    for (j=0; j<np; ++j) {
        pack_kxNR(kc, B, incRowB, incColB, buffer);
        buffer += kc*NR;
        B      += NR*incColB;
    }
    if (_nr>0) {
        for (i=0; i<kc; ++i) {
            for (j=0; j<_nr; ++j) {
                buffer[j] = B[j*incColB];
            }
            for (j=_nr; j<NR; ++j) {
                buffer[j] = 0.0;
            }
            buffer += NR;
            B      += incRowB;
        }
    }
}
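//
//  Worked example (illustrative only, not part of the original code): packing
//  a 6x3 block of a column-major matrix A (incRowA=1, incColA=lda; lda and the
//  entry names a_ij are hypothetical) with
//
//      pack_A(6, 3, A, 1, lda, _A);
//
//  fills the buffer with one complete MRx3 panel followed by the zero-padded
//  2x3 remainder:
//
//      _A = { a00,a10,a20,a30,  a01,a11,a21,a31,  a02,a12,a22,a32,
//             a40,a50,0,0,      a41,a51,0,0,      a42,a52,0,0 };
//
//  The micro kernel below can therefore always read MR consecutive values of
//  A and NR consecutive values of B per step of the common dimension.
//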
//
//  Micro kernel for multiplying panels from A and B.
//
static void
dgemm_micro_kernel(long kc,
                   double alpha, const double *A, const double *B,
                   double beta,
                   double *C, long incRowC, long incColC)
{
    long kb = kc / 4;
    long kl = kc % 4;

//
//  Compute AB = A*B
//
    __asm__ volatile
    (
    "movq      %0,      %%rsi    \n\t"  // kb stored in %rsi
    "movq      %1,      %%rdi    \n\t"  // kl stored in %rdi
    "movq      %2,      %%rax    \n\t"  // Address of A stored in %rax
    "movq      %3,      %%rbx    \n\t"  // Address of B stored in %rbx
    "                            \n\t"
    "movapd    (%%rax), %%xmm0   \n\t"  // tmp0 = _mm_load_pd(A)
    "movapd  16(%%rax), %%xmm1   \n\t"  // tmp1 = _mm_load_pd(A+2)
    "movapd    (%%rbx), %%xmm2   \n\t"  // tmp2 = _mm_load_pd(B)
    "                            \n\t"
    "xorpd     %%xmm3,  %%xmm3   \n\t"  // tmp3 = _mm_setzero_pd()
    "xorpd     %%xmm4,  %%xmm4   \n\t"  // tmp4 = _mm_setzero_pd()
    "xorpd     %%xmm5,  %%xmm5   \n\t"  // tmp5 = _mm_setzero_pd()
    "xorpd     %%xmm6,  %%xmm6   \n\t"  // tmp6 = _mm_setzero_pd()
    "xorpd     %%xmm7,  %%xmm7   \n\t"  // tmp7 = _mm_setzero_pd()
    "                            \n\t"
    "xorpd     %%xmm8,  %%xmm8   \n\t"  // ab_00_11 = _mm_setzero_pd()
    "xorpd     %%xmm9,  %%xmm9   \n\t"  // ab_20_31 = _mm_setzero_pd()
    "xorpd     %%xmm10, %%xmm10  \n\t"  // ab_01_10 = _mm_setzero_pd()
    "xorpd     %%xmm11, %%xmm11  \n\t"  // ab_21_30 = _mm_setzero_pd()
    "xorpd     %%xmm12, %%xmm12  \n\t"  // ab_02_13 = _mm_setzero_pd()
    "xorpd     %%xmm13, %%xmm13  \n\t"  // ab_22_33 = _mm_setzero_pd()
    "xorpd     %%xmm14, %%xmm14  \n\t"  // ab_03_12 = _mm_setzero_pd()
    "xorpd     %%xmm15, %%xmm15  \n\t"  // ab_23_32 = _mm_setzero_pd()
    "                            \n\t"
    "testq     %%rsi,   %%rsi    \n\t"  // if kb==0 handle remaining kl
    "je        .DCONSIDERLEFT%=  \n\t"
    "                            \n\t"
    ".DLOOP%=:                   \n\t"  // for l = kb,..,1 do
    "                            \n\t"
    //  1. update
    "addpd     %%xmm3,  %%xmm12  \n\t"  // ab_02_13 = _mm_add_pd(ab_02_13, tmp3)
    "movapd  16(%%rbx), %%xmm3   \n\t"  // tmp3     = _mm_load_pd(B+2)
    "addpd     %%xmm6,  %%xmm13  \n\t"  // ab_22_33 = _mm_add_pd(ab_22_33, tmp6)
    "movapd    %%xmm2,  %%xmm6   \n\t"  // tmp6     = tmp2
    "pshufd $78,%%xmm2, %%xmm4   \n\t"  // tmp4     = _mm_shuffle_pd(tmp2, tmp2,
    "                            \n\t"  //                           _MM_SHUFFLE2(0, 1))
    "mulpd     %%xmm0,  %%xmm2   \n\t"  // tmp2     = _mm_mul_pd(tmp2, tmp0)
    "mulpd     %%xmm1,  %%xmm6   \n\t"  // tmp6     = _mm_mul_pd(tmp6, tmp1)
    "                            \n\t"
    "addpd     %%xmm5,  %%xmm14  \n\t"  // ab_03_12 = _mm_add_pd(ab_03_12, tmp5)
    "addpd     %%xmm7,  %%xmm15  \n\t"  // ab_23_32 = _mm_add_pd(ab_23_32, tmp7)
    "movapd    %%xmm4,  %%xmm7   \n\t"  // tmp7     = tmp4
    "mulpd     %%xmm0,  %%xmm4   \n\t"  // tmp4     = _mm_mul_pd(tmp4, tmp0)
    "mulpd     %%xmm1,  %%xmm7   \n\t"  // tmp7     = _mm_mul_pd(tmp7, tmp1)
    "                            \n\t"
    "addpd     %%xmm2,  %%xmm8   \n\t"  // ab_00_11 = _mm_add_pd(ab_00_11, tmp2)
    "movapd  32(%%rbx), %%xmm2   \n\t"  // tmp2     = _mm_load_pd(B+4)
    "addpd     %%xmm6,  %%xmm9   \n\t"  // ab_20_31 = _mm_add_pd(ab_20_31, tmp6)
    "movapd    %%xmm3,  %%xmm6   \n\t"  // tmp6     = tmp3
    "pshufd $78,%%xmm3, %%xmm5   \n\t"  // tmp5     = _mm_shuffle_pd(tmp3, tmp3,
    "                            \n\t"  //                           _MM_SHUFFLE2(0, 1))
    "mulpd     %%xmm0,  %%xmm3   \n\t"  // tmp3     = _mm_mul_pd(tmp3, tmp0)
    "mulpd     %%xmm1,  %%xmm6   \n\t"  // tmp6     = _mm_mul_pd(tmp6, tmp1)
    "                            \n\t"
    "addpd     %%xmm4,  %%xmm10  \n\t"  // ab_01_10 = _mm_add_pd(ab_01_10, tmp4)
    "addpd     %%xmm7,  %%xmm11  \n\t"  // ab_21_30 = _mm_add_pd(ab_21_30, tmp7)
    "movapd    %%xmm5,  %%xmm7   \n\t"  // tmp7     = tmp5
    "mulpd     %%xmm0,  %%xmm5   \n\t"  // tmp5     = _mm_mul_pd(tmp5, tmp0)
    "movapd  32(%%rax), %%xmm0   \n\t"  // tmp0     = _mm_load_pd(A+4)
    "mulpd     %%xmm1,  %%xmm7   \n\t"  // tmp7     = _mm_mul_pd(tmp7, tmp1)
    "movapd  48(%%rax), %%xmm1   \n\t"  // tmp1     = _mm_load_pd(A+6)
    "                            \n\t"
    //  2. update: same pattern, B loads at offsets 48/64, A loads at 64/80
    "addpd     %%xmm3,  %%xmm12  \n\t"
    "movapd  48(%%rbx), %%xmm3   \n\t"
    "addpd     %%xmm6,  %%xmm13  \n\t"
    "movapd    %%xmm2,  %%xmm6   \n\t"
    "pshufd $78,%%xmm2, %%xmm4   \n\t"
    "mulpd     %%xmm0,  %%xmm2   \n\t"
    "mulpd     %%xmm1,  %%xmm6   \n\t"
    "                            \n\t"
    "addpd     %%xmm5,  %%xmm14  \n\t"
    "addpd     %%xmm7,  %%xmm15  \n\t"
    "movapd    %%xmm4,  %%xmm7   \n\t"
    "mulpd     %%xmm0,  %%xmm4   \n\t"
    "mulpd     %%xmm1,  %%xmm7   \n\t"
    "                            \n\t"
    "addpd     %%xmm2,  %%xmm8   \n\t"
    "movapd  64(%%rbx), %%xmm2   \n\t"
    "addpd     %%xmm6,  %%xmm9   \n\t"
    "movapd    %%xmm3,  %%xmm6   \n\t"
    "pshufd $78,%%xmm3, %%xmm5   \n\t"
    "mulpd     %%xmm0,  %%xmm3   \n\t"
    "mulpd     %%xmm1,  %%xmm6   \n\t"
    "                            \n\t"
    "addpd     %%xmm4,  %%xmm10  \n\t"
    "addpd     %%xmm7,  %%xmm11  \n\t"
    "movapd    %%xmm5,  %%xmm7   \n\t"
    "mulpd     %%xmm0,  %%xmm5   \n\t"
    "movapd  64(%%rax), %%xmm0   \n\t"
    "mulpd     %%xmm1,  %%xmm7   \n\t"
    "movapd  80(%%rax), %%xmm1   \n\t"
    "                            \n\t"
    //  3. update: B loads at offsets 80/96, A loads at 96/112
    "addpd     %%xmm3,  %%xmm12  \n\t"
    "movapd  80(%%rbx), %%xmm3   \n\t"
    "addpd     %%xmm6,  %%xmm13  \n\t"
    "movapd    %%xmm2,  %%xmm6   \n\t"
    "pshufd $78,%%xmm2, %%xmm4   \n\t"
    "mulpd     %%xmm0,  %%xmm2   \n\t"
    "mulpd     %%xmm1,  %%xmm6   \n\t"
    "                            \n\t"
    "addpd     %%xmm5,  %%xmm14  \n\t"
    "addpd     %%xmm7,  %%xmm15  \n\t"
    "movapd    %%xmm4,  %%xmm7   \n\t"
    "mulpd     %%xmm0,  %%xmm4   \n\t"
    "mulpd     %%xmm1,  %%xmm7   \n\t"
    "                            \n\t"
    "addpd     %%xmm2,  %%xmm8   \n\t"
    "movapd  96(%%rbx), %%xmm2   \n\t"
    "addpd     %%xmm6,  %%xmm9   \n\t"
    "movapd    %%xmm3,  %%xmm6   \n\t"
    "pshufd $78,%%xmm3, %%xmm5   \n\t"
    "mulpd     %%xmm0,  %%xmm3   \n\t"
    "mulpd     %%xmm1,  %%xmm6   \n\t"
    "                            \n\t"
    "addpd     %%xmm4,  %%xmm10  \n\t"
    "addpd     %%xmm7,  %%xmm11  \n\t"
    "movapd    %%xmm5,  %%xmm7   \n\t"
    "mulpd     %%xmm0,  %%xmm5   \n\t"
    "movapd  96(%%rax), %%xmm0   \n\t"
    "mulpd     %%xmm1,  %%xmm7   \n\t"
    "movapd 112(%%rax), %%xmm1   \n\t"
    "                            \n\t"
    //  4. update: B loads at offsets 112/128, A loads at 128/144
    "addpd     %%xmm3,  %%xmm12  \n\t"
    "movapd 112(%%rbx), %%xmm3   \n\t"
    "addpd     %%xmm6,  %%xmm13  \n\t"
    "movapd    %%xmm2,  %%xmm6   \n\t"
    "pshufd $78,%%xmm2, %%xmm4   \n\t"
    "mulpd     %%xmm0,  %%xmm2   \n\t"
    "mulpd     %%xmm1,  %%xmm6   \n\t"
    "                            \n\t"
    "addpd     %%xmm5,  %%xmm14  \n\t"
    "addpd     %%xmm7,  %%xmm15  \n\t"
    "movapd    %%xmm4,  %%xmm7   \n\t"
    "mulpd     %%xmm0,  %%xmm4   \n\t"
    "mulpd     %%xmm1,  %%xmm7   \n\t"
    "                            \n\t"
    "addpd     %%xmm2,  %%xmm8   \n\t"
    "movapd 128(%%rbx), %%xmm2   \n\t"
    "addpd     %%xmm6,  %%xmm9   \n\t"
    "movapd    %%xmm3,  %%xmm6   \n\t"
    "pshufd $78,%%xmm3, %%xmm5   \n\t"
    "mulpd     %%xmm0,  %%xmm3   \n\t"
    "mulpd     %%xmm1,  %%xmm6   \n\t"
    "                            \n\t"
    "addpd     %%xmm4,  %%xmm10  \n\t"
    "addpd     %%xmm7,  %%xmm11  \n\t"
    "movapd    %%xmm5,  %%xmm7   \n\t"
    "mulpd     %%xmm0,  %%xmm5   \n\t"
    "movapd 128(%%rax), %%xmm0   \n\t"
    "mulpd     %%xmm1,  %%xmm7   \n\t"
    "movapd 144(%%rax), %%xmm1   \n\t"
    "                            \n\t"
    "addq      $128,    %%rax    \n\t"  // A += 16;
    "addq      $128,    %%rbx    \n\t"  // B += 16;
    "                            \n\t"
    "decq      %%rsi             \n\t"  // --l
    "jne       .DLOOP%=          \n\t"  // if l>= 1 go back
    "                            \n\t"
    ".DCONSIDERLEFT%=:           \n\t"
    "testq     %%rdi,   %%rdi    \n\t"  // if kl==0 writeback to AB
    "je        .DPOSTACCUMULATE%=\n\t"
    "                            \n\t"
    ".DLOOPLEFT%=:               \n\t"  // for l = kl,..,1 do
    "                            \n\t"
    "addpd     %%xmm3,  %%xmm12  \n\t"  // ab_02_13 = _mm_add_pd(ab_02_13, tmp3)
    "movapd  16(%%rbx), %%xmm3   \n\t"  // tmp3     = _mm_load_pd(B+2)
    "addpd     %%xmm6,  %%xmm13  \n\t"  // ab_22_33 = _mm_add_pd(ab_22_33, tmp6)
    "movapd    %%xmm2,  %%xmm6   \n\t"  // tmp6     = tmp2
    "pshufd $78,%%xmm2, %%xmm4   \n\t"  // tmp4     = _mm_shuffle_pd(tmp2, tmp2,
    "                            \n\t"  //                           _MM_SHUFFLE2(0, 1))
    "mulpd     %%xmm0,  %%xmm2   \n\t"  // tmp2     = _mm_mul_pd(tmp2, tmp0)
    "mulpd     %%xmm1,  %%xmm6   \n\t"  // tmp6     = _mm_mul_pd(tmp6, tmp1)
    "                            \n\t"
    "addpd     %%xmm5,  %%xmm14  \n\t"  // ab_03_12 = _mm_add_pd(ab_03_12, tmp5)
    "addpd     %%xmm7,  %%xmm15  \n\t"  // ab_23_32 = _mm_add_pd(ab_23_32, tmp7)
    "movapd    %%xmm4,  %%xmm7   \n\t"  // tmp7     = tmp4
    "mulpd     %%xmm0,  %%xmm4   \n\t"  // tmp4     = _mm_mul_pd(tmp4, tmp0)
    "mulpd     %%xmm1,  %%xmm7   \n\t"  // tmp7     = _mm_mul_pd(tmp7, tmp1)
    "                            \n\t"
    "addpd     %%xmm2,  %%xmm8   \n\t"  // ab_00_11 = _mm_add_pd(ab_00_11, tmp2)
    "movapd  32(%%rbx), %%xmm2   \n\t"  // tmp2     = _mm_load_pd(B+4)
    "addpd     %%xmm6,  %%xmm9   \n\t"  // ab_20_31 = _mm_add_pd(ab_20_31, tmp6)
    "movapd    %%xmm3,  %%xmm6   \n\t"  // tmp6     = tmp3
    "pshufd $78,%%xmm3, %%xmm5   \n\t"  // tmp5     = _mm_shuffle_pd(tmp3, tmp3,
    "                            \n\t"  //                           _MM_SHUFFLE2(0, 1))
    "mulpd     %%xmm0,  %%xmm3   \n\t"  // tmp3     = _mm_mul_pd(tmp3, tmp0)
    "mulpd     %%xmm1,  %%xmm6   \n\t"  // tmp6     = _mm_mul_pd(tmp6, tmp1)
    "                            \n\t"
    "addpd     %%xmm4,  %%xmm10  \n\t"  // ab_01_10 = _mm_add_pd(ab_01_10, tmp4)
    "addpd     %%xmm7,  %%xmm11  \n\t"  // ab_21_30 = _mm_add_pd(ab_21_30, tmp7)
    "movapd    %%xmm5,  %%xmm7   \n\t"  // tmp7     = tmp5
    "mulpd     %%xmm0,  %%xmm5   \n\t"  // tmp5     = _mm_mul_pd(tmp5, tmp0)
    "movapd  32(%%rax), %%xmm0   \n\t"  // tmp0     = _mm_load_pd(A+4)
    "mulpd     %%xmm1,  %%xmm7   \n\t"  // tmp7     = _mm_mul_pd(tmp7, tmp1)
    "movapd  48(%%rax), %%xmm1   \n\t"  // tmp1     = _mm_load_pd(A+6)
    "                            \n\t"
    "addq      $32,     %%rax    \n\t"  // A += 4;
    "addq      $32,     %%rbx    \n\t"  // B += 4;
    "                            \n\t"
    "decq      %%rdi             \n\t"  // --l
    "jne       .DLOOPLEFT%=      \n\t"  // if l>= 1 go back
    "                            \n\t"
    ".DPOSTACCUMULATE%=:         \n\t"  // Update remaining ab_*_* registers
    "                            \n\t"
    "addpd     %%xmm3,  %%xmm12  \n\t"  // ab_02_13 = _mm_add_pd(ab_02_13, tmp3)
    "addpd     %%xmm6,  %%xmm13  \n\t"  // ab_22_33 = _mm_add_pd(ab_22_33, tmp6)
    "                            \n\t"
    "addpd     %%xmm5,  %%xmm14  \n\t"  // ab_03_12 = _mm_add_pd(ab_03_12, tmp5)
    "addpd     %%xmm7,  %%xmm15  \n\t"  // ab_23_32 = _mm_add_pd(ab_23_32, tmp7)
    "                            \n\t"
//
//  Update C <- beta*C + alpha*AB
//
    "movsd     %4,      %%xmm0   \n\t"  // load alpha
    "movsd     %5,      %%xmm1   \n\t"  // load beta
    "movq      %6,      %%rcx    \n\t"  // Address of C stored in %rcx
    "movq      %7,      %%r8     \n\t"  // load incRowC
    "leaq   (,%%r8,8),  %%r8     \n\t"  // incRowC *= sizeof(double)
    "movq      %8,      %%r9     \n\t"  // load incColC
    "leaq   (,%%r9,8),  %%r9     \n\t"  // incColC *= sizeof(double)
    "                            \n\t"
    "leaq (%%rcx,%%r9),   %%r10  \n\t"  // Store addr of C01 in %r10
    "leaq (%%rcx,%%r8,2), %%rdx  \n\t"  // Store addr of C20 in %rdx
    "leaq (%%rdx,%%r9),   %%r11  \n\t"  // Store addr of C21 in %r11
    "                            \n\t"
    "unpcklpd  %%xmm0,  %%xmm0   \n\t"  // duplicate alpha
    "unpcklpd  %%xmm1,  %%xmm1   \n\t"  // duplicate beta
    "                            \n\t"
    "movlpd  (%%rcx),       %%xmm3 \n\t"  // load (C00,
    "movhpd  (%%r10,%%r8),  %%xmm3 \n\t"  //       C11)
    "mulpd   %%xmm0,        %%xmm8 \n\t"  // scale ab_00_11 by alpha
    "mulpd   %%xmm1,        %%xmm3 \n\t"  // scale (C00, C11) by beta
    "addpd   %%xmm8,        %%xmm3 \n\t"  // add results
    "movlpd  %%xmm3,       (%%rcx) \n\t"  // write back (C00,
    "movhpd  %%xmm3, (%%r10,%%r8)  \n\t"  //             C11)
    "                            \n\t"
    "movlpd  (%%rdx),       %%xmm4 \n\t"  // load (C20,
    "movhpd  (%%r11,%%r8),  %%xmm4 \n\t"  //       C31)
    "mulpd   %%xmm0,        %%xmm9 \n\t"  // scale ab_20_31 by alpha
    "mulpd   %%xmm1,        %%xmm4 \n\t"  // scale (C20, C31) by beta
    "addpd   %%xmm9,        %%xmm4 \n\t"  // add results
    "movlpd  %%xmm4,       (%%rdx) \n\t"  // write back (C20,
    "movhpd  %%xmm4, (%%r11,%%r8)  \n\t"  //             C31)
    "                            \n\t"
    "movlpd  (%%r10),       %%xmm3 \n\t"  // load (C01,
    "movhpd  (%%rcx,%%r8),  %%xmm3 \n\t"  //       C10)
    "mulpd   %%xmm0,        %%xmm10\n\t"  // scale ab_01_10 by alpha
    "mulpd   %%xmm1,        %%xmm3 \n\t"  // scale (C01, C10) by beta
    "addpd   %%xmm10,       %%xmm3 \n\t"  // add results
    "movlpd  %%xmm3,       (%%r10) \n\t"  // write back (C01,
    "movhpd  %%xmm3, (%%rcx,%%r8)  \n\t"  //             C10)
    "                            \n\t"
    "movlpd  (%%r11),       %%xmm4 \n\t"  // load (C21,
    "movhpd  (%%rdx,%%r8),  %%xmm4 \n\t"  //       C30)
    "mulpd   %%xmm0,        %%xmm11\n\t"  // scale ab_21_30 by alpha
    "mulpd   %%xmm1,        %%xmm4 \n\t"  // scale (C21, C30) by beta
    "addpd   %%xmm11,       %%xmm4 \n\t"  // add results
    "movlpd  %%xmm4,       (%%r11) \n\t"  // write back (C21,
    "movhpd  %%xmm4, (%%rdx,%%r8)  \n\t"  //             C30)
    "                            \n\t"
    "leaq (%%rcx,%%r9,2), %%rcx  \n\t"  // Store addr of C02 in %rcx
    "leaq (%%r10,%%r9,2), %%r10  \n\t"  // Store addr of C03 in %r10
    "leaq (%%rdx,%%r9,2), %%rdx  \n\t"  // Store addr of C22 in %rdx
    "leaq (%%r11,%%r9,2), %%r11  \n\t"  // Store addr of C23 in %r11
    "                            \n\t"
    "movlpd  (%%rcx),       %%xmm3 \n\t"  // load (C02,
    "movhpd  (%%r10,%%r8),  %%xmm3 \n\t"  //       C13)
    "mulpd   %%xmm0,        %%xmm12\n\t"  // scale ab_02_13 by alpha
    "mulpd   %%xmm1,        %%xmm3 \n\t"  // scale (C02, C13) by beta
    "addpd   %%xmm12,       %%xmm3 \n\t"  // add results
    "movlpd  %%xmm3,       (%%rcx) \n\t"  // write back (C02,
    "movhpd  %%xmm3, (%%r10,%%r8)  \n\t"  //             C13)
    "                            \n\t"
    "movlpd  (%%rdx),       %%xmm4 \n\t"  // load (C22,
    "movhpd  (%%r11,%%r8),  %%xmm4 \n\t"  //       C33)
    "mulpd   %%xmm0,        %%xmm13\n\t"  // scale ab_22_33 by alpha
    "mulpd   %%xmm1,        %%xmm4 \n\t"  // scale (C22, C33) by beta
    "addpd   %%xmm13,       %%xmm4 \n\t"  // add results
    "movlpd  %%xmm4,       (%%rdx) \n\t"  // write back (C22,
    "movhpd  %%xmm4, (%%r11,%%r8)  \n\t"  //             C33)
    "                            \n\t"
    "movlpd  (%%r10),       %%xmm3 \n\t"  // load (C03,
    "movhpd  (%%rcx,%%r8),  %%xmm3 \n\t"  //       C12)
    "mulpd   %%xmm0,        %%xmm14\n\t"  // scale ab_03_12 by alpha
    "mulpd   %%xmm1,        %%xmm3 \n\t"  // scale (C03, C12) by beta
    "addpd   %%xmm14,       %%xmm3 \n\t"  // add results
    "movlpd  %%xmm3,       (%%r10) \n\t"  // write back (C03,
    "movhpd  %%xmm3, (%%rcx,%%r8)  \n\t"  //             C12)
    "                            \n\t"
    "movlpd  (%%r11),       %%xmm4 \n\t"  // load (C23,
    "movhpd  (%%rdx,%%r8),  %%xmm4 \n\t"  //       C32)
    "mulpd   %%xmm0,        %%xmm15\n\t"  // scale ab_23_32 by alpha
    "mulpd   %%xmm1,        %%xmm4 \n\t"  // scale (C23, C32) by beta
    "addpd   %%xmm15,       %%xmm4 \n\t"  // add results
    "movlpd  %%xmm4,       (%%r11) \n\t"  // write back (C23,
    "movhpd  %%xmm4, (%%rdx,%%r8)  \n\t"  //             C32)
    : // output
    : // input
        "m" (kb),       // 0
        "m" (kl),       // 1
        "m" (A),        // 2
        "m" (B),        // 3
        "m" (alpha),    // 4
        "m" (beta),     // 5
        "m" (C),        // 6
        "m" (incRowC),  // 7
        "m" (incColC)   // 8
    : // register clobber list
        "rax", "rbx", "rcx", "rdx", "rsi", "rdi",
        "r8", "r9", "r10", "r11",
        "xmm0", "xmm1", "xmm2",  "xmm3",  "xmm4",  "xmm5",  "xmm6",  "xmm7",
        "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"
    );
}
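//
//  For reference, a plain-C sketch (not part of the original source) of what
//  the assembly micro kernel computes: kc rank-1 updates on an MRxNR block of
//  the packed panels, followed by the scaled update of C.  The SSE code keeps
//  AB in registers xmm8-xmm15 in a shuffled layout; here AB is a local array.
//
static void
dgemm_micro_kernel_ref(long kc,
                       double alpha, const double *A, const double *B,
                       double beta,
                       double *C, long incRowC, long incColC)
{
    double AB[MR*NR];
    long   i, j, l;

    for (i=0; i<MR*NR; ++i) {
        AB[i] = 0.0;
    }

//
//  Compute AB = A*B
//
    for (l=0; l<kc; ++l) {
        for (j=0; j<NR; ++j) {
            for (i=0; i<MR; ++i) {
                AB[i+j*MR] += A[i]*B[j];
            }
        }
        A += MR;
        B += NR;
    }

//
//  Update C <- beta*C + alpha*AB
//
    for (j=0; j<NR; ++j) {
        for (i=0; i<MR; ++i) {
            C[i*incRowC+j*incColC] = beta*C[i*incRowC+j*incColC]
                                   + alpha*AB[i+j*MR];
        }
    }
}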
//
//  Compute Y += alpha*X
//
static void
dgeaxpy(int m, int n, double alpha,
        const double *X, int incRowX, int incColX,
        double *Y, int incRowY, int incColY)
{
    int i, j;

    if (alpha!=1.0) {
        for (j=0; j<n; ++j) {
            for (i=0; i<m; ++i) {
                Y[i*incRowY+j*incColY] += alpha*X[i*incRowX+j*incColX];
            }
        }
    } else {
        for (j=0; j<n; ++j) {
            for (i=0; i<m; ++i) {
                Y[i*incRowY+j*incColY] += X[i*incRowX+j*incColX];
            }
        }
    }
}
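//
//  Usage sketch (illustrative only; i, j and the leading dimension ldC are
//  hypothetical): because the row and column strides of source and target are
//  passed independently, dgeaxpy can add the packed MRxNR buffer _C (stored
//  with row stride 1 and column stride MR) into a sub-block of a column-major
//  matrix C:
//
//      dgeaxpy(MR, NR, alpha, _C, 1, MR, &C[i+j*ldC], 1, ldC);
//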