#include <emmintrin.h>

#ifndef MR
#define MR 4
#endif
#ifndef NR
#define NR 4
#endif

//
//  Micro kernel: C <- beta*C + alpha*A*B for a single MR x NR block.  A
//  points to a packed MR x kc panel (stored column wise), B to a packed
//  kc x NR panel (stored row wise).  The SSE code below hard codes
//  MR == NR == 4; the fallback defines above only apply if MR and NR are
//  not already defined earlier in the file.
//
static void
dgemm_micro_kernel(long kc,
                   double alpha, const double *A, const double *B,
                   double beta,
                   double *C, long incRowC, long incColC)
{
    double AB[MR*NR] __attribute__ ((aligned (16)));

    int i, j, l;

//
//  Compute AB = A*B
//
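//
//  Each SSE register accumulates two of the 16 entries of the 4x4 block
//  AB; the name encodes which ones: ab_00_11 holds AB[0][0] in its low
//  half and AB[1][1] in its high half, ab_01_10 holds AB[0][1] and
//  AB[1][0], and so on.  Registers with row indices 0/1 are formed from
//  (a0, a1), those with row indices 2/3 from (a2, a3), multiplied either
//  by a pair of B entries or by its swapped copy.
//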
    register __m128d ab_00_11, ab_20_31;
    register __m128d ab_01_10, ab_21_30;
    register __m128d ab_02_13, ab_22_33;
    register __m128d ab_03_12, ab_23_32;

    register __m128d tmp0, tmp1, tmp2, tmp3;
    register __m128d tmp4, tmp5, tmp6, tmp7;

    ab_00_11 = _mm_setzero_pd(); ab_20_31 = _mm_setzero_pd();
    ab_01_10 = _mm_setzero_pd(); ab_21_30 = _mm_setzero_pd();
    ab_02_13 = _mm_setzero_pd(); ab_22_33 = _mm_setzero_pd();
    ab_03_12 = _mm_setzero_pd(); ab_23_32 = _mm_setzero_pd();
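
//
//  Software pipelining: the first column of A (tmp0, tmp1) and the first
//  two entries of the first row of B (tmp2) are preloaded; tmp3..tmp7
//  start at zero so the leading adds of the first loop iteration are
//  no-ops.  Each iteration performs one rank-1 update (one column of A
//  times one row of B) and already loads the operands for the next
//  iteration; on the last iteration these loads read up to four doubles
//  past the micro panels, so the packed buffers must be padded.
//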

    tmp0 = _mm_load_pd(A);
    tmp1 = _mm_load_pd(A+2);
    tmp2 = _mm_load_pd(B);

    tmp3 = _mm_setzero_pd();
    tmp4 = _mm_setzero_pd();
    tmp5 = _mm_setzero_pd();
    tmp6 = _mm_setzero_pd();
    tmp7 = _mm_setzero_pd();

    for (l=0; l<kc; ++l) {
        ab_02_13 = _mm_add_pd(ab_02_13, tmp3);
        tmp3     = _mm_load_pd(B+2);
        ab_22_33 = _mm_add_pd(ab_22_33, tmp6);
        tmp6     = tmp2;
        tmp4     = _mm_shuffle_pd(tmp2, tmp2, _MM_SHUFFLE2(0, 1));   // (b0, b1) -> (b1, b0)
        tmp2     = _mm_mul_pd(tmp2, tmp0);
        tmp6     = _mm_mul_pd(tmp6, tmp1);

        ab_03_12 = _mm_add_pd(ab_03_12, tmp5);
        ab_23_32 = _mm_add_pd(ab_23_32, tmp7);
        tmp7     = tmp4;
        tmp4     = _mm_mul_pd(tmp4, tmp0);
        tmp7     = _mm_mul_pd(tmp7, tmp1);

        ab_00_11 = _mm_add_pd(ab_00_11, tmp2);
        tmp2     = _mm_load_pd(B+4);
        ab_20_31 = _mm_add_pd(ab_20_31, tmp6);
        tmp6     = tmp3;
        tmp5     = _mm_shuffle_pd(tmp3, tmp3, _MM_SHUFFLE2(0, 1));   // (b2, b3) -> (b3, b2)
        tmp3     = _mm_mul_pd(tmp3, tmp0);
        tmp6     = _mm_mul_pd(tmp6, tmp1);

        ab_01_10 = _mm_add_pd(ab_01_10, tmp4);
        ab_21_30 = _mm_add_pd(ab_21_30, tmp7);
        tmp7     = tmp5;
        tmp5     = _mm_mul_pd(tmp5, tmp0);
        tmp0     = _mm_load_pd(A+4);
        tmp7     = _mm_mul_pd(tmp7, tmp1);
        tmp1     = _mm_load_pd(A+6);

        A += 4;
        B += 4;
    }
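
//
//  Drain the pipeline: the products of the last iteration are still
//  pending in tmp3, tmp5, tmp6 and tmp7.
//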

    ab_02_13 = _mm_add_pd(ab_02_13, tmp3);
    ab_22_33 = _mm_add_pd(ab_22_33, tmp6);

    ab_03_12 = _mm_add_pd(ab_03_12, tmp5);
    ab_23_32 = _mm_add_pd(ab_23_32, tmp7);
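
//
//  Unscramble the accumulators into the column major 4x4 buffer AB: each
//  register writes its low half to one entry (storel) and its high half
//  to another (storeh).
//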

    _mm_storel_pd(&AB[0+0*4], ab_00_11);
    _mm_storeh_pd(&AB[1+0*4], ab_01_10);
    _mm_storel_pd(&AB[2+0*4], ab_20_31);
    _mm_storeh_pd(&AB[3+0*4], ab_21_30);

    _mm_storel_pd(&AB[0+1*4], ab_01_10);
    _mm_storeh_pd(&AB[1+1*4], ab_00_11);
    _mm_storel_pd(&AB[2+1*4], ab_21_30);
    _mm_storeh_pd(&AB[3+1*4], ab_20_31);

    _mm_storel_pd(&AB[0+2*4], ab_02_13);
    _mm_storeh_pd(&AB[1+2*4], ab_03_12);
    _mm_storel_pd(&AB[2+2*4], ab_22_33);
    _mm_storeh_pd(&AB[3+2*4], ab_23_32);

    _mm_storel_pd(&AB[0+3*4], ab_03_12);
    _mm_storeh_pd(&AB[1+3*4], ab_02_13);
    _mm_storel_pd(&AB[2+3*4], ab_23_32);
    _mm_storeh_pd(&AB[3+3*4], ab_22_33);

//
//  Update C <- beta*C
//
    if (beta==0.0) {
        for (j=0; j<NR; ++j) {
            for (i=0; i<MR; ++i) {
                C[i*incRowC+j*incColC] = 0.0;
            }
        }
    } else if (beta!=1.0) {
        for (j=0; j<NR; ++j) {
            for (i=0; i<MR; ++i) {
                C[i*incRowC+j*incColC] *= beta;
            }
        }
    }

//
//  Update C <- C + alpha*AB (note: the case alpha==0.0 is already handled
//                                  by the calling routine dgemm_nn)
//
    if (alpha==1.0) {
        for (j=0; j<NR; ++j) {
            for (i=0; i<MR; ++i) {
                C[i*incRowC+j*incColC] += AB[i+j*MR];
            }
        }
    } else {
        for (j=0; j<NR; ++j) {
            for (i=0; i<MR; ++i) {
                C[i*incRowC+j*incColC] += alpha*AB[i+j*MR];
            }
        }
    }
}
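
//
//  Illustrative test driver (a sketch, assuming the packing format
//  described above; not part of the kernel itself): it packs one MR x KC
//  panel of A column wise and one KC x NR panel of B row wise, runs the
//  kernel with alpha=1, beta=0 and compares against a plain triple loop.
//  The names KC, A_, B_ and C_ref are local to this sketch.  Compile
//  e.g. with: gcc -O2 -msse2 -DMICRO_KERNEL_TEST
//
#ifdef MICRO_KERNEL_TEST
#include <math.h>
#include <stdio.h>

int
main()
{
    enum { KC = 8 };

//  Four doubles of padding cover the read ahead of the pipelined loop.
    static double A_[MR*KC+4] __attribute__ ((aligned (16)));
    static double B_[NR*KC+4] __attribute__ ((aligned (16)));

    double C[MR*NR], C_ref[MR*NR];
    int    i, j, l;

//  Pack: A_[i+l*MR] = A(i,l), B_[j+l*NR] = B(l,j)
    for (l=0; l<KC; ++l) {
        for (i=0; i<MR; ++i) {
            A_[i+l*MR] = 0.1*(i+l*MR);
        }
        for (j=0; j<NR; ++j) {
            B_[j+l*NR] = 0.1*(j+l*NR);
        }
    }

//  Reference result, stored column major like C below
    for (j=0; j<NR; ++j) {
        for (i=0; i<MR; ++i) {
            C_ref[i+j*MR] = 0.0;
            for (l=0; l<KC; ++l) {
                C_ref[i+j*MR] += A_[i+l*MR]*B_[j+l*NR];
            }
        }
    }

//  Column major C: incRowC==1, incColC==MR
    dgemm_micro_kernel(KC, 1.0, A_, B_, 0.0, C, 1, MR);

    for (j=0; j<NR; ++j) {
        for (i=0; i<MR; ++i) {
            if (fabs(C[i+j*MR]-C_ref[i+j*MR])>1e-12) {
                printf("mismatch at (%d,%d): %f != %f\n",
                       i, j, C[i+j*MR], C_ref[i+j*MR]);
                return 1;
            }
        }
    }
    printf("micro kernel matches reference\n");
    return 0;
}
#endif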