/*
 * ceph/src/isa-l/erasure_code/ppc64le/gf_5vect_dot_prod_vsx.c
 */
#include "ec_base_vsx.h"

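/*
 * gf_5vect_dot_prod_vsx: compute five GF(2^8) dot products at once.
 *
 * For each of the five destinations, dest[k] receives the GF(2^8) dot product
 * of the vlen source buffers src[0..vlen-1] with the lookup tables in gftbls.
 * The tables occupy 32 bytes per source/destination pair (a 16-byte low-nibble
 * half and a 16-byte high-nibble half), grouped by destination, so the block
 * for destination k starts at &gftbls[k * 32 * vlen].  len is the number of
 * bytes to process in each buffer.
 */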
void gf_5vect_dot_prod_vsx(int len, int vlen, unsigned char *gftbls,
                           unsigned char **src, unsigned char **dest)
{
        unsigned char *s, *t0, *t1, *t2, *t3, *t4;
        vector unsigned char vX1, vX2, vX3, vX4;        /* 64 bytes of source data */
        vector unsigned char vY1, vY2, vY3, vY4, vY5, vY6, vY7, vY8, vY9, vYA; /* accumulators, lanes 0-1 */
        vector unsigned char vYD, vYE, vYF, vYG, vYH, vYI, vYJ, vYK, vYL, vYM; /* accumulators, lanes 2-3 */
        vector unsigned char vhi0, vlo0, vhi1, vlo1, vhi2, vlo2, vhi3, vlo3, vhi4, vlo4; /* nibble tables */
        int i, j, head;

        if (vlen < 128) {
                /* Small source count: seed each destination with src[0], then
                 * accumulate the remaining sources with the multiply-and-add kernel. */
                gf_vect_mul_vsx(len, &gftbls[0 * 32 * vlen], src[0], (unsigned char *)dest[0]);
                gf_vect_mul_vsx(len, &gftbls[1 * 32 * vlen], src[0], (unsigned char *)dest[1]);
                gf_vect_mul_vsx(len, &gftbls[2 * 32 * vlen], src[0], (unsigned char *)dest[2]);
                gf_vect_mul_vsx(len, &gftbls[3 * 32 * vlen], src[0], (unsigned char *)dest[3]);
                gf_vect_mul_vsx(len, &gftbls[4 * 32 * vlen], src[0], (unsigned char *)dest[4]);

                for (j = 1; j < vlen; j++) {
                        gf_5vect_mad_vsx(len, vlen, j, gftbls, src[j], dest);
                }
                return;
        }

        t0 = (unsigned char *)dest[0];
        t1 = (unsigned char *)dest[1];
        t2 = (unsigned char *)dest[2];
        t3 = (unsigned char *)dest[3];
        t4 = (unsigned char *)dest[4];

        /* Handle the len % 64 head bytes with the scalar base routine;
         * the vector loop below works in 64-byte blocks. */
        head = len % 64;
        if (head != 0) {
                gf_vect_dot_prod_base(head, vlen, &gftbls[0 * 32 * vlen], src, t0);
                gf_vect_dot_prod_base(head, vlen, &gftbls[1 * 32 * vlen], src, t1);
                gf_vect_dot_prod_base(head, vlen, &gftbls[2 * 32 * vlen], src, t2);
                gf_vect_dot_prod_base(head, vlen, &gftbls[3 * 32 * vlen], src, t3);
                gf_vect_dot_prod_base(head, vlen, &gftbls[4 * 32 * vlen], src, t4);
        }

        for (i = head; i < len - 63; i += 64) {
                /* Clear the 20 accumulators (5 destinations x 4 16-byte lanes). */
                vY1 = vY1 ^ vY1;
                vY2 = vY2 ^ vY2;
                vY3 = vY3 ^ vY3;
                vY4 = vY4 ^ vY4;
                vY5 = vY5 ^ vY5;
                vY6 = vY6 ^ vY6;
                vY7 = vY7 ^ vY7;
                vY8 = vY8 ^ vY8;
                vY9 = vY9 ^ vY9;
                vYA = vYA ^ vYA;

                vYD = vYD ^ vYD;
                vYE = vYE ^ vYE;
                vYF = vYF ^ vYF;
                vYG = vYG ^ vYG;
                vYH = vYH ^ vYH;
                vYI = vYI ^ vYI;
                vYJ = vYJ ^ vYJ;
                vYK = vYK ^ vYK;
                vYL = vYL ^ vYL;
                vYM = vYM ^ vYM;

                /* Per-destination table pointers; each source consumes 32 bytes. */
                unsigned char *g0 = &gftbls[0 * 32 * vlen];
                unsigned char *g1 = &gftbls[1 * 32 * vlen];
                unsigned char *g2 = &gftbls[2 * 32 * vlen];
                unsigned char *g3 = &gftbls[3 * 32 * vlen];
                unsigned char *g4 = &gftbls[4 * 32 * vlen];

                for (j = 0; j < vlen; j++) {
                        /* Load 64 bytes of source j. */
                        s = (unsigned char *)src[j];
                        vX1 = vec_xl(0, s + i);
                        vX2 = vec_xl(16, s + i);
                        vX3 = vec_xl(32, s + i);
                        vX4 = vec_xl(48, s + i);

                        /* Low/high nibble tables for destinations 0 and 1. */
                        vlo0 = EC_vec_xl(0, g0);
                        vhi0 = EC_vec_xl(16, g0);
                        vlo1 = EC_vec_xl(0, g1);
                        vhi1 = EC_vec_xl(16, g1);

                        vY1 = vY1 ^ EC_vec_permxor(vhi0, vlo0, vX1);
                        vY2 = vY2 ^ EC_vec_permxor(vhi0, vlo0, vX2);
                        vYD = vYD ^ EC_vec_permxor(vhi0, vlo0, vX3);
                        vYE = vYE ^ EC_vec_permxor(vhi0, vlo0, vX4);

                        /* Tables for destinations 2 and 3. */
                        vlo2 = vec_xl(0, g2);
                        vhi2 = vec_xl(16, g2);
                        vlo3 = vec_xl(0, g3);
                        vhi3 = vec_xl(16, g3);

                        vY3 = vY3 ^ EC_vec_permxor(vhi1, vlo1, vX1);
                        vY4 = vY4 ^ EC_vec_permxor(vhi1, vlo1, vX2);
                        vYF = vYF ^ EC_vec_permxor(vhi1, vlo1, vX3);
                        vYG = vYG ^ EC_vec_permxor(vhi1, vlo1, vX4);

                        /* Tables for destination 4. */
                        vlo4 = vec_xl(0, g4);
                        vhi4 = vec_xl(16, g4);

                        vY5 = vY5 ^ EC_vec_permxor(vhi2, vlo2, vX1);
                        vY6 = vY6 ^ EC_vec_permxor(vhi2, vlo2, vX2);
                        vYH = vYH ^ EC_vec_permxor(vhi2, vlo2, vX3);
                        vYI = vYI ^ EC_vec_permxor(vhi2, vlo2, vX4);

                        vY7 = vY7 ^ EC_vec_permxor(vhi3, vlo3, vX1);
                        vY8 = vY8 ^ EC_vec_permxor(vhi3, vlo3, vX2);
                        vYJ = vYJ ^ EC_vec_permxor(vhi3, vlo3, vX3);
                        vYK = vYK ^ EC_vec_permxor(vhi3, vlo3, vX4);

                        vY9 = vY9 ^ EC_vec_permxor(vhi4, vlo4, vX1);
                        vYA = vYA ^ EC_vec_permxor(vhi4, vlo4, vX2);
                        vYL = vYL ^ EC_vec_permxor(vhi4, vlo4, vX3);
                        vYM = vYM ^ EC_vec_permxor(vhi4, vlo4, vX4);

                        g0 += 32;
                        g1 += 32;
                        g2 += 32;
                        g3 += 32;
                        g4 += 32;
                }

                /* Store the accumulated 64 bytes to each destination. */
                vec_xst(vY1, 0, t0 + i);
                vec_xst(vY2, 16, t0 + i);
                vec_xst(vY3, 0, t1 + i);
                vec_xst(vY4, 16, t1 + i);
                vec_xst(vY5, 0, t2 + i);
                vec_xst(vY6, 16, t2 + i);
                vec_xst(vY7, 0, t3 + i);
                vec_xst(vY8, 16, t3 + i);
                vec_xst(vY9, 0, t4 + i);
                vec_xst(vYA, 16, t4 + i);

                vec_xst(vYD, 32, t0 + i);
                vec_xst(vYE, 48, t0 + i);
                vec_xst(vYF, 32, t1 + i);
                vec_xst(vYG, 48, t1 + i);
                vec_xst(vYH, 32, t2 + i);
                vec_xst(vYI, 48, t2 + i);
                vec_xst(vYJ, 32, t3 + i);
                vec_xst(vYK, 48, t3 + i);
                vec_xst(vYL, 32, t4 + i);
                vec_xst(vYM, 48, t4 + i);
        }
        return;
}
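
/*
 * Minimal usage sketch (not part of this file, kept disabled).  It assumes
 * the standard isa-l erasure-code API, where ec_init_tables() expands a
 * coefficient matrix into the 32-byte-per-entry gftbls layout this kernel
 * expects.  The function name example_encode_5_parities and the parameters
 * coeffs/data/parity are hypothetical illustration names; buffer allocation
 * and the 5 x k coefficient matrix are the caller's responsibility.
 */
#if 0
static void example_encode_5_parities(int len, int k,
                                      unsigned char *coeffs,  /* 5 * k coefficients */
                                      unsigned char **data,   /* k source buffers   */
                                      unsigned char **parity) /* 5 output buffers   */
{
        unsigned char gftbls[5 * 32 * k];       /* VLA for illustration only */

        /* Expand the 5 coefficient rows into nibble lookup tables, then
         * compute all five parity buffers in one pass over the data. */
        ec_init_tables(k, 5, coeffs, gftbls);
        gf_5vect_dot_prod_vsx(len, k, gftbls, data, parity);
}
#endif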