/* ceph/src/isa-l/erasure_code/ppc64le/gf_6vect_mad_vsx.c (imported with ceph 15.2.8) */
#include "ec_base_vsx.h"

void gf_6vect_mad_vsx(int len, int vec, int vec_i, unsigned char *gftbls,
		      unsigned char *src, unsigned char **dest)
{
	unsigned char *s, *t0, *t1, *t2, *t3, *t4, *t5;
	vector unsigned char vX1, vX2, vX3, vX4;
	vector unsigned char vY1, vY2, vY3, vY4, vY5, vY6, vY7, vY8, vY9, vYA, vYB, vYC;
	vector unsigned char vYD, vYE, vYF, vYG, vYH, vYI, vYJ, vYK, vYL, vYM, vYN, vYO;
	vector unsigned char vhi0, vlo0, vhi1, vlo1, vhi2, vlo2;
	vector unsigned char vhi3, vlo3, vhi4, vlo4, vhi5, vlo5;
	int i, head;

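	/* s is the single source buffer; dest[0..5] are the six output buffers updated in place. */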
	s = (unsigned char *)src;
	t0 = (unsigned char *)dest[0];
	t1 = (unsigned char *)dest[1];
	t2 = (unsigned char *)dest[2];
	t3 = (unsigned char *)dest[3];
	t4 = (unsigned char *)dest[4];
	t5 = (unsigned char *)dest[5];

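	/* Process the length remainder (len % 64 bytes) with the scalar base routine; the vector loop below starts at i = head. */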
	head = len % 64;
	if (head != 0) {
		gf_vect_mad_base(head, vec, vec_i, &gftbls[0 * 32 * vec], src, t0);
		gf_vect_mad_base(head, vec, vec_i, &gftbls[1 * 32 * vec], src, t1);
		gf_vect_mad_base(head, vec, vec_i, &gftbls[2 * 32 * vec], src, t2);
		gf_vect_mad_base(head, vec, vec_i, &gftbls[3 * 32 * vec], src, t3);
		gf_vect_mad_base(head, vec, vec_i, &gftbls[4 * 32 * vec], src, t4);
		gf_vect_mad_base(head, vec, vec_i, &gftbls[5 * 32 * vec], src, t5);
	}

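	/* Load the 32-byte GF(2^8) multiply table for source vec_i (low-nibble and high-nibble halves) into a vlo/vhi pair for each of the six destinations. */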
	vlo0 = EC_vec_xl(0, gftbls + (((0 * vec) << 5) + (vec_i << 5)));
	vhi0 = EC_vec_xl(16, gftbls + (((0 * vec) << 5) + (vec_i << 5)));
	vlo1 = EC_vec_xl(0, gftbls + (((1 * vec) << 5) + (vec_i << 5)));
	vhi1 = EC_vec_xl(16, gftbls + (((1 * vec) << 5) + (vec_i << 5)));
	vlo2 = EC_vec_xl(0, gftbls + (((2 * vec) << 5) + (vec_i << 5)));
	vhi2 = EC_vec_xl(16, gftbls + (((2 * vec) << 5) + (vec_i << 5)));
	vlo3 = EC_vec_xl(0, gftbls + (((3 * vec) << 5) + (vec_i << 5)));
	vhi3 = EC_vec_xl(16, gftbls + (((3 * vec) << 5) + (vec_i << 5)));
	vlo4 = EC_vec_xl(0, gftbls + (((4 * vec) << 5) + (vec_i << 5)));
	vhi4 = EC_vec_xl(16, gftbls + (((4 * vec) << 5) + (vec_i << 5)));
	vlo5 = EC_vec_xl(0, gftbls + (((5 * vec) << 5) + (vec_i << 5)));
	vhi5 = EC_vec_xl(16, gftbls + (((5 * vec) << 5) + (vec_i << 5)));

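	/* Main loop: consume 64 source bytes per iteration and multiply-accumulate them into all six destinations. */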
	for (i = head; i < len - 63; i += 64) {
		vX1 = vec_xl(0, s + i);
		vX2 = vec_xl(16, s + i);
		vX3 = vec_xl(32, s + i);
		vX4 = vec_xl(48, s + i);

		vY1 = vec_xl(0, t0 + i);
		vY2 = vec_xl(16, t0 + i);
		vYD = vec_xl(32, t0 + i);
		vYE = vec_xl(48, t0 + i);

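		/* EC_vec_permxor multiplies each source byte in GF(2^8) via the split low/high nibble tables; the product is XORed into the destination data. */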
		vY1 = vY1 ^ EC_vec_permxor(vhi0, vlo0, vX1);
		vY2 = vY2 ^ EC_vec_permxor(vhi0, vlo0, vX2);
		vYD = vYD ^ EC_vec_permxor(vhi0, vlo0, vX3);
		vYE = vYE ^ EC_vec_permxor(vhi0, vlo0, vX4);

		vec_xst(vY1, 0, t0 + i);
		vec_xst(vY2, 16, t0 + i);
		vec_xst(vYD, 32, t0 + i);
		vec_xst(vYE, 48, t0 + i);

		vY3 = vec_xl(0, t1 + i);
		vY4 = vec_xl(16, t1 + i);
		vYF = vec_xl(32, t1 + i);
		vYG = vec_xl(48, t1 + i);

		vY3 = vY3 ^ EC_vec_permxor(vhi1, vlo1, vX1);
		vY4 = vY4 ^ EC_vec_permxor(vhi1, vlo1, vX2);
		vYF = vYF ^ EC_vec_permxor(vhi1, vlo1, vX3);
		vYG = vYG ^ EC_vec_permxor(vhi1, vlo1, vX4);

		vec_xst(vY3, 0, t1 + i);
		vec_xst(vY4, 16, t1 + i);
		vec_xst(vYF, 32, t1 + i);
		vec_xst(vYG, 48, t1 + i);

		vY5 = vec_xl(0, t2 + i);
		vY6 = vec_xl(16, t2 + i);
		vYH = vec_xl(32, t2 + i);
		vYI = vec_xl(48, t2 + i);

		vY5 = vY5 ^ EC_vec_permxor(vhi2, vlo2, vX1);
		vY6 = vY6 ^ EC_vec_permxor(vhi2, vlo2, vX2);
		vYH = vYH ^ EC_vec_permxor(vhi2, vlo2, vX3);
		vYI = vYI ^ EC_vec_permxor(vhi2, vlo2, vX4);

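		/* From here on, loads for the next destination are interleaved with the stores of the previous one (simple software pipelining). */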
		vY7 = vec_xl(0, t3 + i);
		vY8 = vec_xl(16, t3 + i);
		vYJ = vec_xl(32, t3 + i);
		vYK = vec_xl(48, t3 + i);

		vec_xst(vY5, 0, t2 + i);
		vec_xst(vY6, 16, t2 + i);
		vec_xst(vYH, 32, t2 + i);
		vec_xst(vYI, 48, t2 + i);

		vY7 = vY7 ^ EC_vec_permxor(vhi3, vlo3, vX1);
		vY8 = vY8 ^ EC_vec_permxor(vhi3, vlo3, vX2);
		vYJ = vYJ ^ EC_vec_permxor(vhi3, vlo3, vX3);
		vYK = vYK ^ EC_vec_permxor(vhi3, vlo3, vX4);

		vY9 = vec_xl(0, t4 + i);
		vYA = vec_xl(16, t4 + i);
		vYL = vec_xl(32, t4 + i);
		vYM = vec_xl(48, t4 + i);

		vec_xst(vY7, 0, t3 + i);
		vec_xst(vY8, 16, t3 + i);
		vec_xst(vYJ, 32, t3 + i);
		vec_xst(vYK, 48, t3 + i);

		vY9 = vY9 ^ EC_vec_permxor(vhi4, vlo4, vX1);
		vYA = vYA ^ EC_vec_permxor(vhi4, vlo4, vX2);
		vYL = vYL ^ EC_vec_permxor(vhi4, vlo4, vX3);
		vYM = vYM ^ EC_vec_permxor(vhi4, vlo4, vX4);

		vYB = vec_xl(0, t5 + i);
		vYC = vec_xl(16, t5 + i);
		vYN = vec_xl(32, t5 + i);
		vYO = vec_xl(48, t5 + i);

		vec_xst(vY9, 0, t4 + i);
		vec_xst(vYA, 16, t4 + i);
		vec_xst(vYL, 32, t4 + i);
		vec_xst(vYM, 48, t4 + i);

		vYB = vYB ^ EC_vec_permxor(vhi5, vlo5, vX1);
		vYC = vYC ^ EC_vec_permxor(vhi5, vlo5, vX2);
		vYN = vYN ^ EC_vec_permxor(vhi5, vlo5, vX3);
		vYO = vYO ^ EC_vec_permxor(vhi5, vlo5, vX4);

		vec_xst(vYB, 0, t5 + i);
		vec_xst(vYC, 16, t5 + i);
		vec_xst(vYN, 32, t5 + i);
		vec_xst(vYO, 48, t5 + i);
	}
	return;
}