/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/slab.h>
#include <linux/delay.h>

#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "icp_qat_hal.h"
#include "icp_qat_uclo.h"

#define BAD_REGADDR 0xffff
#define MAX_RETRY_TIMES 10000
#define INIT_CTX_ARB_VALUE 0x0
#define INIT_CTX_ENABLE_VALUE 0x0
#define INIT_PC_VALUE 0x0
#define INIT_WAKEUP_EVENTS_VALUE 0x1
#define INIT_SIG_EVENTS_VALUE 0x1
#define INIT_CCENABLE_VALUE 0x2000
#define RST_CSR_QAT_LSB 20
#define RST_CSR_AE_LSB 0
#define MC_TIMESTAMP_ENABLE (0x1 << 7)

#define IGNORE_W1C_MASK ((~(1 << CE_BREAKPOINT_BITPOS)) & \
	(~(1 << CE_CNTL_STORE_PARITY_ERROR_BITPOS)) & \
	(~(1 << CE_REG_PAR_ERR_BITPOS)))
#define INSERT_IMMED_GPRA_CONST(inst, const_val) \
	(inst = ((inst & 0xFFFF00C03FFull) | \
		((((const_val) << 12) & 0x0FF00000ull) | \
		(((const_val) << 10) & 0x0003FC00ull))))
#define INSERT_IMMED_GPRB_CONST(inst, const_val) \
	(inst = ((inst & 0xFFFF00FFF00ull) | \
		((((const_val) << 12) & 0x0FF00000ull) | \
		(((const_val) << 0) & 0x000000FFull))))

#define AE(handle, ae) handle->hal_handle->aes[ae]

static const uint64_t inst_4b[] = {
	0x0F0400C0000ull, 0x0F4400C0000ull, 0x0F040000300ull, 0x0F440000300ull,
	0x0FC066C0000ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
	0x0A021000000ull
};

static const uint64_t inst[] = {
	0x0F0000C0000ull, 0x0F000000380ull, 0x0D805000011ull, 0x0FC082C0300ull,
	0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
	0x0A0643C0000ull, 0x0BAC0000301ull, 0x0D802000101ull, 0x0F0000C0001ull,
	0x0FC066C0001ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
	0x0F000400300ull, 0x0A0610C0000ull, 0x0BAC0000301ull, 0x0D804400101ull,
	0x0A0580C0000ull, 0x0A0581C0000ull, 0x0A0582C0000ull, 0x0A0583C0000ull,
	0x0A0584C0000ull, 0x0A0585C0000ull, 0x0A0586C0000ull, 0x0A0587C0000ull,
	0x0A0588C0000ull, 0x0A0589C0000ull, 0x0A058AC0000ull, 0x0A058BC0000ull,
	0x0A058CC0000ull, 0x0A058DC0000ull, 0x0A058EC0000ull, 0x0A058FC0000ull,
	0x0A05C0C0000ull, 0x0A05C1C0000ull, 0x0A05C2C0000ull, 0x0A05C3C0000ull,
	0x0A05C4C0000ull, 0x0A05C5C0000ull, 0x0A05C6C0000ull, 0x0A05C7C0000ull,
	0x0A05C8C0000ull, 0x0A05C9C0000ull, 0x0A05CAC0000ull, 0x0A05CBC0000ull,
	0x0A05CCC0000ull, 0x0A05CDC0000ull, 0x0A05CEC0000ull, 0x0A05CFC0000ull,
	0x0A0400C0000ull, 0x0B0400C0000ull, 0x0A0401C0000ull, 0x0B0401C0000ull,
	0x0A0402C0000ull, 0x0B0402C0000ull, 0x0A0403C0000ull, 0x0B0403C0000ull,
	0x0A0404C0000ull, 0x0B0404C0000ull, 0x0A0405C0000ull, 0x0B0405C0000ull,
	0x0A0406C0000ull, 0x0B0406C0000ull, 0x0A0407C0000ull, 0x0B0407C0000ull,
	0x0A0408C0000ull, 0x0B0408C0000ull, 0x0A0409C0000ull, 0x0B0409C0000ull,
	0x0A040AC0000ull, 0x0B040AC0000ull, 0x0A040BC0000ull, 0x0B040BC0000ull,
	0x0A040CC0000ull, 0x0B040CC0000ull, 0x0A040DC0000ull, 0x0B040DC0000ull,
	0x0A040EC0000ull, 0x0B040EC0000ull, 0x0A040FC0000ull, 0x0B040FC0000ull,
	0x0D81581C010ull, 0x0E000010000ull, 0x0E000010000ull,
};

void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle,
			  unsigned char ae, unsigned int ctx_mask)
{
	AE(handle, ae).live_ctx_mask = ctx_mask;
}

#define CSR_RETRY_TIMES 500
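/*
 * AE CSR accessors: each access is retried until the LCS_STATUS bit in
 * LOCAL_CSR_STATUS clears, up to CSR_RETRY_TIMES attempts.
 */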
static int qat_hal_rd_ae_csr(struct icp_qat_fw_loader_handle *handle,
			     unsigned char ae, unsigned int csr,
			     unsigned int *value)
{
	unsigned int iterations = CSR_RETRY_TIMES;

	do {
		*value = GET_AE_CSR(handle, ae, csr);
		if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
			return 0;
	} while (iterations--);

	pr_err("QAT: Read CSR timeout\n");
	return -EFAULT;
}

static int qat_hal_wr_ae_csr(struct icp_qat_fw_loader_handle *handle,
			     unsigned char ae, unsigned int csr,
			     unsigned int value)
{
	unsigned int iterations = CSR_RETRY_TIMES;

	do {
		SET_AE_CSR(handle, ae, csr, value);
		if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
			return 0;
	} while (iterations--);

	pr_err("QAT: Write CSR Timeout\n");
	return -EFAULT;
}

static void qat_hal_get_wakeup_event(struct icp_qat_fw_loader_handle *handle,
				     unsigned char ae, unsigned char ctx,
				     unsigned int *events)
{
	unsigned int cur_ctx;

	qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
	qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT, events);
	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
}

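/*
 * Busy-wait until at least 'cycles' AE clock ticks have elapsed, measured
 * via the 16-bit PROFILE_COUNT counter (with wrap-around handling). When
 * chk_inactive is set, also require the AE to be idle (ACS_ABO clear in
 * ACTIVE_CTX_STATUS) before returning success.
 */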
static int qat_hal_wait_cycles(struct icp_qat_fw_loader_handle *handle,
			       unsigned char ae, unsigned int cycles,
			       int chk_inactive)
{
	unsigned int base_cnt = 0, cur_cnt = 0;
	unsigned int csr = (1 << ACS_ABO_BITPOS);
	int times = MAX_RETRY_TIMES;
	int elapsed_cycles = 0;

	qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &base_cnt);
	base_cnt &= 0xffff;
	while ((int)cycles > elapsed_cycles && times--) {
		if (chk_inactive)
			qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &csr);

		qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &cur_cnt);
		cur_cnt &= 0xffff;
		elapsed_cycles = cur_cnt - base_cnt;

		if (elapsed_cycles < 0)
			elapsed_cycles += 0x10000;

		/* ensure at least 8 cycles have elapsed in wait_cycles */
		if (elapsed_cycles >= 8 && !(csr & (1 << ACS_ABO_BITPOS)))
			return 0;
	}
	if (times < 0) {
		pr_err("QAT: wait_num_cycles time out\n");
		return -EFAULT;
	}
	return 0;
}

#define CLR_BIT(wrd, bit) (wrd & ~(1 << bit))
#define SET_BIT(wrd, bit) (wrd | 1 << bit)

int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle,
			    unsigned char ae, unsigned char mode)
{
	unsigned int csr, new_csr;

	if ((mode != 4) && (mode != 8)) {
		pr_err("QAT: bad ctx mode=%d\n", mode);
		return -EINVAL;
	}

	/* Set the acceleration engine context mode to either four or eight */
	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
	csr = IGNORE_W1C_MASK & csr;
	new_csr = (mode == 4) ?
		SET_BIT(csr, CE_INUSE_CONTEXTS_BITPOS) :
		CLR_BIT(csr, CE_INUSE_CONTEXTS_BITPOS);
	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
	return 0;
}

int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle,
			   unsigned char ae, unsigned char mode)
{
	unsigned int csr, new_csr;

	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
	csr &= IGNORE_W1C_MASK;

	new_csr = (mode) ?
		SET_BIT(csr, CE_NN_MODE_BITPOS) :
		CLR_BIT(csr, CE_NN_MODE_BITPOS);

	if (new_csr != csr)
		qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);

	return 0;
}

int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle,
			   unsigned char ae, enum icp_qat_uof_regtype lm_type,
			   unsigned char mode)
{
	unsigned int csr, new_csr;

	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
	csr &= IGNORE_W1C_MASK;
	switch (lm_type) {
	case ICP_LMEM0:
		new_csr = (mode) ?
			SET_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS) :
			CLR_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS);
		break;
	case ICP_LMEM1:
		new_csr = (mode) ?
			SET_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS) :
			CLR_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS);
		break;
	default:
		pr_err("QAT: lmType = 0x%x\n", lm_type);
		return -EINVAL;
	}

	if (new_csr != csr)
		qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
	return 0;
}

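/* Map a register type and number to its address in the AE register space */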
static unsigned short qat_hal_get_reg_addr(unsigned int type,
					   unsigned short reg_num)
{
	unsigned short reg_addr;

	switch (type) {
	case ICP_GPA_ABS:
	case ICP_GPB_ABS:
		reg_addr = 0x80 | (reg_num & 0x7f);
		break;
	case ICP_GPA_REL:
	case ICP_GPB_REL:
		reg_addr = reg_num & 0x1f;
		break;
	case ICP_SR_RD_REL:
	case ICP_SR_WR_REL:
	case ICP_SR_REL:
		reg_addr = 0x180 | (reg_num & 0x1f);
		break;
	case ICP_SR_ABS:
		reg_addr = 0x140 | ((reg_num & 0x3) << 1);
		break;
	case ICP_DR_RD_REL:
	case ICP_DR_WR_REL:
	case ICP_DR_REL:
		reg_addr = 0x1c0 | (reg_num & 0x1f);
		break;
	case ICP_DR_ABS:
		reg_addr = 0x100 | ((reg_num & 0x3) << 1);
		break;
	case ICP_NEIGH_REL:
		reg_addr = 0x280 | (reg_num & 0x1f);
		break;
	case ICP_LMEM0:
		reg_addr = 0x200;
		break;
	case ICP_LMEM1:
		reg_addr = 0x220;
		break;
	case ICP_NO_DEST:
		reg_addr = 0x300 | (reg_num & 0xff);
		break;
	default:
		reg_addr = BAD_REGADDR;
		break;
	}
	return reg_addr;
}

void qat_hal_reset(struct icp_qat_fw_loader_handle *handle)
{
	unsigned int ae_reset_csr;

	ae_reset_csr = GET_GLB_CSR(handle, ICP_RESET);
	ae_reset_csr |= handle->hal_handle->ae_mask << RST_CSR_AE_LSB;
	ae_reset_csr |= handle->hal_handle->slice_mask << RST_CSR_QAT_LSB;
	SET_GLB_CSR(handle, ICP_RESET, ae_reset_csr);
}

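/*
 * Indirect (per-context) CSR access: select each context through
 * CSR_CTX_POINTER, access the CSR, then restore the original pointer.
 */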
static void qat_hal_wr_indr_csr(struct icp_qat_fw_loader_handle *handle,
				unsigned char ae, unsigned int ctx_mask,
				unsigned int ae_csr, unsigned int csr_val)
{
	unsigned int ctx, cur_ctx;

	qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);

	for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
		if (!(ctx_mask & (1 << ctx)))
			continue;
		qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
		qat_hal_wr_ae_csr(handle, ae, ae_csr, csr_val);
	}

	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
}

static void qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle,
				unsigned char ae, unsigned char ctx,
				unsigned int ae_csr, unsigned int *csr_val)
{
	unsigned int cur_ctx;

	qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
	qat_hal_rd_ae_csr(handle, ae, ae_csr, csr_val);
	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
}

static void qat_hal_put_sig_event(struct icp_qat_fw_loader_handle *handle,
				  unsigned char ae, unsigned int ctx_mask,
				  unsigned int events)
{
	unsigned int ctx, cur_ctx;

	qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
	for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
		if (!(ctx_mask & (1 << ctx)))
			continue;
		qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
		qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_INDIRECT, events);
	}
	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
}

static void qat_hal_put_wakeup_event(struct icp_qat_fw_loader_handle *handle,
				     unsigned char ae, unsigned int ctx_mask,
				     unsigned int events)
{
	unsigned int ctx, cur_ctx;

	qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
	for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
		if (!(ctx_mask & (1 << ctx)))
			continue;
		qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
		qat_hal_wr_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT,
				  events);
	}
	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
}

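/*
 * Verify that every AE is executing by watching its PROFILE_COUNT advance;
 * an AE whose counter never changes is reported as inactive.
 */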
static int qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle *handle)
{
	unsigned int base_cnt, cur_cnt;
	unsigned char ae;
	int times = MAX_RETRY_TIMES;

	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
		qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT,
				  (unsigned int *)&base_cnt);
		base_cnt &= 0xffff;

		do {
			qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT,
					  (unsigned int *)&cur_cnt);
			cur_cnt &= 0xffff;
		} while (times-- && (cur_cnt == base_cnt));

		if (times < 0) {
			pr_err("QAT: AE%d is inactive!!\n", ae);
			return -EFAULT;
		}
	}

	return 0;
}

int qat_hal_check_ae_active(struct icp_qat_fw_loader_handle *handle,
			    unsigned int ae)
{
	unsigned int enable = 0, active = 0;

	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &enable);
	qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &active);
	if ((enable & (0xff << CE_ENABLE_BITPOS)) ||
	    (active & (1 << ACS_ABO_BITPOS)))
		return 1;
	else
		return 0;
}

static void qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle)
{
	unsigned int misc_ctl;
	unsigned char ae;

	/* stop the timestamp timers */
	misc_ctl = GET_GLB_CSR(handle, MISC_CONTROL);
	if (misc_ctl & MC_TIMESTAMP_ENABLE)
		SET_GLB_CSR(handle, MISC_CONTROL, misc_ctl &
			    (~MC_TIMESTAMP_ENABLE));

	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
		qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_LOW, 0);
		qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_HIGH, 0);
	}
	/* start timestamp timers */
	SET_GLB_CSR(handle, MISC_CONTROL, misc_ctl | MC_TIMESTAMP_ENABLE);
}

#define ESRAM_AUTO_TINIT	BIT(2)
#define ESRAM_AUTO_TINIT_DONE	BIT(3)
#define ESRAM_AUTO_INIT_USED_CYCLES (1640)
#define ESRAM_AUTO_INIT_CSR_OFFSET 0xC1C
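/*
 * Trigger eSRAM auto-initialization (skipped on C3XXX, which returns
 * early) and poll the CSR until it reports completion.
 */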
static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle)
{
	void __iomem *csr_addr =
		(void __iomem *)((uintptr_t)handle->hal_ep_csr_addr_v +
				 ESRAM_AUTO_INIT_CSR_OFFSET);
	unsigned int csr_val;
	int times = 30;

	if (handle->pci_dev->device == ADF_C3XXX_PCI_DEVICE_ID)
		return 0;

	csr_val = ADF_CSR_RD(csr_addr, 0);
	if ((csr_val & ESRAM_AUTO_TINIT) && (csr_val & ESRAM_AUTO_TINIT_DONE))
		return 0;

	csr_val = ADF_CSR_RD(csr_addr, 0);
	csr_val |= ESRAM_AUTO_TINIT;
	ADF_CSR_WR(csr_addr, 0, csr_val);

	do {
		qat_hal_wait_cycles(handle, 0, ESRAM_AUTO_INIT_USED_CYCLES, 0);
		csr_val = ADF_CSR_RD(csr_addr, 0);
	} while (!(csr_val & ESRAM_AUTO_TINIT_DONE) && times--);
	if (times < 0) {
		pr_err("QAT: Fail to init eSram!\n");
		return -EFAULT;
	}
	return 0;
}

#define SHRAM_INIT_CYCLES 2060
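/*
 * Take the AEs and accelerator slices out of reset, enable their clocks,
 * and bring contexts, PCs and event state to known initial values.
 */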
int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle)
{
	unsigned int ae_reset_csr;
	unsigned char ae;
	unsigned int clk_csr;
	unsigned int times = 100;
	unsigned int csr;

	/* write to the reset csr */
	ae_reset_csr = GET_GLB_CSR(handle, ICP_RESET);
	ae_reset_csr &= ~(handle->hal_handle->ae_mask << RST_CSR_AE_LSB);
	ae_reset_csr &= ~(handle->hal_handle->slice_mask << RST_CSR_QAT_LSB);
	do {
		SET_GLB_CSR(handle, ICP_RESET, ae_reset_csr);
		if (!(times--))
			goto out_err;
		csr = GET_GLB_CSR(handle, ICP_RESET);
	} while ((handle->hal_handle->ae_mask |
		  (handle->hal_handle->slice_mask << RST_CSR_QAT_LSB)) & csr);
	/* enable clock */
	clk_csr = GET_GLB_CSR(handle, ICP_GLOBAL_CLK_ENABLE);
	clk_csr |= handle->hal_handle->ae_mask << 0;
	clk_csr |= handle->hal_handle->slice_mask << 20;
	SET_GLB_CSR(handle, ICP_GLOBAL_CLK_ENABLE, clk_csr);
	if (qat_hal_check_ae_alive(handle))
		goto out_err;

	/* Set undefined power-up/reset states to reasonable default values */
	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
		qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES,
				  INIT_CTX_ENABLE_VALUE);
		qat_hal_wr_indr_csr(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX,
				    CTX_STS_INDIRECT,
				    handle->hal_handle->upc_mask &
				    INIT_PC_VALUE);
		qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE);
		qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE);
		qat_hal_put_wakeup_event(handle, ae,
					 ICP_QAT_UCLO_AE_ALL_CTX,
					 INIT_WAKEUP_EVENTS_VALUE);
		qat_hal_put_sig_event(handle, ae,
				      ICP_QAT_UCLO_AE_ALL_CTX,
				      INIT_SIG_EVENTS_VALUE);
	}
	if (qat_hal_init_esram(handle))
		goto out_err;
	if (qat_hal_wait_cycles(handle, 0, SHRAM_INIT_CYCLES, 0))
		goto out_err;
	qat_hal_reset_timestamp(handle);

	return 0;
out_err:
	pr_err("QAT: failed to get device out of reset\n");
	return -EFAULT;
}

static void qat_hal_disable_ctx(struct icp_qat_fw_loader_handle *handle,
				unsigned char ae, unsigned int ctx_mask)
{
	unsigned int ctx;

	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx);
	ctx &= IGNORE_W1C_MASK &
		(~((ctx_mask & ICP_QAT_UCLO_AE_ALL_CTX) << CE_ENABLE_BITPOS));
	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
}

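/*
 * Microstore words carry a 7-bit ECC in bits 44-50; each ECC bit is the
 * parity of the data bits selected by the corresponding mask below.
 */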
static uint64_t qat_hal_parity_64bit(uint64_t word)
{
	word ^= word >> 1;
	word ^= word >> 2;
	word ^= word >> 4;
	word ^= word >> 8;
	word ^= word >> 16;
	word ^= word >> 32;
	return word & 1;
}

static uint64_t qat_hal_set_uword_ecc(uint64_t uword)
{
	uint64_t bit0_mask = 0xff800007fffULL, bit1_mask = 0x1f801ff801fULL,
		bit2_mask = 0xe387e0781e1ULL, bit3_mask = 0x7cb8e388e22ULL,
		bit4_mask = 0xaf5b2c93244ULL, bit5_mask = 0xf56d5525488ULL,
		bit6_mask = 0xdaf69a46910ULL;

	/* clear the ecc bits */
	uword &= ~(0x7fULL << 0x2C);
	uword |= qat_hal_parity_64bit(bit0_mask & uword) << 0x2C;
	uword |= qat_hal_parity_64bit(bit1_mask & uword) << 0x2D;
	uword |= qat_hal_parity_64bit(bit2_mask & uword) << 0x2E;
	uword |= qat_hal_parity_64bit(bit3_mask & uword) << 0x2F;
	uword |= qat_hal_parity_64bit(bit4_mask & uword) << 0x30;
	uword |= qat_hal_parity_64bit(bit5_mask & uword) << 0x31;
	uword |= qat_hal_parity_64bit(bit6_mask & uword) << 0x32;
	return uword;
}

void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
		       unsigned char ae, unsigned int uaddr,
		       unsigned int words_num, uint64_t *uword)
{
	unsigned int ustore_addr;
	unsigned int i;

	qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
	uaddr |= UA_ECS;
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
	for (i = 0; i < words_num; i++) {
		unsigned int uwrd_lo, uwrd_hi;
		uint64_t tmp;

		tmp = qat_hal_set_uword_ecc(uword[i]);
		uwrd_lo = (unsigned int)(tmp & 0xffffffff);
		uwrd_hi = (unsigned int)(tmp >> 0x20);
		qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
		qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
	}
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
}

static void qat_hal_enable_ctx(struct icp_qat_fw_loader_handle *handle,
			       unsigned char ae, unsigned int ctx_mask)
{
	unsigned int ctx;

	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx);
	ctx &= IGNORE_W1C_MASK;
	ctx_mask &= (ctx & CE_INUSE_CONTEXTS) ? 0x55 : 0xFF;
	ctx |= (ctx_mask << CE_ENABLE_BITPOS);
	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
}

static void qat_hal_clear_xfer(struct icp_qat_fw_loader_handle *handle)
{
	unsigned char ae;
	unsigned short reg;

	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
		for (reg = 0; reg < ICP_QAT_UCLO_MAX_GPR_REG; reg++) {
			qat_hal_init_rd_xfer(handle, ae, 0, ICP_SR_RD_ABS,
					     reg, 0);
			qat_hal_init_rd_xfer(handle, ae, 0, ICP_DR_RD_ABS,
					     reg, 0);
		}
	}
}

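/*
 * Clear the GPRs of all AEs by loading the 'inst' microcode sequence into
 * each AE's ustore, running it on every context, then restoring the
 * initial context, PC and event state.
 */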
static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
{
	unsigned char ae;
	unsigned int ctx_mask = ICP_QAT_UCLO_AE_ALL_CTX;
	int times = MAX_RETRY_TIMES;
	unsigned int csr_val = 0;
	unsigned int savctx = 0;
	int ret = 0;

	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
		qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
		csr_val &= ~(1 << MMC_SHARE_CS_BITPOS);
		qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val);
		qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr_val);
		csr_val &= IGNORE_W1C_MASK;
		csr_val |= CE_NN_MODE;
		qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, csr_val);
		qat_hal_wr_uwords(handle, ae, 0, ARRAY_SIZE(inst),
				  (uint64_t *)inst);
		qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
				    handle->hal_handle->upc_mask &
				    INIT_PC_VALUE);
		qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
		qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, 0);
		qat_hal_put_wakeup_event(handle, ae, ctx_mask, XCWE_VOLUNTARY);
		qat_hal_wr_indr_csr(handle, ae, ctx_mask,
				    CTX_SIG_EVENTS_INDIRECT, 0);
		qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
		qat_hal_enable_ctx(handle, ae, ctx_mask);
	}
	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
		/* wait for AE to finish */
		do {
			ret = qat_hal_wait_cycles(handle, ae, 20, 1);
		} while (ret && times--);

		if (times < 0) {
			pr_err("QAT: clear GPR of AE %d failed", ae);
			return -EINVAL;
		}
		qat_hal_disable_ctx(handle, ae, ctx_mask);
		qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
				  savctx & ACS_ACNO);
		qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES,
				  INIT_CTX_ENABLE_VALUE);
		qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
				    handle->hal_handle->upc_mask &
				    INIT_PC_VALUE);
		qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE);
		qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE);
		qat_hal_put_wakeup_event(handle, ae, ctx_mask,
					 INIT_WAKEUP_EVENTS_VALUE);
		qat_hal_put_sig_event(handle, ae, ctx_mask,
				      INIT_SIG_EVENTS_VALUE);
	}
	return 0;
}

#define ICP_QAT_AE_OFFSET	0x20000
#define ICP_QAT_CAP_OFFSET	(ICP_QAT_AE_OFFSET + 0x10000)
#define LOCAL_TO_XFER_REG_OFFSET	0x800
#define ICP_QAT_EP_OFFSET	0x3a000
int qat_hal_init(struct adf_accel_dev *accel_dev)
{
	unsigned char ae;
	unsigned int max_en_ae_id = 0;
	struct icp_qat_fw_loader_handle *handle;
	struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_bar *misc_bar =
			&pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)];
	struct adf_bar *sram_bar;

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;

	handle->hal_cap_g_ctl_csr_addr_v =
		(void __iomem *)((uintptr_t)misc_bar->virt_addr +
				 ICP_QAT_CAP_OFFSET);
	handle->hal_cap_ae_xfer_csr_addr_v =
		(void __iomem *)((uintptr_t)misc_bar->virt_addr +
				 ICP_QAT_AE_OFFSET);
	handle->hal_ep_csr_addr_v =
		(void __iomem *)((uintptr_t)misc_bar->virt_addr +
				 ICP_QAT_EP_OFFSET);
	handle->hal_cap_ae_local_csr_addr_v =
		(void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v +
				 LOCAL_TO_XFER_REG_OFFSET);
	handle->pci_dev = pci_info->pci_dev;
	if (handle->pci_dev->device != ADF_C3XXX_PCI_DEVICE_ID) {
		sram_bar =
			&pci_info->pci_bars[hw_data->get_sram_bar_id(hw_data)];
		handle->hal_sram_addr_v = sram_bar->virt_addr;
	}
	handle->fw_auth = (handle->pci_dev->device ==
			   ADF_DH895XCC_PCI_DEVICE_ID) ? false : true;
	handle->hal_handle = kzalloc(sizeof(*handle->hal_handle), GFP_KERNEL);
	if (!handle->hal_handle)
		goto out_hal_handle;
	handle->hal_handle->revision_id = accel_dev->accel_pci_dev.revid;
	handle->hal_handle->ae_mask = hw_data->ae_mask;
	handle->hal_handle->slice_mask = hw_data->accel_mask;
	/* create AE objects */
	handle->hal_handle->upc_mask = 0x1ffff;
	handle->hal_handle->max_ustore = 0x4000;
	for (ae = 0; ae < ICP_QAT_UCLO_MAX_AE; ae++) {
		if (!(hw_data->ae_mask & (1 << ae)))
			continue;
		handle->hal_handle->aes[ae].free_addr = 0;
		handle->hal_handle->aes[ae].free_size =
			handle->hal_handle->max_ustore;
		handle->hal_handle->aes[ae].ustore_size =
			handle->hal_handle->max_ustore;
		handle->hal_handle->aes[ae].live_ctx_mask =
			ICP_QAT_UCLO_AE_ALL_CTX;
		max_en_ae_id = ae;
	}
	handle->hal_handle->ae_max_num = max_en_ae_id + 1;
	/* take all AEs out of reset */
	if (qat_hal_clr_reset(handle)) {
		dev_err(&GET_DEV(accel_dev), "qat_hal_clr_reset error\n");
		goto out_err;
	}
	qat_hal_clear_xfer(handle);
	if (!handle->fw_auth) {
		if (qat_hal_clear_gpr(handle))
			goto out_err;
	}

	/* Set SIGNATURE_ENABLE[0] to 0x1 in order to enable ALU_OUT csr */
	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
		unsigned int csr_val = 0;

		qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE, &csr_val);
		csr_val |= 0x1;
		qat_hal_wr_ae_csr(handle, ae, SIGNATURE_ENABLE, csr_val);
	}
	accel_dev->fw_loader->fw_loader = handle;
	return 0;

out_err:
	kfree(handle->hal_handle);
out_hal_handle:
	kfree(handle);
	return -EFAULT;
}

void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle)
{
	if (!handle)
		return;
	kfree(handle->hal_handle);
	kfree(handle);
}

void qat_hal_start(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
		   unsigned int ctx_mask)
{
	int retry = 0;
	unsigned int fcu_sts = 0;

	if (handle->fw_auth) {
		SET_CAP_CSR(handle, FCU_CONTROL, FCU_CTRL_CMD_START);
		do {
			msleep(FW_AUTH_WAIT_PERIOD);
			fcu_sts = GET_CAP_CSR(handle, FCU_STATUS);
			if (((fcu_sts >> FCU_STS_DONE_POS) & 0x1))
				return;
		} while (retry++ < FW_AUTH_MAX_RETRY);
		pr_err("QAT: start error (AE 0x%x FCU_STS = 0x%x)\n", ae,
		       fcu_sts);
	} else {
		qat_hal_put_wakeup_event(handle, ae, (~ctx_mask) &
					 ICP_QAT_UCLO_AE_ALL_CTX, 0x10000);
		qat_hal_enable_ctx(handle, ae, ctx_mask);
	}
}

void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
		  unsigned int ctx_mask)
{
	if (!handle->fw_auth)
		qat_hal_disable_ctx(handle, ae, ctx_mask);
}

void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle,
		    unsigned char ae, unsigned int ctx_mask, unsigned int upc)
{
	qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
			    handle->hal_handle->upc_mask & upc);
}

static void qat_hal_get_uwords(struct icp_qat_fw_loader_handle *handle,
			       unsigned char ae, unsigned int uaddr,
			       unsigned int words_num, uint64_t *uword)
{
	unsigned int i, uwrd_lo, uwrd_hi;
	unsigned int ustore_addr, misc_control;

	qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &misc_control);
	qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL,
			  misc_control & 0xfffffffb);
	qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
	uaddr |= UA_ECS;
	for (i = 0; i < words_num; i++) {
		qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
		uaddr++;
		qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_LOWER, &uwrd_lo);
		qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_UPPER, &uwrd_hi);
		uword[i] = uwrd_hi;
		uword[i] = (uword[i] << 0x20) | uwrd_lo;
	}
	qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, misc_control);
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
}

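/*
 * Write 32-bit words into shared ustore memory, packing each word into the
 * ustore data format and setting a parity bit for each 16-bit half.
 */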
void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle,
		     unsigned char ae, unsigned int uaddr,
		     unsigned int words_num, unsigned int *data)
{
	unsigned int i, ustore_addr;

	qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
	uaddr |= UA_ECS;
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
	for (i = 0; i < words_num; i++) {
		unsigned int uwrd_lo, uwrd_hi, tmp;

		uwrd_lo = ((data[i] & 0xfff0000) << 4) | (0x3 << 18) |
			  ((data[i] & 0xff00) << 2) |
			  (0x3 << 8) | (data[i] & 0xff);
		uwrd_hi = (0xf << 4) | ((data[i] & 0xf0000000) >> 28);
		uwrd_hi |= (hweight32(data[i] & 0xffff) & 0x1) << 8;
		tmp = ((data[i] >> 0x10) & 0xffff);
		uwrd_hi |= (hweight32(tmp) & 0x1) << 9;
		qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
		qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
	}
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
}

#define MAX_EXEC_INST 100
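/*
 * Execute a short microcode sequence on one context: save the context's
 * indirect CSRs, ustore contents and enable state, load micro_inst at
 * address 0, run it for up to max_cycle cycles waiting for the AE to go
 * idle, then restore everything that was saved.
 */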
static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
				   unsigned char ae, unsigned char ctx,
				   uint64_t *micro_inst, unsigned int inst_num,
				   int code_off, unsigned int max_cycle,
				   unsigned int *endpc)
{
	uint64_t savuwords[MAX_EXEC_INST];
	unsigned int ind_lm_addr0, ind_lm_addr1;
	unsigned int ind_lm_addr_byte0, ind_lm_addr_byte1;
	unsigned int ind_cnt_sig;
	unsigned int ind_sig, act_sig;
	unsigned int csr_val = 0, newcsr_val;
	unsigned int savctx;
	unsigned int savcc, wakeup_events, savpc;
	unsigned int ctxarb_ctl, ctx_enables;

	if ((inst_num > handle->hal_handle->max_ustore) || !micro_inst) {
		pr_err("QAT: invalid instruction num %d\n", inst_num);
		return -EINVAL;
	}
	/* save current context */
	qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_0_INDIRECT, &ind_lm_addr0);
	qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_1_INDIRECT, &ind_lm_addr1);
	qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_0_BYTE_INDEX,
			    &ind_lm_addr_byte0);
	qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_1_BYTE_INDEX,
			    &ind_lm_addr_byte1);
	if (inst_num <= MAX_EXEC_INST)
		qat_hal_get_uwords(handle, ae, 0, inst_num, savuwords);
	qat_hal_get_wakeup_event(handle, ae, ctx, &wakeup_events);
	qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT, &savpc);
	savpc = (savpc & handle->hal_handle->upc_mask) >> 0;
	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
	ctx_enables &= IGNORE_W1C_MASK;
	qat_hal_rd_ae_csr(handle, ae, CC_ENABLE, &savcc);
	qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
	qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_ctl);
	qat_hal_rd_indr_csr(handle, ae, ctx, FUTURE_COUNT_SIGNAL_INDIRECT,
			    &ind_cnt_sig);
	qat_hal_rd_indr_csr(handle, ae, ctx, CTX_SIG_EVENTS_INDIRECT, &ind_sig);
	qat_hal_rd_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, &act_sig);
	/* execute micro codes */
	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
	qat_hal_wr_uwords(handle, ae, 0, inst_num, micro_inst);
	qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT, 0);
	qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, ctx & ACS_ACNO);
	if (code_off)
		qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc & 0xffffdfff);
	qat_hal_put_wakeup_event(handle, ae, (1 << ctx), XCWE_VOLUNTARY);
	qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_SIG_EVENTS_INDIRECT, 0);
	qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
	qat_hal_enable_ctx(handle, ae, (1 << ctx));
	/* wait for micro codes to finish */
	if (qat_hal_wait_cycles(handle, ae, max_cycle, 1) != 0)
		return -EFAULT;
	if (endpc) {
		unsigned int ctx_status;

		qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT,
				    &ctx_status);
		*endpc = ctx_status & handle->hal_handle->upc_mask;
	}
	/* restore to saved context */
	qat_hal_disable_ctx(handle, ae, (1 << ctx));
	if (inst_num <= MAX_EXEC_INST)
		qat_hal_wr_uwords(handle, ae, 0, inst_num, savuwords);
	qat_hal_put_wakeup_event(handle, ae, (1 << ctx), wakeup_events);
	qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT,
			    handle->hal_handle->upc_mask & savpc);
	qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
	newcsr_val = CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS);
	qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val);
	qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc);
	qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, savctx & ACS_ACNO);
	qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_ctl);
	qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
			    LM_ADDR_0_INDIRECT, ind_lm_addr0);
	qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
			    LM_ADDR_1_INDIRECT, ind_lm_addr1);
	qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
			    INDIRECT_LM_ADDR_0_BYTE_INDEX, ind_lm_addr_byte0);
	qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
			    INDIRECT_LM_ADDR_1_BYTE_INDEX, ind_lm_addr_byte1);
	qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
			    FUTURE_COUNT_SIGNAL_INDIRECT, ind_cnt_sig);
	qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
			    CTX_SIG_EVENTS_INDIRECT, ind_sig);
	qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, act_sig);
	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);

	return 0;
}

static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle,
			      unsigned char ae, unsigned char ctx,
			      enum icp_qat_uof_regtype reg_type,
			      unsigned short reg_num, unsigned int *data)
{
	unsigned int savctx, uaddr, uwrd_lo, uwrd_hi;
	unsigned int ctxarb_cntl, ustore_addr, ctx_enables;
	unsigned short reg_addr;
	int status = 0;
	uint64_t insts, savuword;

	reg_addr = qat_hal_get_reg_addr(reg_type, reg_num);
	if (reg_addr == BAD_REGADDR) {
		pr_err("QAT: bad regaddr=0x%x\n", reg_addr);
		return -EINVAL;
	}
	switch (reg_type) {
	case ICP_GPA_REL:
		insts = 0xA070000000ull | (reg_addr & 0x3ff);
		break;
	default:
		insts = (uint64_t)0xA030000000ull | ((reg_addr & 0x3ff) << 10);
		break;
	}
	qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
	qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_cntl);
	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
	ctx_enables &= IGNORE_W1C_MASK;
	if (ctx != (savctx & ACS_ACNO))
		qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
				  ctx & ACS_ACNO);
	qat_hal_get_uwords(handle, ae, 0, 1, &savuword);
	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
	qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
	uaddr = UA_ECS;
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
	insts = qat_hal_set_uword_ecc(insts);
	uwrd_lo = (unsigned int)(insts & 0xffffffff);
	uwrd_hi = (unsigned int)(insts >> 0x20);
	qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
	qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
	/* delay for at least 8 cycles */
	qat_hal_wait_cycles(handle, ae, 0x8, 0);
	/*
	 * read ALU output
	 * the instruction should have been executed
	 * prior to clearing the ECS in putUwords
	 */
	qat_hal_rd_ae_csr(handle, ae, ALU_OUT, data);
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
	qat_hal_wr_uwords(handle, ae, 0, 1, &savuword);
	if (ctx != (savctx & ACS_ACNO))
		qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
				  savctx & ACS_ACNO);
	qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_cntl);
	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);

	return status;
}

static int qat_hal_wr_rel_reg(struct icp_qat_fw_loader_handle *handle,
			      unsigned char ae, unsigned char ctx,
			      enum icp_qat_uof_regtype reg_type,
			      unsigned short reg_num, unsigned int data)
{
	unsigned short src_hiaddr, src_lowaddr, dest_addr, data16hi, data16lo;
	uint64_t insts[] = {
		0x0F440000000ull,
		0x0F040000000ull,
		0x0F0000C0300ull,
		0x0E000010000ull
	};
	const int num_inst = ARRAY_SIZE(insts), code_off = 1;
	const int imm_w1 = 0, imm_w0 = 1;

	dest_addr = qat_hal_get_reg_addr(reg_type, reg_num);
	if (dest_addr == BAD_REGADDR) {
		pr_err("QAT: bad destAddr=0x%x\n", dest_addr);
		return -EINVAL;
	}

	data16lo = 0xffff & data;
	data16hi = 0xffff & (data >> 0x10);
	src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)
					  (0xff & data16hi));
	src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)
					   (0xff & data16lo));
	switch (reg_type) {
	case ICP_GPA_REL:
		insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) |
		    ((src_hiaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);
		insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) |
		    ((src_lowaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);
		break;
	default:
		insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) |
		    ((dest_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff);

		insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) |
		    ((dest_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff);
		break;
	}

	return qat_hal_exec_micro_inst(handle, ae, ctx, insts, num_inst,
				       code_off, num_inst * 0x5, NULL);
}

int qat_hal_get_ins_num(void)
{
	return ARRAY_SIZE(inst_4b);
}

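/*
 * Append the inst_4b template to micro_inst and patch its immediate
 * operands with the target local-memory address and the 32-bit value to
 * store; returns the number of microwords added.
 */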
static int qat_hal_concat_micro_code(uint64_t *micro_inst,
				     unsigned int inst_num, unsigned int size,
				     unsigned int addr, unsigned int *value)
{
	int i;
	unsigned int cur_value;
	const uint64_t *inst_arr;
	int fixup_offset;
	int usize = 0;
	int orig_num;

	orig_num = inst_num;
	cur_value = value[0];
	inst_arr = inst_4b;
	usize = ARRAY_SIZE(inst_4b);
	fixup_offset = inst_num;
	for (i = 0; i < usize; i++)
		micro_inst[inst_num++] = inst_arr[i];
	INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], (addr));
	fixup_offset++;
	INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], 0);
	fixup_offset++;
	INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0));
	fixup_offset++;
	INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0x10));

	return inst_num - orig_num;
}

static int qat_hal_exec_micro_init_lm(struct icp_qat_fw_loader_handle *handle,
				      unsigned char ae, unsigned char ctx,
				      int *pfirst_exec, uint64_t *micro_inst,
				      unsigned int inst_num)
{
	int stat = 0;
	unsigned int gpra0 = 0, gpra1 = 0, gpra2 = 0;
	unsigned int gprb0 = 0, gprb1 = 0;

	if (*pfirst_exec) {
		qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, &gpra0);
		qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, &gpra1);
		qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, &gpra2);
		qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, &gprb0);
		qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, &gprb1);
		*pfirst_exec = 0;
	}
	stat = qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, inst_num, 1,
				       inst_num * 0x5, NULL);
	if (stat != 0)
		return -EFAULT;
	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, gpra0);
	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, gpra1);
	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, gpra2);
	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, gprb0);
	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, gprb1);

	return 0;
}

int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle,
			unsigned char ae,
			struct icp_qat_uof_batch_init *lm_init_header)
{
	struct icp_qat_uof_batch_init *plm_init;
	uint64_t *micro_inst_arry;
	int micro_inst_num;
	int alloc_inst_size;
	int first_exec = 1;
	int stat = 0;

	plm_init = lm_init_header->next;
	alloc_inst_size = lm_init_header->size;
	if ((unsigned int)alloc_inst_size > handle->hal_handle->max_ustore)
		alloc_inst_size = handle->hal_handle->max_ustore;
	micro_inst_arry = kmalloc_array(alloc_inst_size, sizeof(uint64_t),
					GFP_KERNEL);
	if (!micro_inst_arry)
		return -ENOMEM;
	micro_inst_num = 0;
	while (plm_init) {
		unsigned int addr, *value, size;

		ae = plm_init->ae;
		addr = plm_init->addr;
		value = plm_init->value;
		size = plm_init->size;
		micro_inst_num += qat_hal_concat_micro_code(micro_inst_arry,
							    micro_inst_num,
							    size, addr, value);
		plm_init = plm_init->next;
	}
	/* exec micro codes */
	if (micro_inst_arry && (micro_inst_num > 0)) {
		micro_inst_arry[micro_inst_num++] = 0x0E000010000ull;
		stat = qat_hal_exec_micro_init_lm(handle, ae, 0, &first_exec,
						  micro_inst_arry,
						  micro_inst_num);
	}
	kfree(micro_inst_arry);
	return stat;
}

static int qat_hal_put_rel_rd_xfer(struct icp_qat_fw_loader_handle *handle,
				   unsigned char ae, unsigned char ctx,
				   enum icp_qat_uof_regtype reg_type,
				   unsigned short reg_num, unsigned int val)
{
	int status = 0;
	unsigned int reg_addr;
	unsigned int ctx_enables;
	unsigned short mask;
	unsigned short dr_offset = 0x10;

	status = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
	if (CE_INUSE_CONTEXTS & ctx_enables) {
		if (ctx & 0x1) {
			pr_err("QAT: bad 4-ctx mode,ctx=0x%x\n", ctx);
			return -EINVAL;
		}
		mask = 0x1f;
		dr_offset = 0x20;
	} else {
		mask = 0x0f;
	}
	if (reg_num & ~mask)
		return -EINVAL;
	reg_addr = reg_num + (ctx << 0x5);
	switch (reg_type) {
	case ICP_SR_RD_REL:
	case ICP_SR_REL:
		SET_AE_XFER(handle, ae, reg_addr, val);
		break;
	case ICP_DR_RD_REL:
	case ICP_DR_REL:
		SET_AE_XFER(handle, ae, (reg_addr + dr_offset), val);
		break;
	default:
		status = -EINVAL;
		break;
	}
	return status;
}

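/*
 * Write a context-relative write-transfer register by running a small
 * microcode sequence that moves the immediate data through a GPR; the
 * GPR's previous contents are saved and restored around the operation.
 */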
static int qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle,
				   unsigned char ae, unsigned char ctx,
				   enum icp_qat_uof_regtype reg_type,
				   unsigned short reg_num, unsigned int data)
{
	unsigned int gprval, ctx_enables;
	unsigned short src_hiaddr, src_lowaddr, gpr_addr, xfr_addr, data16hi,
	    data16low;
	unsigned short reg_mask;
	int status = 0;
	uint64_t micro_inst[] = {
		0x0F440000000ull,
		0x0F040000000ull,
		0x0A000000000ull,
		0x0F0000C0300ull,
		0x0E000010000ull
	};
	const int num_inst = ARRAY_SIZE(micro_inst), code_off = 1;
	const unsigned short gprnum = 0, dly = num_inst * 0x5;

	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
	if (CE_INUSE_CONTEXTS & ctx_enables) {
		if (ctx & 0x1) {
			pr_err("QAT: 4-ctx mode,ctx=0x%x\n", ctx);
			return -EINVAL;
		}
		reg_mask = (unsigned short)~0x1f;
	} else {
		reg_mask = (unsigned short)~0xf;
	}
	if (reg_num & reg_mask)
		return -EINVAL;
	xfr_addr = qat_hal_get_reg_addr(reg_type, reg_num);
	if (xfr_addr == BAD_REGADDR) {
		pr_err("QAT: bad xfrAddr=0x%x\n", xfr_addr);
		return -EINVAL;
	}
	qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, &gprval);
	gpr_addr = qat_hal_get_reg_addr(ICP_GPB_REL, gprnum);
	data16low = 0xffff & data;
	data16hi = 0xffff & (data >> 0x10);
	src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
					  (unsigned short)(0xff & data16hi));
	src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
					   (unsigned short)(0xff & data16low));
	micro_inst[0] = micro_inst[0x0] | ((data16hi >> 8) << 20) |
	    ((gpr_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff);
	micro_inst[1] = micro_inst[0x1] | ((data16low >> 8) << 20) |
	    ((gpr_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff);
	micro_inst[0x2] = micro_inst[0x2] |
	    ((xfr_addr & 0x3ff) << 20) | ((gpr_addr & 0x3ff) << 10);
	status = qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, num_inst,
					 code_off, dly, NULL);
	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, gprval);
	return status;
}

static int qat_hal_put_rel_nn(struct icp_qat_fw_loader_handle *handle,
			      unsigned char ae, unsigned char ctx,
			      unsigned short nn, unsigned int val)
{
	unsigned int ctx_enables;
	int stat = 0;

	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
	ctx_enables &= IGNORE_W1C_MASK;
	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables | CE_NN_MODE);

	stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, ICP_NEIGH_REL, nn, val);
	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
	return stat;
}

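/*
 * Convert an absolute register number into a context-relative register
 * number plus the context that owns it, based on 4- or 8-context mode.
 */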
static int qat_hal_convert_abs_to_rel(struct icp_qat_fw_loader_handle
				      *handle, unsigned char ae,
				      unsigned short absreg_num,
				      unsigned short *relreg,
				      unsigned char *ctx)
{
	unsigned int ctx_enables;

	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
	if (ctx_enables & CE_INUSE_CONTEXTS) {
		/* 4-ctx mode */
		*relreg = absreg_num & 0x1F;
		*ctx = (absreg_num >> 0x4) & 0x6;
	} else {
		/* 8-ctx mode */
		*relreg = absreg_num & 0x0F;
		*ctx = (absreg_num >> 0x4) & 0x7;
	}
	return 0;
}

int qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle,
		     unsigned char ae, unsigned char ctx_mask,
		     enum icp_qat_uof_regtype reg_type,
		     unsigned short reg_num, unsigned int regdata)
{
	int stat = 0;
	unsigned short reg;
	unsigned char ctx = 0;
	enum icp_qat_uof_regtype type;

	if (reg_num >= ICP_QAT_UCLO_MAX_GPR_REG)
		return -EINVAL;

	do {
		if (ctx_mask == 0) {
			qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
						   &ctx);
			type = reg_type - 1;
		} else {
			reg = reg_num;
			type = reg_type;
			if (!test_bit(ctx, (unsigned long *)&ctx_mask))
				continue;
		}
		stat = qat_hal_wr_rel_reg(handle, ae, ctx, type, reg, regdata);
		if (stat) {
			pr_err("QAT: write gpr fail\n");
			return -EINVAL;
		}
	} while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));

	return 0;
}

int qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle,
			 unsigned char ae, unsigned char ctx_mask,
			 enum icp_qat_uof_regtype reg_type,
			 unsigned short reg_num, unsigned int regdata)
{
	int stat = 0;
	unsigned short reg;
	unsigned char ctx = 0;
	enum icp_qat_uof_regtype type;

	if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG)
		return -EINVAL;

	do {
		if (ctx_mask == 0) {
			qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
						   &ctx);
			type = reg_type - 3;
		} else {
			reg = reg_num;
			type = reg_type;
			if (!test_bit(ctx, (unsigned long *)&ctx_mask))
				continue;
		}
		stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, type, reg,
					       regdata);
		if (stat) {
			pr_err("QAT: write wr xfer fail\n");
			return -EINVAL;
		}
	} while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));

	return 0;
}

int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle,
			 unsigned char ae, unsigned char ctx_mask,
			 enum icp_qat_uof_regtype reg_type,
			 unsigned short reg_num, unsigned int regdata)
{
	int stat = 0;
	unsigned short reg;
	unsigned char ctx = 0;
	enum icp_qat_uof_regtype type;

	if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG)
		return -EINVAL;

	do {
		if (ctx_mask == 0) {
			qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
						   &ctx);
			type = reg_type - 3;
		} else {
			reg = reg_num;
			type = reg_type;
			if (!test_bit(ctx, (unsigned long *)&ctx_mask))
				continue;
		}
		stat = qat_hal_put_rel_rd_xfer(handle, ae, ctx, type, reg,
					       regdata);
		if (stat) {
			pr_err("QAT: write rd xfer fail\n");
			return -EINVAL;
		}
	} while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));

	return 0;
}

int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle,
		    unsigned char ae, unsigned char ctx_mask,
		    unsigned short reg_num, unsigned int regdata)
{
	int stat = 0;
	unsigned char ctx;

	if (ctx_mask == 0)
		return -EINVAL;

	for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
		if (!test_bit(ctx, (unsigned long *)&ctx_mask))
			continue;
		stat = qat_hal_put_rel_nn(handle, ae, ctx, reg_num, regdata);
		if (stat) {
			pr_err("QAT: write neigh error\n");
			return -EINVAL;
		}
	}

	return 0;
}