/*
  This file is provided under a dual BSD/GPLv2 license. When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_transport.h"
#include "adf_cfg.h"
#include "adf_cfg_strings.h"
#include "qat_crypto.h"
#include "icp_qat_fw.h"

#define SEC ADF_KERNEL_SEC

static struct service_hndl qat_crypto;

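/*
 * qat_crypto_put_instance() - Drop a reference to a crypto instance.
 *
 * When the last reference goes away, also release the accel device
 * reference taken in qat_crypto_get_instance_node().
 */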
void qat_crypto_put_instance(struct qat_crypto_instance *inst)
{
	if (atomic_sub_return(1, &inst->refctr) == 0)
		adf_dev_put(inst->accel_dev);
}

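/*
 * Tear down every crypto instance on a device: drop any outstanding
 * instance references, remove the sym/pke/rnd tx and rx rings, and
 * free the instance structures.
 */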
static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
{
	struct qat_crypto_instance *inst;
	struct list_head *list_ptr, *tmp;
	int i;

	list_for_each_safe(list_ptr, tmp, &accel_dev->crypto_list) {
		inst = list_entry(list_ptr, struct qat_crypto_instance, list);

		for (i = 0; i < atomic_read(&inst->refctr); i++)
			qat_crypto_put_instance(inst);

		if (inst->sym_tx)
			adf_remove_ring(inst->sym_tx);

		if (inst->sym_rx)
			adf_remove_ring(inst->sym_rx);

		if (inst->pke_tx)
			adf_remove_ring(inst->pke_tx);

		if (inst->pke_rx)
			adf_remove_ring(inst->pke_rx);

		if (inst->rnd_tx)
			adf_remove_ring(inst->rnd_tx);

		if (inst->rnd_rx)
			adf_remove_ring(inst->rnd_rx);

		list_del(list_ptr);
		kfree(inst);
	}
	return 0;
}

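/*
 * qat_crypto_get_instance_node() - Pick the least-loaded crypto instance,
 * preferring a started device local to the given NUMA node (or one that is
 * not NUMA-bound). If no such device exists, fall back to the first
 * registered device. Taking the first reference on an instance also pins
 * the underlying accel device via adf_dev_get().
 */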
struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
{
	struct adf_accel_dev *accel_dev = NULL;
	struct qat_crypto_instance *inst_best = NULL;
	struct list_head *itr;
	unsigned long best = ~0;

	list_for_each(itr, adf_devmgr_get_head()) {
		accel_dev = list_entry(itr, struct adf_accel_dev, list);
		if ((node == dev_to_node(&GET_DEV(accel_dev)) ||
		     dev_to_node(&GET_DEV(accel_dev)) < 0) &&
		    adf_dev_started(accel_dev))
			break;
		accel_dev = NULL;
	}
	if (!accel_dev) {
		pr_err("QAT: Could not find a device on node %d\n", node);
		accel_dev = adf_devmgr_get_first();
	}
	if (!accel_dev || !adf_dev_started(accel_dev))
		return NULL;

	list_for_each(itr, &accel_dev->crypto_list) {
		struct qat_crypto_instance *inst;
		unsigned long cur;

		inst = list_entry(itr, struct qat_crypto_instance, list);
		cur = atomic_read(&inst->refctr);
		if (best > cur) {
			inst_best = inst;
			best = cur;
		}
	}
	if (inst_best) {
		if (atomic_add_return(1, &inst_best->refctr) == 1) {
			if (adf_dev_get(accel_dev)) {
				atomic_dec(&inst_best->refctr);
				dev_err(&GET_DEV(accel_dev),
					"Could not increment dev refctr\n");
				return NULL;
			}
		}
	}
	return inst_best;
}

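/*
 * Build the crypto instances described by the device configuration. For
 * each instance, look up its ring bank and the sym/asym ring sizes, then
 * create the sym, rnd and pke request (tx) rings and the matching
 * response (rx) rings, the latter serviced by qat_alg_callback(). On any
 * failure, all partially created instances are freed and -ENOMEM is
 * returned.
 */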
static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
{
	int i;
	unsigned long bank;
	unsigned long num_inst, num_msg_sym, num_msg_asym;
	int msg_size;
	struct qat_crypto_instance *inst;
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];

	INIT_LIST_HEAD(&accel_dev->crypto_list);
	strlcpy(key, ADF_NUM_CY, sizeof(key));

	if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
		return -EFAULT;

	if (kstrtoul(val, 0, &num_inst))
		return -EFAULT;

	for (i = 0; i < num_inst; i++) {
		inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
				    dev_to_node(&GET_DEV(accel_dev)));
		if (!inst)
			goto err;

		list_add_tail(&inst->list, &accel_dev->crypto_list);
		inst->id = i;
		atomic_set(&inst->refctr, 0);
		inst->accel_dev = accel_dev;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i);
		if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
			goto err;

		if (kstrtoul(val, 10, &bank))
			goto err;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
		if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
			goto err;

		if (kstrtoul(val, 10, &num_msg_sym))
			goto err;
		num_msg_sym = num_msg_sym >> 1;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
		if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
			goto err;

		if (kstrtoul(val, 10, &num_msg_asym))
			goto err;
		num_msg_asym = num_msg_asym >> 1;

		msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
		if (adf_create_ring(accel_dev, SEC, bank, num_msg_sym,
				    msg_size, key, NULL, 0, &inst->sym_tx))
			goto err;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_TX, i);
		if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
				    msg_size, key, NULL, 0, &inst->rnd_tx))
			goto err;

		msg_size = msg_size >> 1;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
		if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
				    msg_size, key, NULL, 0, &inst->pke_tx))
			goto err;

		msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
		if (adf_create_ring(accel_dev, SEC, bank, num_msg_sym,
				    msg_size, key, qat_alg_callback, 0,
				    &inst->sym_rx))
			goto err;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_RX, i);
		if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
				    msg_size, key, qat_alg_callback, 0,
				    &inst->rnd_rx))
			goto err;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
		if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
				    msg_size, key, qat_alg_callback, 0,
				    &inst->pke_rx))
			goto err;
	}
	return 0;
err:
	qat_crypto_free_instances(accel_dev);
	return -ENOMEM;
}

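/* Lifecycle hooks invoked from qat_crypto_event_handler() below. */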
static int qat_crypto_init(struct adf_accel_dev *accel_dev)
{
	if (qat_crypto_create_instances(accel_dev))
		return -EFAULT;

	return 0;
}

static int qat_crypto_shutdown(struct adf_accel_dev *accel_dev)
{
	return qat_crypto_free_instances(accel_dev);
}

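/*
 * Dispatch ADF device lifecycle events: create the crypto instances on
 * ADF_EVENT_INIT and free them on ADF_EVENT_SHUTDOWN; all other events
 * are ignored.
 */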
static int qat_crypto_event_handler(struct adf_accel_dev *accel_dev,
				    enum adf_event event)
{
	int ret;

	switch (event) {
	case ADF_EVENT_INIT:
		ret = qat_crypto_init(accel_dev);
		break;
	case ADF_EVENT_SHUTDOWN:
		ret = qat_crypto_shutdown(accel_dev);
		break;
	case ADF_EVENT_RESTARTING:
	case ADF_EVENT_RESTARTED:
	case ADF_EVENT_START:
	case ADF_EVENT_STOP:
	default:
		ret = 0;
	}
	return ret;
}

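/*
 * Register/unregister the qat_crypto service with the ADF framework so
 * that it receives device lifecycle events via qat_crypto_event_handler().
 */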
int qat_crypto_register(void)
{
	memset(&qat_crypto, 0, sizeof(qat_crypto));
	qat_crypto.event_hld = qat_crypto_event_handler;
	qat_crypto.name = "qat_crypto";
	return adf_service_register(&qat_crypto);
}

int qat_crypto_unregister(void)
{
	return adf_service_unregister(&qat_crypto);
}