/**
 * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
 *
 * Copyright (C) 2011-2016 Chelsio Communications. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written and Maintained by:
 *	Manoj Malviya (manojmalviya@chelsio.com)
 *	Atul Gupta (atul.gupta@chelsio.com)
 *	Jitendra Lulla (jlulla@chelsio.com)
 *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 *	Harsh Jain (harsh@chelsio.com)
 */
17 | ||
18 | #include <linux/kernel.h> | |
19 | #include <linux/module.h> | |
20 | #include <linux/skbuff.h> | |
21 | ||
22 | #include <crypto/aes.h> | |
23 | #include <crypto/hash.h> | |
24 | ||
25 | #include "t4_msg.h" | |
26 | #include "chcr_core.h" | |
27 | #include "cxgb4_uld.h" | |
28 | ||
/* All registered adapter contexts (struct uld_ctx); guarded by dev_mutex. */
static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);
/* Number of chcr devices currently instantiated (see chcr_dev_add/remove). */
static atomic_t dev_count;

/* Handler for one firmware CPL reply message delivered to this driver. */
typedef int (*chcr_handler_func)(struct chcr_dev *dev, unsigned char *input);
static int cpl_fw6_pld_handler(struct chcr_dev *dev, unsigned char *input);
static void *chcr_uld_add(const struct cxgb4_lld_info *lld);
static int chcr_uld_state_change(void *handle, enum cxgb4_state state);

/* Dispatch table indexed by CPL opcode; only CPL_FW6_PLD is populated. */
static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
	[CPL_FW6_PLD] = cpl_fw6_pld_handler,
};
41 | ||
/*
 * Registration info handed to the cxgb4 ULD framework: device add and
 * state-change callbacks plus the rx handler that receives firmware
 * replies for this driver's queues.
 */
static struct cxgb4_uld_info chcr_uld_info = {
	.name = DRV_MODULE_NAME,
	.nrxq = MAX_ULD_QSETS,
	.rxq_size = 1024,
	.add = chcr_uld_add,
	.state_change = chcr_uld_state_change,
	.rx_handler = chcr_uld_rx_handler,
};
50 | ||
51 | int assign_chcr_device(struct chcr_dev **dev) | |
52 | { | |
53 | struct uld_ctx *u_ctx; | |
54 | ||
55 | /* | |
56 | * Which device to use if multiple devices are available TODO | |
57 | * May be select the device based on round robin. One session | |
58 | * must go to the same device to maintain the ordering. | |
59 | */ | |
60 | mutex_lock(&dev_mutex); /* TODO ? */ | |
61 | u_ctx = list_first_entry(&uld_ctx_list, struct uld_ctx, entry); | |
62 | if (!u_ctx) { | |
63 | mutex_unlock(&dev_mutex); | |
64 | return -ENXIO; | |
65 | } | |
66 | ||
67 | *dev = u_ctx->dev; | |
68 | mutex_unlock(&dev_mutex); | |
69 | return 0; | |
70 | } | |
71 | ||
72 | static int chcr_dev_add(struct uld_ctx *u_ctx) | |
73 | { | |
74 | struct chcr_dev *dev; | |
75 | ||
76 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); | |
77 | if (!dev) | |
78 | return -ENXIO; | |
79 | ||
80 | spin_lock_init(&dev->lock_chcr_dev); | |
81 | u_ctx->dev = dev; | |
82 | dev->u_ctx = u_ctx; | |
83 | atomic_inc(&dev_count); | |
84 | return 0; | |
85 | } | |
86 | ||
87 | static int chcr_dev_remove(struct uld_ctx *u_ctx) | |
88 | { | |
89 | kfree(u_ctx->dev); | |
90 | u_ctx->dev = NULL; | |
91 | atomic_dec(&dev_count); | |
92 | return 0; | |
93 | } | |
94 | ||
/*
 * cpl_fw6_pld_handler - process a CPL_FW6_PLD firmware reply carrying
 * the result of a crypto work request.
 * @dev:   device the reply arrived on (unused here; kept to match the
 *         work_handlers[] signature).
 * @input: raw CPL message, laid out as struct cpl_fw6_pld.
 *
 * Returns 0 on success, -EFAULT when the firmware echoed back a NULL
 * request cookie.
 */
static int cpl_fw6_pld_handler(struct chcr_dev *dev,
			       unsigned char *input)
{
	struct crypto_async_request *req;
	struct cpl_fw6_pld *fw6_pld;
	u32 ack_err_status = 0;
	int error_status = 0;

	fw6_pld = (struct cpl_fw6_pld *)input;
	/* data[1] carries the request pointer stashed in the work request
	 * (big-endian on the wire, hence be64_to_cpu).
	 */
	req = (struct crypto_async_request *)(uintptr_t)be64_to_cpu(
		fw6_pld->data[1]);

	/* Error status bits live in the upper 32 bits of data[0]. */
	ack_err_status =
		ntohl(*(__be32 *)((unsigned char *)&fw6_pld->data[0] + 4));
	if (ack_err_status) {
		if (CHK_MAC_ERR_BIT(ack_err_status) ||
		    CHK_PAD_ERR_BIT(ack_err_status))
			error_status = -EBADMSG;
	}
	/* call completion callback with failure status */
	if (req) {
		error_status = chcr_handle_resp(req, input, error_status);
		req->complete(req, error_status);
	} else {
		pr_err("Incorrect request address from the firmware\n");
		return -EFAULT;
	}
	return 0;
}
124 | ||
125 | int chcr_send_wr(struct sk_buff *skb) | |
126 | { | |
127 | return cxgb4_ofld_send(skb->dev, skb); | |
128 | } | |
129 | ||
130 | static void *chcr_uld_add(const struct cxgb4_lld_info *lld) | |
131 | { | |
132 | struct uld_ctx *u_ctx; | |
133 | ||
134 | /* Create the device and add it in the device list */ | |
135 | u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL); | |
136 | if (!u_ctx) { | |
137 | u_ctx = ERR_PTR(-ENOMEM); | |
138 | goto out; | |
139 | } | |
140 | u_ctx->lldi = *lld; | |
141 | mutex_lock(&dev_mutex); | |
142 | list_add_tail(&u_ctx->entry, &uld_ctx_list); | |
143 | mutex_unlock(&dev_mutex); | |
144 | out: | |
145 | return u_ctx; | |
146 | } | |
147 | ||
148 | int chcr_uld_rx_handler(void *handle, const __be64 *rsp, | |
149 | const struct pkt_gl *pgl) | |
150 | { | |
151 | struct uld_ctx *u_ctx = (struct uld_ctx *)handle; | |
152 | struct chcr_dev *dev = u_ctx->dev; | |
153 | const struct cpl_act_establish *rpl = (struct cpl_act_establish | |
154 | *)rsp; | |
155 | ||
156 | if (rpl->ot.opcode != CPL_FW6_PLD) { | |
157 | pr_err("Unsupported opcode\n"); | |
158 | return 0; | |
159 | } | |
160 | ||
161 | if (!pgl) | |
162 | work_handlers[rpl->ot.opcode](dev, (unsigned char *)&rsp[1]); | |
163 | else | |
164 | work_handlers[rpl->ot.opcode](dev, pgl->va); | |
165 | return 0; | |
166 | } | |
167 | ||
/*
 * chcr_uld_state_change - cxgb4 callback notifying adapter state changes.
 * @handle: the uld_ctx returned from chcr_uld_add().
 * @state:  new adapter state.
 *
 * On UP, instantiate the chcr device (if not already present) and start
 * the crypto service when the first device appears.  On DETACH, tear the
 * device down and stop the service when the last one goes away.
 * Returns 0, or the error from chcr_dev_add()/start_crypto().
 *
 * NOTE(review): chcr_dev_add() is called without dev_mutex while the
 * DETACH path takes it, and the dev_count checks are not atomic with the
 * add/remove — looks racy if two adapters change state concurrently;
 * confirm against cxgb4's callback serialization guarantees.
 */
static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
{
	struct uld_ctx *u_ctx = handle;
	int ret = 0;

	switch (state) {
	case CXGB4_STATE_UP:
		if (!u_ctx->dev) {
			ret = chcr_dev_add(u_ctx);
			if (ret != 0)
				return ret;
		}
		/* First device up: bring up the crypto service. */
		if (atomic_read(&dev_count) == 1)
			ret = start_crypto();
		break;

	case CXGB4_STATE_DETACH:
		if (u_ctx->dev) {
			mutex_lock(&dev_mutex);
			chcr_dev_remove(u_ctx);
			mutex_unlock(&dev_mutex);
		}
		/* Last device gone: shut the crypto service down. */
		if (!atomic_read(&dev_count))
			stop_crypto();
		break;

	case CXGB4_STATE_START_RECOVERY:
	case CXGB4_STATE_DOWN:
	default:
		break;
	}
	return ret;
}
201 | ||
202 | static int __init chcr_crypto_init(void) | |
203 | { | |
0fbc81b3 | 204 | if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info)) { |
324429d7 HS |
205 | pr_err("ULD register fail: No chcr crypto support in cxgb4"); |
206 | return -1; | |
207 | } | |
208 | ||
209 | return 0; | |
210 | } | |
211 | ||
/*
 * chcr_crypto_exit - module exit: stop the crypto service, free every
 * adapter context, and unregister from cxgb4.
 *
 * The contexts are freed without list_del(); that is tolerable only
 * because the whole list (head included) ceases to exist here — nothing
 * walks it afterwards.
 */
static void __exit chcr_crypto_exit(void)
{
	struct uld_ctx *u_ctx, *tmp;

	if (atomic_read(&dev_count))
		stop_crypto();

	/* Remove all devices from list */
	mutex_lock(&dev_mutex);
	list_for_each_entry_safe(u_ctx, tmp, &uld_ctx_list, entry) {
		if (u_ctx->dev)
			chcr_dev_remove(u_ctx);
		kfree(u_ctx);
	}
	mutex_unlock(&dev_mutex);
	cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);
}
229 | ||
230 | module_init(chcr_crypto_init); | |
231 | module_exit(chcr_crypto_exit); | |
232 | ||
233 | MODULE_DESCRIPTION("Crypto Co-processor for Chelsio Terminator cards."); | |
234 | MODULE_LICENSE("GPL"); | |
235 | MODULE_AUTHOR("Chelsio Communications"); | |
236 | MODULE_VERSION(DRV_VERSION); |