/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and /or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/qed/qede_rdma.h>
#include "qede.h"

/* The single currently-registered qedr (RDMA) driver; NULL until
 * qede_rdma_register_driver() is called.
 */
static struct qedr_driver *qedr_drv;
/* Every RDMA-capable qede device, linked through rdma_info.entry. */
static LIST_HEAD(qedr_dev_list);
/* Serializes qedr_dev_list access and driver (un)registration. */
static DEFINE_MUTEX(qedr_dev_list_lock);
42 | ||
bbfcd1e8 | 43 | bool qede_rdma_supported(struct qede_dev *dev) |
cee9fbd8 RA |
44 | { |
45 | return dev->dev_info.common.rdma_supported; | |
46 | } | |
47 | ||
bbfcd1e8 | 48 | static void _qede_rdma_dev_add(struct qede_dev *edev) |
cee9fbd8 RA |
49 | { |
50 | if (!qedr_drv) | |
51 | return; | |
52 | ||
53 | edev->rdma_info.qedr_dev = qedr_drv->add(edev->cdev, edev->pdev, | |
54 | edev->ndev); | |
55 | } | |
56 | ||
bbfcd1e8 | 57 | static int qede_rdma_create_wq(struct qede_dev *edev) |
cee9fbd8 | 58 | { |
bbfcd1e8 MK |
59 | INIT_LIST_HEAD(&edev->rdma_info.rdma_event_list); |
60 | edev->rdma_info.rdma_wq = create_singlethread_workqueue("rdma_wq"); | |
61 | if (!edev->rdma_info.rdma_wq) { | |
cee9fbd8 RA |
62 | DP_NOTICE(edev, "qedr: Could not create workqueue\n"); |
63 | return -ENOMEM; | |
64 | } | |
65 | ||
66 | return 0; | |
67 | } | |
68 | ||
bbfcd1e8 | 69 | static void qede_rdma_cleanup_event(struct qede_dev *edev) |
cee9fbd8 | 70 | { |
bbfcd1e8 MK |
71 | struct list_head *head = &edev->rdma_info.rdma_event_list; |
72 | struct qede_rdma_event_work *event_node; | |
cee9fbd8 | 73 | |
bbfcd1e8 | 74 | flush_workqueue(edev->rdma_info.rdma_wq); |
cee9fbd8 | 75 | while (!list_empty(head)) { |
bbfcd1e8 | 76 | event_node = list_entry(head->next, struct qede_rdma_event_work, |
cee9fbd8 RA |
77 | list); |
78 | cancel_work_sync(&event_node->work); | |
79 | list_del(&event_node->list); | |
80 | kfree(event_node); | |
81 | } | |
82 | } | |
83 | ||
/* Tear down the per-device RDMA workqueue: reap all event nodes first,
 * then destroy the (now idle) workqueue itself.
 */
static void qede_rdma_destroy_wq(struct qede_dev *edev)
{
	qede_rdma_cleanup_event(edev);
	destroy_workqueue(edev->rdma_info.rdma_wq);
}
89 | ||
bbfcd1e8 | 90 | int qede_rdma_dev_add(struct qede_dev *edev) |
cee9fbd8 RA |
91 | { |
92 | int rc = 0; | |
93 | ||
bbfcd1e8 MK |
94 | if (qede_rdma_supported(edev)) { |
95 | rc = qede_rdma_create_wq(edev); | |
cee9fbd8 RA |
96 | if (rc) |
97 | return rc; | |
98 | ||
99 | INIT_LIST_HEAD(&edev->rdma_info.entry); | |
100 | mutex_lock(&qedr_dev_list_lock); | |
101 | list_add_tail(&edev->rdma_info.entry, &qedr_dev_list); | |
bbfcd1e8 | 102 | _qede_rdma_dev_add(edev); |
cee9fbd8 RA |
103 | mutex_unlock(&qedr_dev_list_lock); |
104 | } | |
105 | ||
106 | return rc; | |
107 | } | |
108 | ||
bbfcd1e8 | 109 | static void _qede_rdma_dev_remove(struct qede_dev *edev) |
cee9fbd8 RA |
110 | { |
111 | if (qedr_drv && qedr_drv->remove && edev->rdma_info.qedr_dev) | |
112 | qedr_drv->remove(edev->rdma_info.qedr_dev); | |
113 | edev->rdma_info.qedr_dev = NULL; | |
114 | } | |
115 | ||
/* Undo qede_rdma_dev_add(): destroy the workqueue first so no deferred
 * event can run against a half-torn-down device, then unregister from
 * qedr and drop the device from the global list.
 */
void qede_rdma_dev_remove(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	qede_rdma_destroy_wq(edev);
	mutex_lock(&qedr_dev_list_lock);
	_qede_rdma_dev_remove(edev);
	list_del(&edev->rdma_info.entry);
	mutex_unlock(&qedr_dev_list_lock);
}
127 | ||
bbfcd1e8 | 128 | static void _qede_rdma_dev_open(struct qede_dev *edev) |
cee9fbd8 RA |
129 | { |
130 | if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify) | |
131 | qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_UP); | |
132 | } | |
133 | ||
/* Locked wrapper: notify qedr that the device went operational. */
static void qede_rdma_dev_open(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	mutex_lock(&qedr_dev_list_lock);
	_qede_rdma_dev_open(edev);
	mutex_unlock(&qedr_dev_list_lock);
}
143 | ||
bbfcd1e8 | 144 | static void _qede_rdma_dev_close(struct qede_dev *edev) |
cee9fbd8 RA |
145 | { |
146 | if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify) | |
147 | qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_DOWN); | |
148 | } | |
149 | ||
/* Locked wrapper: notify qedr that the device went down. */
static void qede_rdma_dev_close(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	mutex_lock(&qedr_dev_list_lock);
	_qede_rdma_dev_close(edev);
	mutex_unlock(&qedr_dev_list_lock);
}
159 | ||
bbfcd1e8 | 160 | static void qede_rdma_dev_shutdown(struct qede_dev *edev) |
cee9fbd8 | 161 | { |
bbfcd1e8 | 162 | if (!qede_rdma_supported(edev)) |
cee9fbd8 RA |
163 | return; |
164 | ||
165 | mutex_lock(&qedr_dev_list_lock); | |
166 | if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify) | |
167 | qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CLOSE); | |
168 | mutex_unlock(&qedr_dev_list_lock); | |
169 | } | |
170 | ||
/* Entry point for the qedr module: attach it to qede.
 *
 * Records @drv as the single registered RDMA driver, then walks every
 * qede device discovered so far, registers each with qedr, and replays
 * a QEDE_UP notification for interfaces that are already running so
 * qedr learns their current link state.
 *
 * Returns 0 on success, -EINVAL if a driver is already registered.
 */
int qede_rdma_register_driver(struct qedr_driver *drv)
{
	struct qede_dev *edev;
	u8 qedr_counter = 0;

	mutex_lock(&qedr_dev_list_lock);
	if (qedr_drv) {
		mutex_unlock(&qedr_dev_list_lock);
		return -EINVAL;
	}
	qedr_drv = drv;

	list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
		struct net_device *ndev;

		qedr_counter++;
		_qede_rdma_dev_add(edev);
		ndev = edev->ndev;
		/* Replay link-up for interfaces that are already up */
		if (netif_running(ndev) && netif_oper_up(ndev))
			_qede_rdma_dev_open(edev);
	}
	mutex_unlock(&qedr_dev_list_lock);

	pr_notice("qedr: discovered and registered %d RDMA funcs\n",
		  qedr_counter);

	return 0;
}
EXPORT_SYMBOL(qede_rdma_register_driver);
cee9fbd8 | 200 | |
/* Entry point for qedr module unload: detach every device that was
 * registered with it, then forget the driver. Devices remain on
 * qedr_dev_list so a future register_driver() can re-attach them.
 */
void qede_rdma_unregister_driver(struct qedr_driver *drv)
{
	struct qede_dev *edev;

	mutex_lock(&qedr_dev_list_lock);
	list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
		if (edev->rdma_info.qedr_dev)
			_qede_rdma_dev_remove(edev);
	}
	qedr_drv = NULL;
	mutex_unlock(&qedr_dev_list_lock);
}
EXPORT_SYMBOL(qede_rdma_unregister_driver);
cee9fbd8 | 214 | |
bbfcd1e8 | 215 | static void qede_rdma_changeaddr(struct qede_dev *edev) |
cee9fbd8 | 216 | { |
bbfcd1e8 | 217 | if (!qede_rdma_supported(edev)) |
cee9fbd8 RA |
218 | return; |
219 | ||
220 | if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify) | |
221 | qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CHANGE_ADDR); | |
222 | } | |
223 | ||
bbfcd1e8 MK |
224 | static struct qede_rdma_event_work * |
225 | qede_rdma_get_free_event_node(struct qede_dev *edev) | |
cee9fbd8 | 226 | { |
bbfcd1e8 | 227 | struct qede_rdma_event_work *event_node = NULL; |
cee9fbd8 RA |
228 | struct list_head *list_node = NULL; |
229 | bool found = false; | |
230 | ||
bbfcd1e8 MK |
231 | list_for_each(list_node, &edev->rdma_info.rdma_event_list) { |
232 | event_node = list_entry(list_node, struct qede_rdma_event_work, | |
cee9fbd8 RA |
233 | list); |
234 | if (!work_pending(&event_node->work)) { | |
235 | found = true; | |
236 | break; | |
237 | } | |
238 | } | |
239 | ||
240 | if (!found) { | |
97dbcc64 | 241 | event_node = kzalloc(sizeof(*event_node), GFP_ATOMIC); |
cee9fbd8 RA |
242 | if (!event_node) { |
243 | DP_NOTICE(edev, | |
bbfcd1e8 | 244 | "qedr: Could not allocate memory for rdma work\n"); |
cee9fbd8 RA |
245 | return NULL; |
246 | } | |
247 | list_add_tail(&event_node->list, | |
bbfcd1e8 | 248 | &edev->rdma_info.rdma_event_list); |
cee9fbd8 RA |
249 | } |
250 | ||
251 | return event_node; | |
252 | } | |
253 | ||
bbfcd1e8 | 254 | static void qede_rdma_handle_event(struct work_struct *work) |
cee9fbd8 | 255 | { |
bbfcd1e8 MK |
256 | struct qede_rdma_event_work *event_node; |
257 | enum qede_rdma_event event; | |
cee9fbd8 RA |
258 | struct qede_dev *edev; |
259 | ||
bbfcd1e8 | 260 | event_node = container_of(work, struct qede_rdma_event_work, work); |
cee9fbd8 RA |
261 | event = event_node->event; |
262 | edev = event_node->ptr; | |
263 | ||
264 | switch (event) { | |
265 | case QEDE_UP: | |
bbfcd1e8 | 266 | qede_rdma_dev_open(edev); |
cee9fbd8 RA |
267 | break; |
268 | case QEDE_DOWN: | |
bbfcd1e8 | 269 | qede_rdma_dev_close(edev); |
cee9fbd8 RA |
270 | break; |
271 | case QEDE_CLOSE: | |
bbfcd1e8 | 272 | qede_rdma_dev_shutdown(edev); |
cee9fbd8 RA |
273 | break; |
274 | case QEDE_CHANGE_ADDR: | |
bbfcd1e8 | 275 | qede_rdma_changeaddr(edev); |
cee9fbd8 RA |
276 | break; |
277 | default: | |
bbfcd1e8 | 278 | DP_NOTICE(edev, "Invalid rdma event %d", event); |
cee9fbd8 RA |
279 | } |
280 | } | |
281 | ||
/* Queue an RDMA event for deferred handling on the per-device
 * workqueue. Silently dropped when the device is not registered with
 * qedr, or when no event node can be obtained (allocation failure).
 */
static void qede_rdma_add_event(struct qede_dev *edev,
				enum qede_rdma_event event)
{
	/* No qedr device attached - nobody to notify */
	if (!edev->rdma_info.qedr_dev)
		return;

	struct qede_rdma_event_work *event_node;

	if (!edev->rdma_info.qedr_dev)
		return;

	event_node = qede_rdma_get_free_event_node(edev);
	if (!event_node)
		return;

	event_node->event = event;
	event_node->ptr = edev;

	INIT_WORK(&event_node->work, qede_rdma_handle_event);
	queue_work(edev->rdma_info.rdma_wq, &event_node->work);
}
300 | ||
/* Deferred-notification hook: queue a QEDE_UP event for qedr. */
void qede_rdma_dev_event_open(struct qede_dev *edev)
{
	qede_rdma_add_event(edev, QEDE_UP);
}
305 | ||
/* Deferred-notification hook: queue a QEDE_DOWN event for qedr. */
void qede_rdma_dev_event_close(struct qede_dev *edev)
{
	qede_rdma_add_event(edev, QEDE_DOWN);
}
310 | ||
/* Deferred-notification hook: queue a QEDE_CHANGE_ADDR event for qedr. */
void qede_rdma_event_changeaddr(struct qede_dev *edev)
{
	qede_rdma_add_event(edev, QEDE_CHANGE_ADDR);
}