// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "rvu.h"
#include "cgx.h"

struct cgx_evq_entry {
	struct list_head evq_node;
	struct cgx_link_event link_event;
};

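/* Generate one otx2_mbox_alloc_msg_<name>() helper per entry in
 * MBOX_UP_CGX_MESSAGES.  Each helper allocates an AF-to-PF "up" mailbox
 * request (e.g. otx2_mbox_alloc_msg_cgx_link_event() used below) and
 * fills in the common message header.
 */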
#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
static struct _req_type __maybe_unused					\
*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid)		\
{									\
	struct _req_type *req;						\
									\
	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
		&rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
		sizeof(struct _rsp_type));				\
	if (!req)							\
		return NULL;						\
	req->hdr.sig = OTX2_MBOX_REQ_SIG;				\
	req->hdr.id = _id;						\
	return req;							\
}

MBOX_UP_CGX_MESSAGES
#undef M

/* Returns bitmap of mapped PFs */
static inline u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
{
	return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
}

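/* Pack a cgx and lmac id pair into one byte: cgx id in the upper
 * nibble, lmac id in the lower nibble.
 */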
static inline u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
{
	return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
}

void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
{
	if (cgx_id >= rvu->cgx_cnt_max)
		return NULL;

	return rvu->cgx_idmap[cgx_id];
}

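/* Build the PF <-> CGX LMAC mappings: pf2cgxlmac_map[] holds the packed
 * cgx/lmac id for each PF and cgxlmac2pf_map[] holds the PF bitmap for
 * each LMAC.  A free pkind is also reserved for every mapped PF.
 */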
static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
{
	struct npc_pkind *pkind = &rvu->hw->pkind;
	int cgx_cnt_max = rvu->cgx_cnt_max;
	int cgx, lmac_cnt, lmac;
	int pf = PF_CGXMAP_BASE;
	int size, free_pkind;

	if (!cgx_cnt_max)
		return 0;

	if (cgx_cnt_max > 0xF || MAX_LMAC_PER_CGX > 0xF)
		return -EINVAL;

	/* Alloc map table
	 * An additional entry is required since PF id starts from 1 and
	 * hence entry at offset 0 is invalid.
	 */
	size = (cgx_cnt_max * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
	rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL);
	if (!rvu->pf2cgxlmac_map)
		return -ENOMEM;

	/* Initialize all entries with an invalid cgx and lmac id */
	memset(rvu->pf2cgxlmac_map, 0xFF, size);

	/* Reverse map table */
	rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev,
				  cgx_cnt_max * MAX_LMAC_PER_CGX * sizeof(u16),
				  GFP_KERNEL);
	if (!rvu->cgxlmac2pf_map)
		return -ENOMEM;

	rvu->cgx_mapped_pfs = 0;
	for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
		if (!rvu_cgx_pdata(cgx, rvu))
			continue;
		lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
		for (lmac = 0; lmac < lmac_cnt; lmac++, pf++) {
			rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
			rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
			free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
			pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
			rvu->cgx_mapped_pfs++;
		}
	}
	return 0;
}

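/* Read the current link state of the given cgx/lmac, queue it as a link
 * event and kick the event worker so the mapped PF gets notified.
 */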
static int rvu_cgx_send_link_info(int cgx_id, int lmac_id, struct rvu *rvu)
{
	struct cgx_evq_entry *qentry;
	unsigned long flags;
	int err;

	qentry = kmalloc(sizeof(*qentry), GFP_KERNEL);
	if (!qentry)
		return -ENOMEM;

	/* Lock the event queue before we read the local link status */
	spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
	err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
				&qentry->link_event.link_uinfo);
	qentry->link_event.cgx_id = cgx_id;
	qentry->link_event.lmac_id = lmac_id;
	if (err) {
		/* Entry was never queued; free it to avoid leaking it */
		kfree(qentry);
		goto skip_add;
	}
	list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
skip_add:
	spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);

	/* start worker to process the events */
	queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);

	return 0;
}

/* This is called from interrupt context and is expected to be atomic */
static int cgx_lmac_postevent(struct cgx_link_event *event, void *data)
{
	struct cgx_evq_entry *qentry;
	struct rvu *rvu = data;

	/* post event to the event queue */
	qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
	if (!qentry)
		return -ENOMEM;
	qentry->link_event = *event;
	spin_lock(&rvu->cgx_evq_lock);
	list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
	spin_unlock(&rvu->cgx_evq_lock);

	/* start worker to process the events */
	queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);

	return 0;
}

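/* Walk the PF bitmap for this cgx/lmac and forward the link event to
 * each PF that has link notifications enabled; for the rest only log
 * the new link state.
 */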
static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
{
	struct cgx_link_user_info *linfo;
	struct cgx_link_info_msg *msg;
	unsigned long pfmap;
	int err, pfid;

	linfo = &event->link_uinfo;
	pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);

	do {
		pfid = find_first_bit(&pfmap, 16);
		clear_bit(pfid, &pfmap);

		/* check if notification is enabled */
		if (!test_bit(pfid, &rvu->pf_notify_bmap)) {
			dev_info(rvu->dev, "cgx %d: lmac %d Link status %s\n",
				 event->cgx_id, event->lmac_id,
				 linfo->link_up ? "UP" : "DOWN");
			continue;
		}

		/* Send mbox message to PF */
		msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid);
		if (!msg)
			continue;
		msg->link_info = *linfo;
		otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pfid);
		err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid);
		if (err)
			dev_warn(rvu->dev, "notification to pf %d failed\n",
				 pfid);
	} while (pfmap);
}

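/* Workqueue handler: drain the event queue and notify the mapped PFs
 * for every queued link event.
 */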
static void cgx_evhandler_task(struct work_struct *work)
{
	struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work);
	struct cgx_evq_entry *qentry;
	struct cgx_link_event *event;
	unsigned long flags;

	do {
		/* Dequeue an event */
		spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
		qentry = list_first_entry_or_null(&rvu->cgx_evq_head,
						  struct cgx_evq_entry,
						  evq_node);
		if (qentry)
			list_del(&qentry->evq_node);
		spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
		if (!qentry)
			break; /* nothing more to process */

		event = &qentry->link_event;

		/* process event */
		cgx_notify_pfs(event, rvu);
		kfree(qentry);
	} while (1);
}

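/* Set up the link event queue and worker, and register the atomic
 * cgx_lmac_postevent() callback with every LMAC of every CGX device.
 */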
static int cgx_lmac_event_handler_init(struct rvu *rvu)
{
	struct cgx_event_cb cb;
	int cgx, lmac, err;
	void *cgxd;

	spin_lock_init(&rvu->cgx_evq_lock);
	INIT_LIST_HEAD(&rvu->cgx_evq_head);
	INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task);
	rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
	if (!rvu->cgx_evh_wq) {
		dev_err(rvu->dev, "alloc workqueue failed");
		return -ENOMEM;
	}

	cb.notify_link_chg = cgx_lmac_postevent; /* link change call back */
	cb.data = rvu;

	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++) {
			err = cgx_lmac_evh_register(&cb, cgxd, lmac);
			if (err)
				dev_err(rvu->dev,
					"%d:%d handler register failed\n",
					cgx, lmac);
		}
	}

	return 0;
}

static void rvu_cgx_wq_destroy(struct rvu *rvu)
{
	if (rvu->cgx_evh_wq) {
		flush_workqueue(rvu->cgx_evh_wq);
		destroy_workqueue(rvu->cgx_evh_wq);
		rvu->cgx_evh_wq = NULL;
	}
}

int rvu_cgx_init(struct rvu *rvu)
{
	int cgx, err;
	void *cgxd;

	/* CGX port ids start from 0 and are not necessarily contiguous.
	 * Hence we allocate resources based on the maximum port id value.
	 */
	rvu->cgx_cnt_max = cgx_get_cgxcnt_max();
	if (!rvu->cgx_cnt_max) {
		dev_info(rvu->dev, "No CGX devices found!\n");
		return -ENODEV;
	}

	rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max *
				      sizeof(void *), GFP_KERNEL);
	if (!rvu->cgx_idmap)
		return -ENOMEM;

	/* Initialize the cgxdata table */
	for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++)
		rvu->cgx_idmap[cgx] = cgx_get_pdata(cgx);

	/* Map CGX LMAC interfaces to RVU PFs */
	err = rvu_map_cgx_lmac_pf(rvu);
	if (err)
		return err;

	/* Register for CGX events */
	err = cgx_lmac_event_handler_init(rvu);
	if (err)
		return err;

	/* Ensure event handler registration is completed, before
	 * we turn on the links
	 */
	mb();

	/* Do link up for all CGX ports */
	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		err = cgx_lmac_linkup_start(cgxd);
		if (err)
			dev_err(rvu->dev,
				"Link up process failed to start on cgx %d\n",
				cgx);
	}

	return 0;
}

int rvu_cgx_exit(struct rvu *rvu)
{
	int cgx, lmac;
	void *cgxd;

	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++)
			cgx_lmac_evh_unregister(cgxd, lmac);
	}

	/* Ensure event handler unregister is completed */
	mb();

	rvu_cgx_wq_destroy(rvu);
	return 0;
}

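/* Enable or disable packet Rx/Tx on the CGX LMAC mapped to this PF. */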
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
{
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	/* This msg is expected only from PFs that are mapped to CGX LMACs;
	 * if received from any other PF/VF, simply ACK, nothing to do.
	 */
	if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_rx_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, start);

	return 0;
}

int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
				    struct msg_rsp *rsp)
{
	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
				   struct msg_rsp *rsp)
{
	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false);
	return 0;
}

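/* Return the per-LMAC Rx and Tx hardware statistics for the requesting PF. */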
int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
			       struct cgx_stats_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	int stat = 0, err = 0;
	u64 tx_stat, rx_stat;
	u8 cgx_idx, lmac;
	void *cgxd;

	if ((req->hdr.pcifunc & RVU_PFVF_FUNC_MASK) ||
	    !is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);

	/* Rx stats */
	while (stat < CGX_RX_STATS_COUNT) {
		err = cgx_get_rx_stats(cgxd, lmac, stat, &rx_stat);
		if (err)
			return err;
		rsp->rx_stats[stat] = rx_stat;
		stat++;
	}

	/* Tx stats */
	stat = 0;
	while (stat < CGX_TX_STATS_COUNT) {
		err = cgx_get_tx_stats(cgxd, lmac, stat, &tx_stat);
		if (err)
			return err;
		rsp->tx_stats[stat] = tx_stat;
		stat++;
	}
	return 0;
}

int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);

	return 0;
}

int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;
	int rc = 0, i;
	u64 cfg;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	rsp->hdr.rc = rc;
	cfg = cgx_lmac_addr_get(cgx_id, lmac_id);
	/* copy 48 bit mac address to rsp->mac_addr */
	for (i = 0; i < ETH_ALEN; i++)
		rsp->mac_addr[i] = cfg >> (ETH_ALEN - 1 - i) * 8;
	return 0;
}

int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	/* This msg is expected only from PFs that are mapped to CGX LMACs;
	 * if received from any other PF/VF, simply ACK, nothing to do.
	 */
	if ((req->hdr.pcifunc & RVU_PFVF_FUNC_MASK) ||
	    !is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_promisc_config(cgx_id, lmac_id, true);
	return 0;
}

int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	/* This msg is expected only from PFs that are mapped to CGX LMACs;
	 * if received from any other PF/VF, simply ACK, nothing to do.
	 */
	if ((req->hdr.pcifunc & RVU_PFVF_FUNC_MASK) ||
	    !is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_promisc_config(cgx_id, lmac_id, false);
	return 0;
}

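/* Enable or disable link change notifications for this PF.  When
 * enabling, the current link state is sent to the PF right away.
 */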
static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
{
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	/* This msg is expected only from PFs that are mapped to CGX LMACs;
	 * if received from any other PF/VF, simply ACK, nothing to do.
	 */
	if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	if (en) {
		set_bit(pf, &rvu->pf_notify_bmap);
		/* Send the current link status to PF */
		rvu_cgx_send_link_info(cgx_id, lmac_id, rvu);
	} else {
		clear_bit(pf, &rvu->pf_notify_bmap);
	}

	return 0;
}

int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req,
					  struct msg_rsp *rsp)
{
	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp)
{
	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, false);
	return 0;
}

int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
				      struct cgx_link_info_msg *rsp)
{
	u8 cgx_id, lmac_id;
	int pf, err;

	pf = rvu_get_pf(req->hdr.pcifunc);

	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
				&rsp->link_info);
	return err;
}

static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
{
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	/* This msg is expected only from PFs that are mapped to CGX LMACs;
	 * if received from any other PF/VF, simply ACK, nothing to do.
	 */
	if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	return cgx_lmac_internal_loopback(rvu_cgx_pdata(cgx_id, rvu),
					  lmac_id, en);
}

int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req,
				       struct msg_rsp *rsp)
{
	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false);
	return 0;
}