// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * linux/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
 *
 * eHEA ethernet device driver for IBM eServer System p
 *
 * (C) Copyright IBM Corp. 2006
 *
 * Authors:
 *	Christoph Raisch <raisch@de.ibm.com>
 *	Jan-Bernd Themann <themann@de.ibm.com>
 *	Thomas Klein <tklein@de.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/slab.h>
#include "ehea.h"
#include "ehea_phyp.h"
#include "ehea_qmr.h"

static struct ehea_bmap *ehea_bmap;

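/*
 * Return the current queue page and advance the iterator to the next
 * one.  Returns NULL once the end of the queue is reached or if the
 * entry is not aligned to an eHEA page boundary.
 */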
static void *hw_qpageit_get_inc(struct hw_queue *queue)
{
        void *retvalue = hw_qeit_get(queue);

        queue->current_q_offset += queue->pagesize;
        if (queue->current_q_offset > queue->queue_length) {
                queue->current_q_offset -= queue->pagesize;
                retvalue = NULL;
        } else if (((u64) retvalue) & (EHEA_PAGESIZE-1)) {
                pr_err("not on page boundary\n");
                retvalue = NULL;
        }
        return retvalue;
}

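/*
 * Allocate the backing store for a hardware queue: whole kernel pages
 * are allocated and carved up into (possibly smaller) eHEA queue pages,
 * which are recorded in queue->queue_pages.
 */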
static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
                         const u32 pagesize, const u32 qe_size)
{
        int pages_per_kpage = PAGE_SIZE / pagesize;
        int i, k;

        if ((pagesize > PAGE_SIZE) || (!pages_per_kpage)) {
                pr_err("pagesize conflict! kernel pagesize=%d, ehea pagesize=%d\n",
                       (int)PAGE_SIZE, (int)pagesize);
                return -EINVAL;
        }

        queue->queue_length = nr_of_pages * pagesize;
        /* zeroed so the error path below can tell which slots were filled */
        queue->queue_pages = kcalloc(nr_of_pages, sizeof(void *), GFP_KERNEL);
        if (!queue->queue_pages)
                return -ENOMEM;

        /*
         * allocate pages for queue:
         * outer loop allocates whole kernel pages (page aligned) and
         * inner loop divides a kernel page into smaller hea queue pages
         */
        i = 0;
        while (i < nr_of_pages) {
                u8 *kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
                if (!kpage)
                        goto out_nomem;
                for (k = 0; k < pages_per_kpage && i < nr_of_pages; k++) {
                        (queue->queue_pages)[i] = (struct ehea_page *)kpage;
                        kpage += pagesize;
                        i++;
                }
        }

        queue->current_q_offset = 0;
        queue->qe_size = qe_size;
        queue->pagesize = pagesize;
        queue->toggle_state = 1;

        return 0;
out_nomem:
        for (i = 0; i < nr_of_pages; i += pages_per_kpage) {
                if (!(queue->queue_pages)[i])
                        break;
                free_page((unsigned long)(queue->queue_pages)[i]);
        }
        return -ENOMEM;
}

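/* Free the kernel pages backing a hardware queue. */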
static void hw_queue_dtor(struct hw_queue *queue)
{
        int pages_per_kpage;
        int i, nr_pages;

        if (!queue || !queue->queue_pages)
                return;

        pages_per_kpage = PAGE_SIZE / queue->pagesize;

        nr_pages = queue->queue_length / queue->pagesize;

        for (i = 0; i < nr_pages; i += pages_per_kpage)
                free_page((unsigned long)(queue->queue_pages)[i]);

        kfree(queue->queue_pages);
}

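/*
 * Create a completion queue: allocate the firmware resource, build the
 * backing hardware queue and register each queue page with the
 * hypervisor.
 */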
struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
                               int nr_of_cqe, u64 eq_handle, u32 cq_token)
{
        struct ehea_cq *cq;
        u64 hret, rpage;
        u32 counter;
        int ret;
        void *vpage;

        cq = kzalloc(sizeof(*cq), GFP_KERNEL);
        if (!cq)
                goto out_nomem;

        cq->attr.max_nr_of_cqes = nr_of_cqe;
        cq->attr.cq_token = cq_token;
        cq->attr.eq_handle = eq_handle;

        cq->adapter = adapter;

        hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr,
                                        &cq->fw_handle, &cq->epas);
        if (hret != H_SUCCESS) {
                pr_err("alloc_resource_cq failed\n");
                goto out_freemem;
        }

        ret = hw_queue_ctor(&cq->hw_queue, cq->attr.nr_pages,
                            EHEA_PAGESIZE, sizeof(struct ehea_cqe));
        if (ret)
                goto out_freeres;

        for (counter = 0; counter < cq->attr.nr_pages; counter++) {
                vpage = hw_qpageit_get_inc(&cq->hw_queue);
                if (!vpage) {
                        pr_err("hw_qpageit_get_inc failed\n");
                        goto out_kill_hwq;
                }

                rpage = __pa(vpage);
                hret = ehea_h_register_rpage(adapter->handle,
                                             0, EHEA_CQ_REGISTER_ORIG,
                                             cq->fw_handle, rpage, 1);
                if (hret < H_SUCCESS) {
                        pr_err("register_rpage_cq failed ehea_cq=%p hret=%llx counter=%i act_pages=%i\n",
                               cq, hret, counter, cq->attr.nr_pages);
                        goto out_kill_hwq;
                }

                if (counter == (cq->attr.nr_pages - 1)) {
                        vpage = hw_qpageit_get_inc(&cq->hw_queue);

                        if ((hret != H_SUCCESS) || (vpage)) {
                                pr_err("registration of pages not complete hret=%llx\n",
                                       hret);
                                goto out_kill_hwq;
                        }
                } else {
                        if (hret != H_PAGE_REGISTERED) {
                                pr_err("CQ: registration of page failed hret=%llx\n",
                                       hret);
                                goto out_kill_hwq;
                        }
                }
        }

        hw_qeit_reset(&cq->hw_queue);
        ehea_reset_cq_ep(cq);
        ehea_reset_cq_n1(cq);

        return cq;

out_kill_hwq:
        hw_queue_dtor(&cq->hw_queue);

out_freeres:
        ehea_h_free_resource(adapter->handle, cq->fw_handle, FORCE_FREE);

out_freemem:
        kfree(cq);

out_nomem:
        return NULL;
}

static u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
{
        u64 hret;
        u64 adapter_handle = cq->adapter->handle;

        /* deregister all previously registered pages */
        hret = ehea_h_free_resource(adapter_handle, cq->fw_handle, force);
        if (hret != H_SUCCESS)
                return hret;

        hw_queue_dtor(&cq->hw_queue);
        kfree(cq);

        return hret;
}

int ehea_destroy_cq(struct ehea_cq *cq)
{
        u64 hret, aer, aerr;

        if (!cq)
                return 0;

        hcp_epas_dtor(&cq->epas);
        hret = ehea_destroy_cq_res(cq, NORMAL_FREE);
        if (hret == H_R_STATE) {
                ehea_error_data(cq->adapter, cq->fw_handle, &aer, &aerr);
                hret = ehea_destroy_cq_res(cq, FORCE_FREE);
        }

        if (hret != H_SUCCESS) {
                pr_err("destroy CQ failed\n");
                return -EIO;
        }

        return 0;
}

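/*
 * Create an event queue: allocate the firmware resource, build the
 * backing hardware queue and register each queue page with the
 * hypervisor.
 */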
struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
                               const enum ehea_eq_type type,
                               const u32 max_nr_of_eqes, const u8 eqe_gen)
{
        int ret, i;
        u64 hret, rpage;
        void *vpage;
        struct ehea_eq *eq;

        eq = kzalloc(sizeof(*eq), GFP_KERNEL);
        if (!eq)
                return NULL;

        eq->adapter = adapter;
        eq->attr.type = type;
        eq->attr.max_nr_of_eqes = max_nr_of_eqes;
        eq->attr.eqe_gen = eqe_gen;
        spin_lock_init(&eq->spinlock);

        hret = ehea_h_alloc_resource_eq(adapter->handle,
                                        &eq->attr, &eq->fw_handle);
        if (hret != H_SUCCESS) {
                pr_err("alloc_resource_eq failed\n");
                goto out_freemem;
        }

        ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages,
                            EHEA_PAGESIZE, sizeof(struct ehea_eqe));
        if (ret) {
                pr_err("can't allocate eq pages\n");
                goto out_freeres;
        }

        for (i = 0; i < eq->attr.nr_pages; i++) {
                vpage = hw_qpageit_get_inc(&eq->hw_queue);
                if (!vpage) {
                        pr_err("hw_qpageit_get_inc failed\n");
                        hret = H_RESOURCE;
                        goto out_kill_hwq;
                }

                rpage = __pa(vpage);

                hret = ehea_h_register_rpage(adapter->handle, 0,
                                             EHEA_EQ_REGISTER_ORIG,
                                             eq->fw_handle, rpage, 1);

                if (i == (eq->attr.nr_pages - 1)) {
                        /* last page */
                        vpage = hw_qpageit_get_inc(&eq->hw_queue);
                        if ((hret != H_SUCCESS) || (vpage))
                                goto out_kill_hwq;
                } else {
                        if (hret != H_PAGE_REGISTERED)
                                goto out_kill_hwq;
                }
        }

        hw_qeit_reset(&eq->hw_queue);
        return eq;

out_kill_hwq:
        hw_queue_dtor(&eq->hw_queue);

out_freeres:
        ehea_h_free_resource(adapter->handle, eq->fw_handle, FORCE_FREE);

out_freemem:
        kfree(eq);
        return NULL;
}

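/* Fetch the next valid EQ entry, if any, under the EQ spinlock. */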
struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq)
{
        struct ehea_eqe *eqe;
        unsigned long flags;

        spin_lock_irqsave(&eq->spinlock, flags);
        eqe = hw_eqit_eq_get_inc_valid(&eq->hw_queue);
        spin_unlock_irqrestore(&eq->spinlock, flags);

        return eqe;
}

static u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force)
{
        u64 hret;
        unsigned long flags;

        spin_lock_irqsave(&eq->spinlock, flags);
        hret = ehea_h_free_resource(eq->adapter->handle, eq->fw_handle, force);
        spin_unlock_irqrestore(&eq->spinlock, flags);

        if (hret != H_SUCCESS)
                return hret;

        hw_queue_dtor(&eq->hw_queue);
        kfree(eq);

        return hret;
}

int ehea_destroy_eq(struct ehea_eq *eq)
{
        u64 hret, aer, aerr;

        if (!eq)
                return 0;

        hcp_epas_dtor(&eq->epas);

        hret = ehea_destroy_eq_res(eq, NORMAL_FREE);
        if (hret == H_R_STATE) {
                ehea_error_data(eq->adapter, eq->fw_handle, &aer, &aerr);
                hret = ehea_destroy_eq_res(eq, FORCE_FREE);
        }

        if (hret != H_SUCCESS) {
                pr_err("destroy EQ failed\n");
                return -EIO;
        }

        return 0;
}

/* allocates memory for a queue and registers pages in phyp */
static int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
                                  int nr_pages, int wqe_size, int act_nr_sges,
                                  struct ehea_adapter *adapter, int h_call_q_selector)
{
        u64 hret, rpage;
        int ret, cnt;
        void *vpage;

        ret = hw_queue_ctor(hw_queue, nr_pages, EHEA_PAGESIZE, wqe_size);
        if (ret)
                return ret;

        for (cnt = 0; cnt < nr_pages; cnt++) {
                vpage = hw_qpageit_get_inc(hw_queue);
                if (!vpage) {
                        pr_err("hw_qpageit_get_inc failed\n");
                        goto out_kill_hwq;
                }
                rpage = __pa(vpage);
                hret = ehea_h_register_rpage(adapter->handle,
                                             0, h_call_q_selector,
                                             qp->fw_handle, rpage, 1);
                if (hret < H_SUCCESS) {
                        pr_err("register_rpage_qp failed\n");
                        goto out_kill_hwq;
                }
        }
        hw_qeit_reset(hw_queue);
        return 0;

out_kill_hwq:
        hw_queue_dtor(hw_queue);
        return -EIO;
}

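/* WQE sizes are encoded as a shift count applied to a 128-byte base. */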
static inline u32 map_wqe_size(u8 wqe_enc_size)
{
        return 128 << wqe_enc_size;
}

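/*
 * Create a queue pair: allocate the firmware resource, then build and
 * register the send queue and up to three receive queues.
 */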
struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
                               u32 pd, struct ehea_qp_init_attr *init_attr)
{
        int ret;
        u64 hret;
        struct ehea_qp *qp;
        u32 wqe_size_in_bytes_sq, wqe_size_in_bytes_rq1;
        u32 wqe_size_in_bytes_rq2, wqe_size_in_bytes_rq3;

        qp = kzalloc(sizeof(*qp), GFP_KERNEL);
        if (!qp)
                return NULL;

        qp->adapter = adapter;

        hret = ehea_h_alloc_resource_qp(adapter->handle, init_attr, pd,
                                        &qp->fw_handle, &qp->epas);
        if (hret != H_SUCCESS) {
                pr_err("ehea_h_alloc_resource_qp failed\n");
                goto out_freemem;
        }

        wqe_size_in_bytes_sq = map_wqe_size(init_attr->act_wqe_size_enc_sq);
        wqe_size_in_bytes_rq1 = map_wqe_size(init_attr->act_wqe_size_enc_rq1);
        wqe_size_in_bytes_rq2 = map_wqe_size(init_attr->act_wqe_size_enc_rq2);
        wqe_size_in_bytes_rq3 = map_wqe_size(init_attr->act_wqe_size_enc_rq3);

        ret = ehea_qp_alloc_register(qp, &qp->hw_squeue, init_attr->nr_sq_pages,
                                     wqe_size_in_bytes_sq,
                                     init_attr->act_wqe_size_enc_sq, adapter,
                                     0);
        if (ret) {
                pr_err("can't register for sq ret=%x\n", ret);
                goto out_freeres;
        }

        ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue1,
                                     init_attr->nr_rq1_pages,
                                     wqe_size_in_bytes_rq1,
                                     init_attr->act_wqe_size_enc_rq1,
                                     adapter, 1);
        if (ret) {
                pr_err("can't register for rq1 ret=%x\n", ret);
                goto out_kill_hwsq;
        }

        if (init_attr->rq_count > 1) {
                ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue2,
                                             init_attr->nr_rq2_pages,
                                             wqe_size_in_bytes_rq2,
                                             init_attr->act_wqe_size_enc_rq2,
                                             adapter, 2);
                if (ret) {
                        pr_err("can't register for rq2 ret=%x\n", ret);
                        goto out_kill_hwr1q;
                }
        }

        if (init_attr->rq_count > 2) {
                ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue3,
                                             init_attr->nr_rq3_pages,
                                             wqe_size_in_bytes_rq3,
                                             init_attr->act_wqe_size_enc_rq3,
                                             adapter, 3);
                if (ret) {
                        pr_err("can't register for rq3 ret=%x\n", ret);
                        goto out_kill_hwr2q;
                }
        }

        qp->init_attr = *init_attr;

        return qp;

out_kill_hwr2q:
        hw_queue_dtor(&qp->hw_rqueue2);

out_kill_hwr1q:
        hw_queue_dtor(&qp->hw_rqueue1);

out_kill_hwsq:
        hw_queue_dtor(&qp->hw_squeue);

out_freeres:
        ehea_h_disable_and_get_hea(adapter->handle, qp->fw_handle);
        ehea_h_free_resource(adapter->handle, qp->fw_handle, FORCE_FREE);

out_freemem:
        kfree(qp);
        return NULL;
}

static u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
{
        u64 hret;
        struct ehea_qp_init_attr *qp_attr = &qp->init_attr;

        ehea_h_disable_and_get_hea(qp->adapter->handle, qp->fw_handle);
        hret = ehea_h_free_resource(qp->adapter->handle, qp->fw_handle, force);
        if (hret != H_SUCCESS)
                return hret;

        hw_queue_dtor(&qp->hw_squeue);
        hw_queue_dtor(&qp->hw_rqueue1);

        if (qp_attr->rq_count > 1)
                hw_queue_dtor(&qp->hw_rqueue2);
        if (qp_attr->rq_count > 2)
                hw_queue_dtor(&qp->hw_rqueue3);
        kfree(qp);

        return hret;
}

int ehea_destroy_qp(struct ehea_qp *qp)
{
        u64 hret, aer, aerr;

        if (!qp)
                return 0;

        hcp_epas_dtor(&qp->epas);

        hret = ehea_destroy_qp_res(qp, NORMAL_FREE);
        if (hret == H_R_STATE) {
                ehea_error_data(qp->adapter, qp->fw_handle, &aer, &aerr);
                hret = ehea_destroy_qp_res(qp, FORCE_FREE);
        }

        if (hret != H_SUCCESS) {
                pr_err("destroy QP failed\n");
                return -EIO;
        }

        return 0;
}

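/*
 * The busmap is a three-level table (top/dir/idx) that maps memory
 * sections to the contiguous bus address space used by the eHEA memory
 * region.
 */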
static inline int ehea_calc_index(unsigned long i, unsigned long s)
{
        return (i >> s) & EHEA_INDEX_MASK;
}

static inline int ehea_init_top_bmap(struct ehea_top_bmap *ehea_top_bmap,
                                     int dir)
{
        if (!ehea_top_bmap->dir[dir]) {
                ehea_top_bmap->dir[dir] =
                        kzalloc(sizeof(struct ehea_dir_bmap), GFP_KERNEL);
                if (!ehea_top_bmap->dir[dir])
                        return -ENOMEM;
        }
        return 0;
}

static inline int ehea_init_bmap(struct ehea_bmap *ehea_bmap, int top, int dir)
{
        if (!ehea_bmap->top[top]) {
                ehea_bmap->top[top] =
                        kzalloc(sizeof(struct ehea_top_bmap), GFP_KERNEL);
                if (!ehea_bmap->top[top])
                        return -ENOMEM;
        }
        return ehea_init_top_bmap(ehea_bmap->top[top], dir);
}

static DEFINE_MUTEX(ehea_busmap_mutex);
static unsigned long ehea_mr_len;

#define EHEA_BUSMAP_ADD_SECT 1
#define EHEA_BUSMAP_REM_SECT 0

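/*
 * Walk the whole busmap and assign ascending bus addresses to every
 * valid section; directory and top-level tables left without any valid
 * entries are freed.
 */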
static void ehea_rebuild_busmap(void)
{
        u64 vaddr = EHEA_BUSMAP_START;
        int top, dir, idx;

        for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
                struct ehea_top_bmap *ehea_top;
                int valid_dir_entries = 0;

                if (!ehea_bmap->top[top])
                        continue;
                ehea_top = ehea_bmap->top[top];
                for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
                        struct ehea_dir_bmap *ehea_dir;
                        int valid_entries = 0;

                        if (!ehea_top->dir[dir])
                                continue;
                        valid_dir_entries++;
                        ehea_dir = ehea_top->dir[dir];
                        for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
                                if (!ehea_dir->ent[idx])
                                        continue;
                                valid_entries++;
                                ehea_dir->ent[idx] = vaddr;
                                vaddr += EHEA_SECTSIZE;
                        }
                        if (!valid_entries) {
                                ehea_top->dir[dir] = NULL;
                                kfree(ehea_dir);
                        }
                }
                if (!valid_dir_entries) {
                        ehea_bmap->top[top] = NULL;
                        kfree(ehea_top);
                }
        }
}

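/* Add the sections covering a pfn range to the busmap, or remove them. */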
static int ehea_update_busmap(unsigned long pfn, unsigned long nr_pages, int add)
{
        unsigned long i, start_section, end_section;

        if (!nr_pages)
                return 0;

        if (!ehea_bmap) {
                ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL);
                if (!ehea_bmap)
                        return -ENOMEM;
        }

        start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE;
        end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE);
        /* Mark entries as valid or invalid only; address is assigned later */
        for (i = start_section; i < end_section; i++) {
                u64 flag;
                int top = ehea_calc_index(i, EHEA_TOP_INDEX_SHIFT);
                int dir = ehea_calc_index(i, EHEA_DIR_INDEX_SHIFT);
                int idx = i & EHEA_INDEX_MASK;

                if (add) {
                        int ret = ehea_init_bmap(ehea_bmap, top, dir);
                        if (ret)
                                return ret;
                        flag = 1; /* valid */
                        ehea_mr_len += EHEA_SECTSIZE;
                } else {
                        if (!ehea_bmap->top[top])
                                continue;
                        if (!ehea_bmap->top[top]->dir[dir])
                                continue;
                        flag = 0; /* invalid */
                        ehea_mr_len -= EHEA_SECTSIZE;
                }

                ehea_bmap->top[top]->dir[dir]->ent[idx] = flag;
        }
        ehea_rebuild_busmap(); /* Assign contiguous addresses for mr */
        return 0;
}

int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages)
{
        int ret;

        mutex_lock(&ehea_busmap_mutex);
        ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
        mutex_unlock(&ehea_busmap_mutex);
        return ret;
}

int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages)
{
        int ret;

        mutex_lock(&ehea_busmap_mutex);
        ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_REM_SECT);
        mutex_unlock(&ehea_busmap_mutex);
        return ret;
}

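/* Check whether pfn is the properly aligned first page of a hugepage. */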
static int ehea_is_hugepage(unsigned long pfn)
{
        if (pfn & EHEA_HUGEPAGE_PFN_MASK)
                return 0;

        if (page_shift(pfn_to_page(pfn)) != EHEA_HUGEPAGESHIFT)
                return 0;

        return 1;
}

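/*
 * walk_system_ram_range() callback: add a chunk of System RAM to the
 * busmap, leaving out any 16GB hugepages found inside it.
 */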
static int ehea_create_busmap_callback(unsigned long initial_pfn,
                                       unsigned long total_nr_pages, void *arg)
{
        int ret;
        unsigned long pfn, start_pfn, end_pfn, nr_pages;

        if ((total_nr_pages * PAGE_SIZE) < EHEA_HUGEPAGE_SIZE)
                return ehea_update_busmap(initial_pfn, total_nr_pages,
                                          EHEA_BUSMAP_ADD_SECT);

        /* Given chunk is >= 16GB -> check for hugepages */
        start_pfn = initial_pfn;
        end_pfn = initial_pfn + total_nr_pages;
        pfn = start_pfn;

        while (pfn < end_pfn) {
                if (ehea_is_hugepage(pfn)) {
                        /* Add mem found in front of the hugepage */
                        nr_pages = pfn - start_pfn;
                        ret = ehea_update_busmap(start_pfn, nr_pages,
                                                 EHEA_BUSMAP_ADD_SECT);
                        if (ret)
                                return ret;

                        /* Skip the hugepage */
                        pfn += (EHEA_HUGEPAGE_SIZE / PAGE_SIZE);
                        start_pfn = pfn;
                } else {
                        pfn += (EHEA_SECTSIZE / PAGE_SIZE);
                }
        }

        /* Add mem found behind the hugepage(s) */
        nr_pages = pfn - start_pfn;
        return ehea_update_busmap(start_pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
}

int ehea_create_busmap(void)
{
        int ret;

        mutex_lock(&ehea_busmap_mutex);
        ehea_mr_len = 0;
        ret = walk_system_ram_range(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
                                    ehea_create_busmap_callback);
        mutex_unlock(&ehea_busmap_mutex);
        return ret;
}

void ehea_destroy_busmap(void)
{
        int top, dir;

        mutex_lock(&ehea_busmap_mutex);
        if (!ehea_bmap)
                goto out_destroy;

        for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
                if (!ehea_bmap->top[top])
                        continue;

                for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
                        if (!ehea_bmap->top[top]->dir[dir])
                                continue;

                        kfree(ehea_bmap->top[top]->dir[dir]);
                }

                kfree(ehea_bmap->top[top]);
        }

        kfree(ehea_bmap);
        ehea_bmap = NULL;
out_destroy:
        mutex_unlock(&ehea_busmap_mutex);
}

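/*
 * Translate a kernel virtual address into the bus address space of the
 * memory region; returns EHEA_INVAL_ADDR for sections that are not
 * mapped.
 */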
u64 ehea_map_vaddr(void *caddr)
{
        int top, dir, idx;
        unsigned long index, offset;

        if (!ehea_bmap)
                return EHEA_INVAL_ADDR;

        index = __pa(caddr) >> SECTION_SIZE_BITS;
        top = (index >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK;
        if (!ehea_bmap->top[top])
                return EHEA_INVAL_ADDR;

        dir = (index >> EHEA_DIR_INDEX_SHIFT) & EHEA_INDEX_MASK;
        if (!ehea_bmap->top[top]->dir[dir])
                return EHEA_INVAL_ADDR;

        idx = index & EHEA_INDEX_MASK;
        if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
                return EHEA_INVAL_ADDR;

        offset = (unsigned long)caddr & (EHEA_SECTSIZE - 1);
        return ehea_bmap->top[top]->dir[dir]->ent[idx] | offset;
}

static inline void *ehea_calc_sectbase(int top, int dir, int idx)
{
        unsigned long ret = idx;

        ret |= dir << EHEA_DIR_INDEX_SHIFT;
        ret |= top << EHEA_TOP_INDEX_SHIFT;
        return __va(ret << SECTION_SIZE_BITS);
}

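/*
 * Register one memory section with the memory region: the section is
 * split into batches of EHEA_MAX_RPAGE pages whose physical addresses
 * are handed to the hypervisor through the page table at pt.
 */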
static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt,
                               struct ehea_adapter *adapter,
                               struct ehea_mr *mr)
{
        void *pg;
        u64 j, m, hret;
        unsigned long k = 0;
        u64 pt_abs = __pa(pt);

        void *sectbase = ehea_calc_sectbase(top, dir, idx);

        for (j = 0; j < (EHEA_PAGES_PER_SECTION / EHEA_MAX_RPAGE); j++) {
                for (m = 0; m < EHEA_MAX_RPAGE; m++) {
                        pg = sectbase + ((k++) * EHEA_PAGESIZE);
                        pt[m] = __pa(pg);
                }
                hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle, 0,
                                                0, pt_abs, EHEA_MAX_RPAGE);

                if ((hret != H_SUCCESS) &&
                    (hret != H_PAGE_REGISTERED)) {
                        ehea_h_free_resource(adapter->handle, mr->handle,
                                             FORCE_FREE);
                        pr_err("register_rpage_mr failed\n");
                        return hret;
                }
        }
        return hret;
}

static u64 ehea_reg_mr_sections(int top, int dir, u64 *pt,
                                struct ehea_adapter *adapter,
                                struct ehea_mr *mr)
{
        u64 hret = H_SUCCESS;
        int idx;

        for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
                if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
                        continue;

                hret = ehea_reg_mr_section(top, dir, idx, pt, adapter, mr);
                if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
                        return hret;
        }
        return hret;
}

static u64 ehea_reg_mr_dir_sections(int top, u64 *pt,
                                    struct ehea_adapter *adapter,
                                    struct ehea_mr *mr)
{
        u64 hret = H_SUCCESS;
        int dir;

        for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
                if (!ehea_bmap->top[top]->dir[dir])
                        continue;

                hret = ehea_reg_mr_sections(top, dir, pt, adapter, mr);
                if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
                        return hret;
        }
        return hret;
}

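/*
 * Allocate the kernel memory region covering the whole busmap and
 * register every valid section with the hypervisor.
 */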
int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
{
        int ret;
        u64 *pt;
        u64 hret;
        u32 acc_ctrl = EHEA_MR_ACC_CTRL;
        unsigned long top;

        pt = (void *)get_zeroed_page(GFP_KERNEL);
        if (!pt) {
                pr_err("no mem\n");
                ret = -ENOMEM;
                goto out;
        }

        hret = ehea_h_alloc_resource_mr(adapter->handle, EHEA_BUSMAP_START,
                                        ehea_mr_len, acc_ctrl, adapter->pd,
                                        &mr->handle, &mr->lkey);
        if (hret != H_SUCCESS) {
                pr_err("alloc_resource_mr failed\n");
                ret = -EIO;
                goto out;
        }

        if (!ehea_bmap) {
                ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
                pr_err("no busmap available\n");
                ret = -EIO;
                goto out;
        }

        for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
                if (!ehea_bmap->top[top])
                        continue;

                hret = ehea_reg_mr_dir_sections(top, pt, adapter, mr);
                if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS))
                        break;
        }

        if (hret != H_SUCCESS) {
                ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
                pr_err("registering mr failed\n");
                ret = -EIO;
                goto out;
        }

        mr->vaddr = EHEA_BUSMAP_START;
        mr->adapter = adapter;
        ret = 0;
out:
        free_page((unsigned long)pt);
        return ret;
}

int ehea_rem_mr(struct ehea_mr *mr)
{
        u64 hret;

        if (!mr || !mr->adapter)
                return -EINVAL;

        hret = ehea_h_free_resource(mr->adapter->handle, mr->handle,
                                    FORCE_FREE);
        if (hret != H_SUCCESS) {
                pr_err("destroy MR failed\n");
                return -EIO;
        }

        return 0;
}

int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
                 struct ehea_mr *shared_mr)
{
        u64 hret;

        hret = ehea_h_register_smr(adapter->handle, old_mr->handle,
                                   old_mr->vaddr, EHEA_MR_ACC_CTRL,
                                   adapter->pd, shared_mr);
        if (hret != H_SUCCESS)
                return -EIO;

        shared_mr->adapter = adapter;

        return 0;
}

static void print_error_data(u64 *data)
{
        int length;
        u64 type = EHEA_BMASK_GET(ERROR_DATA_TYPE, data[2]);
        u64 resource = data[1];

        length = EHEA_BMASK_GET(ERROR_DATA_LENGTH, data[0]);

        if (length > EHEA_PAGESIZE)
                length = EHEA_PAGESIZE;

        if (type == EHEA_AER_RESTYPE_QP)
                pr_err("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, port=%llX\n",
                       resource, data[6], data[12], data[22]);
        else if (type == EHEA_AER_RESTYPE_CQ)
                pr_err("CQ (resource=%llX) state: AER=0x%llX\n",
                       resource, data[6]);
        else if (type == EHEA_AER_RESTYPE_EQ)
                pr_err("EQ (resource=%llX) state: AER=0x%llX\n",
                       resource, data[6]);

        ehea_dump(data, length, "error data");
}

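/*
 * Fetch the error data block for a resource from the hypervisor, print
 * it and return the resource type (0 if no data could be fetched).
 */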
u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
                    u64 *aer, u64 *aerr)
{
        unsigned long ret;
        u64 *rblock;
        u64 type = 0;

        rblock = (void *)get_zeroed_page(GFP_KERNEL);
        if (!rblock) {
                pr_err("Cannot allocate rblock memory\n");
                goto out;
        }

        ret = ehea_h_error_data(adapter->handle, res_handle, rblock);

        if (ret == H_SUCCESS) {
                type = EHEA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
                *aer = rblock[6];
                *aerr = rblock[12];
                print_error_data(rblock);
        } else if (ret == H_R_STATE) {
                pr_err("No error data available: %llX\n", res_handle);
        } else {
                pr_err("Error data could not be fetched: %llX\n", res_handle);
        }

        free_page((unsigned long)rblock);
out:
        return type;
}