/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _QED_CHAIN_H
#define _QED_CHAIN_H

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/qed/common_hsi.h>

enum qed_chain_mode {
        /* Each page contains a next pointer at its end */
        QED_CHAIN_MODE_NEXT_PTR,

        /* Chain is a single page; a next pointer isn't required */
        QED_CHAIN_MODE_SINGLE,

        /* Page pointers are located in a side list */
        QED_CHAIN_MODE_PBL,
};

enum qed_chain_use_mode {
        QED_CHAIN_USE_TO_PRODUCE,               /* Chain starts empty */
        QED_CHAIN_USE_TO_CONSUME,               /* Chain starts full */
        QED_CHAIN_USE_TO_CONSUME_PRODUCE,       /* Chain starts empty */
};

enum qed_chain_cnt_type {
        /* The chain's size/prod/cons are kept in 16-bit variables */
        QED_CHAIN_CNT_TYPE_U16,

        /* The chain's size/prod/cons are kept in 32-bit variables */
        QED_CHAIN_CNT_TYPE_U32,
};

struct qed_chain_next {
        struct regpair next_phys;
        void *next_virt;
};

struct qed_chain_pbl_u16 {
        u16 prod_page_idx;
        u16 cons_page_idx;
};

struct qed_chain_pbl_u32 {
        u32 prod_page_idx;
        u32 cons_page_idx;
};

struct qed_chain_ext_pbl {
        dma_addr_t p_pbl_phys;
        void *p_pbl_virt;
};

struct qed_chain_u16 {
        /* Cyclic index of next element to produce/consume */
        u16 prod_idx;
        u16 cons_idx;
};

struct qed_chain_u32 {
        /* Cyclic index of next element to produce/consume */
        u32 prod_idx;
        u32 cons_idx;
};

struct qed_chain {
        /* fastpath portion of the chain - required for commands such
         * as produce / consume.
         */
        /* Point to next element to produce/consume */
        void *p_prod_elem;
        void *p_cons_elem;

        /* Fastpath portions of the PBL [if exists] */
        struct {
                /* Table for keeping the virtual addresses of the chain pages,
                 * corresponding to the physical addresses in the pbl table.
                 */
                void **pp_virt_addr_tbl;

                union {
                        struct qed_chain_pbl_u16 u16;
                        struct qed_chain_pbl_u32 u32;
                } c;
        } pbl;

        union {
                struct qed_chain_u16 chain16;
                struct qed_chain_u32 chain32;
        } u;

        /* Capacity counts only usable elements */
        u32 capacity;
        u32 page_cnt;

        enum qed_chain_mode mode;

        /* Elements information for fast calculations */
        u16 elem_per_page;
        u16 elem_per_page_mask;
        u16 elem_size;
        u16 next_page_mask;
        u16 usable_per_page;
        u8 elem_unusable;

        u8 cnt_type;

        /* Slowpath of the chain - required for initialization and destruction,
         * but isn't involved in regular functionality.
         */

        /* Base address of a pre-allocated buffer for pbl */
        struct {
                dma_addr_t p_phys_table;
                void *p_virt_table;
        } pbl_sp;

        /* Address of first page of the chain - the address is required
         * for fastpath operation [consume/produce] but only for the SINGLE
         * flavour which isn't considered fastpath [== SPQ].
         */
        void *p_virt_addr;
        dma_addr_t p_phys_addr;

        /* Total number of elements [for entire chain] */
        u32 size;

        u8 intended_use;

        bool b_external_pbl;
};

#define QED_CHAIN_PBL_ENTRY_SIZE        (8)
#define QED_CHAIN_PAGE_SIZE             (0x1000)
#define ELEMS_PER_PAGE(elem_size)       (QED_CHAIN_PAGE_SIZE / (elem_size))

#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)         \
        (((mode) == QED_CHAIN_MODE_NEXT_PTR) ?           \
         (u8)(1 + ((sizeof(struct qed_chain_next) - 1) / \
                   (elem_size))) : 0)

#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \
        ((u32)(ELEMS_PER_PAGE(elem_size) -     \
               UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)))

#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \
        DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode))

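/* A worked example of the per-page arithmetic above (illustrative only, not
 * part of the original header): with 8-byte elements in NEXT_PTR mode,
 * sizeof(struct qed_chain_next) is 16 on 64-bit, so two elements per page
 * are reserved for the next pointer:
 *
 *      ELEMS_PER_PAGE(8)                     = 0x1000 / 8        = 512
 *      UNUSABLE_ELEMS_PER_PAGE(8, NEXT_PTR)  = 1 + (16 - 1) / 8  = 2
 *      USABLE_ELEMS_PER_PAGE(8, NEXT_PTR)    = 512 - 2           = 510
 *      QED_CHAIN_PAGE_CNT(1024, 8, NEXT_PTR) = DIV_ROUND_UP(1024, 510) = 3
 *
 * In SINGLE and PBL modes no elements are reserved, so all 512 fit in a page.
 */
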
#define is_chain_u16(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16)
#define is_chain_u32(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32)

/* Accessors */
static inline u16 qed_chain_get_prod_idx(struct qed_chain *p_chain)
{
        return p_chain->u.chain16.prod_idx;
}

static inline u16 qed_chain_get_cons_idx(struct qed_chain *p_chain)
{
        return p_chain->u.chain16.cons_idx;
}

static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain)
{
        return p_chain->u.chain32.cons_idx;
}

static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
{
        u16 used;

        used = (u16) (((u32)0x10000 +
                       (u32)p_chain->u.chain16.prod_idx) -
                      (u32)p_chain->u.chain16.cons_idx);
        if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
                used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page -
                        p_chain->u.chain16.cons_idx / p_chain->elem_per_page;

        return (u16)(p_chain->capacity - used);
}

static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain)
{
        u32 used;

        used = (u32) (((u64)0x100000000ULL +
                       (u64)p_chain->u.chain32.prod_idx) -
                      (u64)p_chain->u.chain32.cons_idx);
        if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
                used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page -
                        p_chain->u.chain32.cons_idx / p_chain->elem_per_page;

        return p_chain->capacity - used;
}

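/* Usage sketch (added for illustration; not part of the original header):
 * before producing a multi-BD packet, a driver typically verifies that the
 * chain still has room for every BD it is about to write. The chain member
 * and variable names below are hypothetical.
 *
 *      if (qed_chain_get_elem_left(&txq->tx_pbl) < nr_bds)
 *              return NETDEV_TX_BUSY;
 */
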
static inline u16 qed_chain_get_usable_per_page(struct qed_chain *p_chain)
{
        return p_chain->usable_per_page;
}

static inline u8 qed_chain_get_unusable_per_page(struct qed_chain *p_chain)
{
        return p_chain->elem_unusable;
}

static inline u32 qed_chain_get_page_cnt(struct qed_chain *p_chain)
{
        return p_chain->page_cnt;
}

static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain)
{
        return p_chain->pbl_sp.p_phys_table;
}

/**
 * @brief qed_chain_advance_page -
 *
 * Advance the next element across pages for a linked chain
 *
 * @param p_chain
 * @param p_next_elem
 * @param idx_to_inc
 * @param page_to_inc
 */
static inline void
qed_chain_advance_page(struct qed_chain *p_chain,
                       void **p_next_elem, void *idx_to_inc, void *page_to_inc)
{
        struct qed_chain_next *p_next = NULL;
        u32 page_index = 0;

        switch (p_chain->mode) {
        case QED_CHAIN_MODE_NEXT_PTR:
                p_next = *p_next_elem;
                *p_next_elem = p_next->next_virt;
                if (is_chain_u16(p_chain))
                        *(u16 *)idx_to_inc += p_chain->elem_unusable;
                else
                        *(u32 *)idx_to_inc += p_chain->elem_unusable;
                break;
        case QED_CHAIN_MODE_SINGLE:
                *p_next_elem = p_chain->p_virt_addr;
                break;

        case QED_CHAIN_MODE_PBL:
                if (is_chain_u16(p_chain)) {
                        if (++(*(u16 *)page_to_inc) == p_chain->page_cnt)
                                *(u16 *)page_to_inc = 0;
                        page_index = *(u16 *)page_to_inc;
                } else {
                        if (++(*(u32 *)page_to_inc) == p_chain->page_cnt)
                                *(u32 *)page_to_inc = 0;
                        page_index = *(u32 *)page_to_inc;
                }
                *p_next_elem = p_chain->pbl.pp_virt_addr_tbl[page_index];
        }
}

#define is_unusable_idx(p, idx) \
        (((p)->u.chain16.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)

#define is_unusable_idx_u32(p, idx) \
        (((p)->u.chain32.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)

#define is_unusable_next_idx(p, idx)                              \
        ((((p)->u.chain16.idx + 1) & (p)->elem_per_page_mask) ==  \
         (p)->usable_per_page)

#define is_unusable_next_idx_u32(p, idx)                          \
        ((((p)->u.chain32.idx + 1) & (p)->elem_per_page_mask) ==  \
         (p)->usable_per_page)

#define test_and_skip(p, idx)                                              \
        do {                                                               \
                if (is_chain_u16(p)) {                                     \
                        if (is_unusable_idx(p, idx))                       \
                                (p)->u.chain16.idx += (p)->elem_unusable;  \
                } else {                                                   \
                        if (is_unusable_idx_u32(p, idx))                   \
                                (p)->u.chain32.idx += (p)->elem_unusable;  \
                }                                                          \
        } while (0)

/**
 * @brief qed_chain_return_produced -
 *
 * A chain in which the driver "Produces" elements should use this API
 * to indicate that previously produced elements are now consumed.
 *
 * @param p_chain
 */
static inline void qed_chain_return_produced(struct qed_chain *p_chain)
{
        if (is_chain_u16(p_chain))
                p_chain->u.chain16.cons_idx++;
        else
                p_chain->u.chain32.cons_idx++;
        test_and_skip(p_chain, cons_idx);
}

/**
 * @brief qed_chain_produce -
 *
 * A chain in which the driver "Produces" elements should use this to get
 * a pointer to the next element which can be "Produced". It is the driver's
 * responsibility to validate that the chain has room for a new element.
 *
 * @param p_chain
 *
 * @return void*, a pointer to next element
 */
static inline void *qed_chain_produce(struct qed_chain *p_chain)
{
        void *p_ret = NULL, *p_prod_idx, *p_prod_page_idx;

        if (is_chain_u16(p_chain)) {
                if ((p_chain->u.chain16.prod_idx &
                     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
                        p_prod_idx = &p_chain->u.chain16.prod_idx;
                        p_prod_page_idx = &p_chain->pbl.c.u16.prod_page_idx;
                        qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
                                               p_prod_idx, p_prod_page_idx);
                }
                p_chain->u.chain16.prod_idx++;
        } else {
                if ((p_chain->u.chain32.prod_idx &
                     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
                        p_prod_idx = &p_chain->u.chain32.prod_idx;
                        p_prod_page_idx = &p_chain->pbl.c.u32.prod_page_idx;
                        qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
                                               p_prod_idx, p_prod_page_idx);
                }
                p_chain->u.chain32.prod_idx++;
        }

        p_ret = p_chain->p_prod_elem;
        p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) +
                                        p_chain->elem_size);

        return p_ret;
}

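/* Producer-side sketch (added for illustration; not part of the original
 * header): a driver first checks for room, then reserves the next element
 * with qed_chain_produce() and fills it before updating the HW producer.
 * The 'txq->tx_pbl' chain and the BD type below are illustrative assumptions
 * modelled on the qede TX path, not a verbatim copy of it.
 *
 *      struct eth_tx_1st_bd *first_bd;
 *
 *      if (!qed_chain_get_elem_left(&txq->tx_pbl))
 *              return NETDEV_TX_BUSY;
 *
 *      first_bd = qed_chain_produce(&txq->tx_pbl);
 *      memset(first_bd, 0, sizeof(*first_bd));
 *      // ... fill the BD, map the skb, then update the HW producer ...
 */
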
/**
 * @brief qed_chain_get_capacity -
 *
 * Get the maximum number of BDs in the chain
 *
 * @param p_chain
 *
 * @return u32, the number of usable BDs in the chain
 */
static inline u32 qed_chain_get_capacity(struct qed_chain *p_chain)
{
        return p_chain->capacity;
}

/**
 * @brief qed_chain_recycle_consumed -
 *
 * Returns an element which was previously consumed back to the chain;
 * increments the producer so it can be written to FW.
 *
 * @param p_chain
 */
static inline void qed_chain_recycle_consumed(struct qed_chain *p_chain)
{
        test_and_skip(p_chain, prod_idx);
        if (is_chain_u16(p_chain))
                p_chain->u.chain16.prod_idx++;
        else
                p_chain->u.chain32.prod_idx++;
}

/**
 * @brief qed_chain_consume -
 *
 * A chain in which the driver utilizes data written by a different source
 * (i.e., FW) should use this to access passed buffers.
 *
 * @param p_chain
 *
 * @return void*, a pointer to the next buffer written
 */
static inline void *qed_chain_consume(struct qed_chain *p_chain)
{
        void *p_ret = NULL, *p_cons_idx, *p_cons_page_idx;

        if (is_chain_u16(p_chain)) {
                if ((p_chain->u.chain16.cons_idx &
                     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
                        p_cons_idx = &p_chain->u.chain16.cons_idx;
                        p_cons_page_idx = &p_chain->pbl.c.u16.cons_page_idx;
                        qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
                                               p_cons_idx, p_cons_page_idx);
                }
                p_chain->u.chain16.cons_idx++;
        } else {
                if ((p_chain->u.chain32.cons_idx &
                     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
                        p_cons_idx = &p_chain->u.chain32.cons_idx;
                        p_cons_page_idx = &p_chain->pbl.c.u32.cons_page_idx;
                        qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
                                               p_cons_idx, p_cons_page_idx);
                }
                p_chain->u.chain32.cons_idx++;
        }

        p_ret = p_chain->p_cons_elem;
        p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) +
                                        p_chain->elem_size);

        return p_ret;
}

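/* Consumer-side sketch (added for illustration; not part of the original
 * header): a chain the FW produces into is typically drained until the
 * driver's consumer index catches up with the index reported by the FW.
 * The 'rxq' member names and completion handling below are illustrative
 * assumptions modelled on the qede RX path.
 *
 *      u16 hw_idx = le16_to_cpu(*rxq->hw_cons_ptr);
 *
 *      while (qed_chain_get_cons_idx(&rxq->rx_comp_ring) != hw_idx) {
 *              union eth_rx_cqe *cqe = qed_chain_consume(&rxq->rx_comp_ring);
 *
 *              // ... handle the completion ...
 *      }
 */
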
/**
 * @brief qed_chain_reset - Resets the chain to its start state
 *
 * @param p_chain pointer to a previously allocated chain
 */
static inline void qed_chain_reset(struct qed_chain *p_chain)
{
        u32 i;

        if (is_chain_u16(p_chain)) {
                p_chain->u.chain16.prod_idx = 0;
                p_chain->u.chain16.cons_idx = 0;
        } else {
                p_chain->u.chain32.prod_idx = 0;
                p_chain->u.chain32.cons_idx = 0;
        }
        p_chain->p_cons_elem = p_chain->p_virt_addr;
        p_chain->p_prod_elem = p_chain->p_virt_addr;

        if (p_chain->mode == QED_CHAIN_MODE_PBL) {
                /* Use (page_cnt - 1) as a reset value for the prod/cons page's
                 * indices, to avoid unnecessary page advancing on the first
                 * call to qed_chain_produce/consume. Instead, the indices
                 * will be advanced to page_cnt and then will be wrapped to 0.
                 */
                u32 reset_val = p_chain->page_cnt - 1;

                if (is_chain_u16(p_chain)) {
                        p_chain->pbl.c.u16.prod_page_idx = (u16)reset_val;
                        p_chain->pbl.c.u16.cons_page_idx = (u16)reset_val;
                } else {
                        p_chain->pbl.c.u32.prod_page_idx = reset_val;
                        p_chain->pbl.c.u32.cons_page_idx = reset_val;
                }
        }

        switch (p_chain->intended_use) {
        case QED_CHAIN_USE_TO_CONSUME:
                /* produce empty elements */
                for (i = 0; i < p_chain->capacity; i++)
                        qed_chain_recycle_consumed(p_chain);
                break;

        case QED_CHAIN_USE_TO_CONSUME_PRODUCE:
        case QED_CHAIN_USE_TO_PRODUCE:
        default:
                /* Do nothing */
                break;
        }
}

/**
 * @brief qed_chain_init_params - Initializes a basic chain struct
 *
 * @param p_chain
 * @param page_cnt     number of pages in the allocated buffer
 * @param elem_size    size of each element in the chain
 * @param intended_use
 * @param mode
 * @param cnt_type
 */
static inline void qed_chain_init_params(struct qed_chain *p_chain,
                                         u32 page_cnt,
                                         u8 elem_size,
                                         enum qed_chain_use_mode intended_use,
                                         enum qed_chain_mode mode,
                                         enum qed_chain_cnt_type cnt_type)
{
        /* chain fixed parameters */
        p_chain->p_virt_addr = NULL;
        p_chain->p_phys_addr = 0;
        p_chain->elem_size = elem_size;
        p_chain->intended_use = (u8)intended_use;
        p_chain->mode = mode;
        p_chain->cnt_type = (u8)cnt_type;

        p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
        p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode);
        p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;
        p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
        p_chain->next_page_mask = (p_chain->usable_per_page &
                                   p_chain->elem_per_page_mask);

        p_chain->page_cnt = page_cnt;
        p_chain->capacity = p_chain->usable_per_page * page_cnt;
        p_chain->size = p_chain->elem_per_page * page_cnt;

        p_chain->pbl_sp.p_phys_table = 0;
        p_chain->pbl_sp.p_virt_table = NULL;
        p_chain->pbl.pp_virt_addr_tbl = NULL;
}

/**
 * @brief qed_chain_init_mem -
 *
 * Initializes a basic chain struct with its chain buffers
 *
 * @param p_chain
 * @param p_virt_addr virtual address of allocated buffer's beginning
 * @param p_phys_addr physical address of allocated buffer's beginning
 *
 */
static inline void qed_chain_init_mem(struct qed_chain *p_chain,
                                      void *p_virt_addr, dma_addr_t p_phys_addr)
{
        p_chain->p_virt_addr = p_virt_addr;
        p_chain->p_phys_addr = p_phys_addr;
}

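/* Setup sketch (added for illustration; not part of the original header):
 * a minimal single-page chain, assuming the page has already been obtained
 * from dma_alloc_coherent(). The 'p_virt'/'p_phys' names and the 8-byte
 * element size are illustrative assumptions; drivers normally go through
 * qed_chain_alloc() instead of open-coding this.
 *
 *      struct qed_chain chain;
 *
 *      qed_chain_init_params(&chain, 1, 8, QED_CHAIN_USE_TO_PRODUCE,
 *                            QED_CHAIN_MODE_SINGLE, QED_CHAIN_CNT_TYPE_U16);
 *      qed_chain_init_mem(&chain, p_virt, p_phys);
 *      qed_chain_reset(&chain);
 */
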
/**
 * @brief qed_chain_init_pbl_mem -
 *
 * Initializes a basic chain struct with its pbl buffers
 *
 * @param p_chain
 * @param p_virt_pbl virtual address of a pre-allocated pbl table which will
 *                   hold the physical addresses of the chain pages.
 * @param p_phys_pbl physical address of that pbl table.
 * @param pp_virt_addr_tbl
 *                   pointer to a pre-allocated side table which will hold
 *                   the virtual addresses of the chain pages.
 *
 */
static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain,
                                          void *p_virt_pbl,
                                          dma_addr_t p_phys_pbl,
                                          void **pp_virt_addr_tbl)
{
        p_chain->pbl_sp.p_phys_table = p_phys_pbl;
        p_chain->pbl_sp.p_virt_table = p_virt_pbl;
        p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl;
}

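/* PBL wiring sketch (added for illustration; not part of the original
 * header): in QED_CHAIN_MODE_PBL the chain pages are located through a side
 * table of per-page physical addresses (the "pbl", one
 * QED_CHAIN_PBL_ENTRY_SIZE entry per page), plus a parallel array of their
 * virtual addresses for fastpath lookups. The buffers and names below
 * ('pbl_virt', 'pbl_phys', 'pp_virt', 'struct some_elem') are illustrative
 * assumptions; drivers normally let qed_chain_alloc() build these tables.
 *
 *      qed_chain_init_params(&chain, num_pages, sizeof(struct some_elem),
 *                            QED_CHAIN_USE_TO_CONSUME_PRODUCE,
 *                            QED_CHAIN_MODE_PBL, QED_CHAIN_CNT_TYPE_U16);
 *      qed_chain_init_pbl_mem(&chain, pbl_virt, pbl_phys, pp_virt);
 */
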
/**
 * @brief qed_chain_init_next_ptr_elem -
 *
 * Initializes a next pointer element
 *
 * @param p_chain
 * @param p_virt_curr virtual address of a chain page of which the next
 *                    pointer element is initialized
 * @param p_virt_next virtual address of the next chain page
 * @param p_phys_next physical address of the next chain page
 *
 */
static inline void
qed_chain_init_next_ptr_elem(struct qed_chain *p_chain,
                             void *p_virt_curr,
                             void *p_virt_next, dma_addr_t p_phys_next)
{
        struct qed_chain_next *p_next;
        u32 size;

        size = p_chain->elem_size * p_chain->usable_per_page;
        p_next = (struct qed_chain_next *)((u8 *)p_virt_curr + size);

        DMA_REGPAIR_LE(p_next->next_phys, p_phys_next);

        p_next->next_virt = p_virt_next;
}

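/* Linking sketch (added for illustration; not part of the original header):
 * in QED_CHAIN_MODE_NEXT_PTR each page's trailing qed_chain_next element
 * points to the following page, and the last page points back to the first,
 * forming a ring (qed_chain_get_last_elem() below relies on this). The
 * 'virt[]'/'phys[]' arrays are illustrative assumptions; qed_chain_alloc()
 * normally performs this linking.
 *
 *      u32 i;
 *
 *      for (i = 0; i < chain.page_cnt; i++) {
 *              u32 next = (i + 1) % chain.page_cnt;
 *
 *              qed_chain_init_next_ptr_elem(&chain, virt[i],
 *                                           virt[next], phys[next]);
 *      }
 */
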
/**
 * @brief qed_chain_get_last_elem -
 *
 * Returns a pointer to the last element of the chain
 *
 * @param p_chain
 *
 * @return void*
 */
static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain)
{
        struct qed_chain_next *p_next = NULL;
        void *p_virt_addr = NULL;
        u32 size, last_page_idx;

        if (!p_chain->p_virt_addr)
                goto out;

        switch (p_chain->mode) {
        case QED_CHAIN_MODE_NEXT_PTR:
                size = p_chain->elem_size * p_chain->usable_per_page;
                p_virt_addr = p_chain->p_virt_addr;
                p_next = (struct qed_chain_next *)((u8 *)p_virt_addr + size);
                while (p_next->next_virt != p_chain->p_virt_addr) {
                        p_virt_addr = p_next->next_virt;
                        p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
                                                           size);
                }
                break;
        case QED_CHAIN_MODE_SINGLE:
                p_virt_addr = p_chain->p_virt_addr;
                break;
        case QED_CHAIN_MODE_PBL:
                last_page_idx = p_chain->page_cnt - 1;
                p_virt_addr = p_chain->pbl.pp_virt_addr_tbl[last_page_idx];
                break;
        }
        /* p_virt_addr points at this stage to the last page of the chain */
        size = p_chain->elem_size * (p_chain->usable_per_page - 1);
        p_virt_addr = (u8 *)p_virt_addr + size;
out:
        return p_virt_addr;
}

/**
 * @brief qed_chain_set_prod - sets the prod to the given value
 *
 * @param p_chain
 * @param prod_idx
 * @param p_prod_elem
 */
static inline void qed_chain_set_prod(struct qed_chain *p_chain,
                                      u32 prod_idx, void *p_prod_elem)
{
        if (p_chain->mode == QED_CHAIN_MODE_PBL) {
                u32 cur_prod, page_mask, page_cnt, page_diff;

                cur_prod = is_chain_u16(p_chain) ? p_chain->u.chain16.prod_idx :
                           p_chain->u.chain32.prod_idx;

                /* Assume that number of elements in a page is power of 2 */
                page_mask = ~p_chain->elem_per_page_mask;

                /* Use "cur_prod - 1" and "prod_idx - 1" since producer index
                 * reaches the first element of next page before the page index
                 * is incremented. See qed_chain_produce().
                 * Index wrap around is not a problem because the difference
                 * between current and given producer indices is always
                 * positive and lower than the chain's capacity.
                 */
                page_diff = (((cur_prod - 1) & page_mask) -
                             ((prod_idx - 1) & page_mask)) /
                            p_chain->elem_per_page;

                page_cnt = qed_chain_get_page_cnt(p_chain);
                if (is_chain_u16(p_chain))
                        p_chain->pbl.c.u16.prod_page_idx =
                                (p_chain->pbl.c.u16.prod_page_idx -
                                 page_diff + page_cnt) % page_cnt;
                else
                        p_chain->pbl.c.u32.prod_page_idx =
                                (p_chain->pbl.c.u32.prod_page_idx -
                                 page_diff + page_cnt) % page_cnt;
        }

        if (is_chain_u16(p_chain))
                p_chain->u.chain16.prod_idx = (u16) prod_idx;
        else
                p_chain->u.chain32.prod_idx = prod_idx;
        p_chain->p_prod_elem = p_prod_elem;
}

/**
 * @brief qed_chain_pbl_zero_mem - set chain memory to 0
 *
 * @param p_chain
 */
static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain)
{
        u32 i, page_cnt;

        if (p_chain->mode != QED_CHAIN_MODE_PBL)
                return;

        page_cnt = qed_chain_get_page_cnt(p_chain);

        for (i = 0; i < page_cnt; i++)
                memset(p_chain->pbl.pp_virt_addr_tbl[i], 0,
                       QED_CHAIN_PAGE_SIZE);
}

#endif