/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include "hinic_compat.h"
#include "hinic_pmd_hwdev.h"
#include "hinic_pmd_wq.h"

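/**
 * free_wq_pages - release the DMA buffer backing a work queue
 * @hwdev: hinic device handle
 * @wq: work queue whose buffer is freed
 **/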
static void free_wq_pages(struct hinic_hwdev *hwdev, struct hinic_wq *wq)
{
	dma_free_coherent(hwdev, wq->wq_buf_size, (void *)wq->queue_buf_vaddr,
			  (dma_addr_t)wq->queue_buf_paddr);

	wq->queue_buf_paddr = 0;
	wq->queue_buf_vaddr = 0;
}

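/**
 * alloc_wq_pages - allocate a 256KB-aligned DMA buffer for a work queue
 * @hwdev: hinic device handle
 * @wq: work queue to back with the buffer
 * @socket_id: NUMA socket to allocate the buffer from
 * Return: 0 on success, -ENOMEM if allocation or alignment fails
 **/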
static int alloc_wq_pages(struct hinic_hwdev *hwdev, struct hinic_wq *wq,
			  unsigned int socket_id)
{
	dma_addr_t dma_addr = 0;

	wq->queue_buf_vaddr = (u64)(u64 *)
		dma_zalloc_coherent_aligned256k(hwdev, wq->wq_buf_size,
						&dma_addr, socket_id);
	if (!wq->queue_buf_vaddr) {
		PMD_DRV_LOG(ERR, "Failed to allocate wq page");
		return -ENOMEM;
	}

	if (!ADDR_256K_ALIGNED(dma_addr)) {
		PMD_DRV_LOG(ERR, "Wqe pages are not 256k aligned");
		dma_free_coherent(hwdev, wq->wq_buf_size,
				  (void *)wq->queue_buf_vaddr,
				  dma_addr);
		return -ENOMEM;
	}
	wq->queue_buf_paddr = dma_addr;

	return 0;
}

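/**
 * hinic_wq_allocate - initialize a work queue and allocate its buffer
 * @hwdev: hinic device handle
 * @wq: work queue to initialize
 * @wqebb_shift: log2 of the WQE basic block size
 * @q_depth: queue depth in WQEBBs, must be a power of 2
 * @socket_id: NUMA socket to allocate the buffer from
 * Return: 0 on success, negative error code on failure
 **/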
int hinic_wq_allocate(struct hinic_hwdev *hwdev, struct hinic_wq *wq,
		      u32 wqebb_shift, u16 q_depth, unsigned int socket_id)
{
	int err;

	if (q_depth & (q_depth - 1)) {
		PMD_DRV_LOG(ERR, "WQ q_depth is not a power of 2");
		return -EINVAL;
	}

	wq->wqebb_size = 1 << wqebb_shift;
	wq->wqebb_shift = wqebb_shift;
	wq->wq_buf_size = ((u32)q_depth) << wqebb_shift;
	wq->q_depth = q_depth;

	if (wq->wq_buf_size > (HINIC_PAGE_SIZE << HINIC_PAGE_SIZE_DPDK)) {
		PMD_DRV_LOG(ERR, "Invalid q_depth %u: wq buffer size exceeds one wq page",
			    q_depth);
		return -EINVAL;
	}

	err = alloc_wq_pages(hwdev, wq, socket_id);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to allocate wq pages");
		return err;
	}

	wq->cons_idx = 0;
	wq->prod_idx = 0;
	wq->delta = q_depth;
	wq->mask = q_depth - 1;

	return 0;
}

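/**
 * hinic_wq_free - free the DMA buffer of a work queue
 * @hwdev: hinic device handle
 * @wq: work queue to free
 **/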
void hinic_wq_free(struct hinic_hwdev *hwdev, struct hinic_wq *wq)
{
	free_wq_pages(hwdev, wq);
}

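/**
 * hinic_put_wqe - return consumed WQEBBs to the work queue
 * @wq: work queue
 * @num_wqebbs: number of WQE basic blocks to release
 **/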
void hinic_put_wqe(struct hinic_wq *wq, int num_wqebbs)
{
	wq->cons_idx += num_wqebbs;
	wq->delta += num_wqebbs;
}

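/**
 * hinic_read_wqe - get the WQE at the current consumer index
 * @wq: work queue
 * @num_wqebbs: number of WQE basic blocks to read
 * @cons_idx: returned masked consumer index of the WQE
 * Return: WQE address, or NULL if fewer than num_wqebbs blocks are in use
 **/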
void *hinic_read_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *cons_idx)
{
	u16 curr_cons_idx;

	if ((wq->delta + num_wqebbs) > wq->q_depth)
		return NULL;

	curr_cons_idx = (u16)(wq->cons_idx);

	curr_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx);

	*cons_idx = curr_cons_idx;

	return WQ_WQE_ADDR(wq, (u32)(*cons_idx));
}

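/**
 * hinic_cmdq_alloc - initialize an array of command queues and allocate
 * their buffers
 * @wq: array of cmdq_blocks work queues
 * @hwdev: hinic device handle
 * @cmdq_blocks: number of command queues
 * @wq_buf_size: buffer size of each queue
 * @wqebb_shift: log2 of the WQE basic block size
 * @q_depth: depth of each queue, must be a power of 2
 * Return: 0 on success; on failure, already-allocated blocks are freed
 **/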
int hinic_cmdq_alloc(struct hinic_wq *wq, struct hinic_hwdev *hwdev,
		     int cmdq_blocks, u32 wq_buf_size, u32 wqebb_shift,
		     u16 q_depth)
{
	int i, j, err = -ENOMEM;

	/* q_depth is assumed to be a power of 2 and wqebb_size nonzero;
	 * neither is validated here.
	 */
	for (i = 0; i < cmdq_blocks; i++) {
		wq[i].wqebb_size = 1 << wqebb_shift;
		wq[i].wqebb_shift = wqebb_shift;
		wq[i].wq_buf_size = wq_buf_size;
		wq[i].q_depth = q_depth;

		err = alloc_wq_pages(hwdev, &wq[i], SOCKET_ID_ANY);
		if (err) {
			PMD_DRV_LOG(ERR, "Failed to alloc CMDQ blocks");
			goto cmdq_block_err;
		}

		wq[i].cons_idx = 0;
		wq[i].prod_idx = 0;
		wq[i].delta = q_depth;

		wq[i].mask = q_depth - 1;
	}

	return 0;

cmdq_block_err:
	for (j = 0; j < i; j++)
		free_wq_pages(hwdev, &wq[j]);

	return err;
}

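/**
 * hinic_cmdq_free - free the buffers of an array of command queues
 * @hwdev: hinic device handle
 * @wq: array of cmdq_blocks work queues
 * @cmdq_blocks: number of command queues
 **/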
void hinic_cmdq_free(struct hinic_hwdev *hwdev, struct hinic_wq *wq,
		     int cmdq_blocks)
{
	int i;

	for (i = 0; i < cmdq_blocks; i++)
		free_wq_pages(hwdev, &wq[i]);
}

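/**
 * hinic_wq_wqe_pg_clear - reset the queue indexes and zero the WQE buffer
 * @wq: work queue to clear
 **/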
void hinic_wq_wqe_pg_clear(struct hinic_wq *wq)
{
	wq->cons_idx = 0;
	wq->prod_idx = 0;

	memset((void *)wq->queue_buf_vaddr, 0, wq->wq_buf_size);
}

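/**
 * hinic_get_wqe - reserve WQEBBs at the producer index and return the
 * WQE address
 * @wq: work queue
 * @num_wqebbs: number of WQE basic blocks to reserve
 * @prod_idx: returned masked producer index of the WQE
 * Return: address of the reserved WQE; no free-space check is performed
 * here, so the caller must ensure num_wqebbs free blocks are available
 **/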
void *hinic_get_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *prod_idx)
{
	u16 curr_prod_idx;

	wq->delta -= num_wqebbs;
	curr_prod_idx = wq->prod_idx;
	wq->prod_idx += num_wqebbs;
	*prod_idx = MASKED_WQE_IDX(wq, curr_prod_idx);

	return WQ_WQE_ADDR(wq, (u32)(*prod_idx));
}

/**
 * hinic_set_sge - set dma area in scatter gather entry
 * @sge: scatter gather entry
 * @addr: dma address
 * @len: length of relevant data in the dma address
 **/
void hinic_set_sge(struct hinic_sge *sge, dma_addr_t addr, u32 len)
{
	sge->hi_addr = upper_32_bits(addr);
	sge->lo_addr = lower_32_bits(addr);
	sge->len = len;
}