/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2018 Intel Corporation
 */
#include "i40e_osdep.h"
#include "i40e_register.h"
#include "i40e_status.h"
#include "i40e_alloc.h"
#include "i40e_hmc.h"
#include "i40e_type.h"
13 * i40e_add_sd_table_entry - Adds a segment descriptor to the table
14 * @hw: pointer to our hw struct
15 * @hmc_info: pointer to the HMC configuration information struct
16 * @sd_index: segment descriptor index to manipulate
17 * @type: what type of segment descriptor we're manipulating
18 * @direct_mode_sz: size to alloc in direct mode
20 enum i40e_status_code
i40e_add_sd_table_entry(struct i40e_hw
*hw
,
21 struct i40e_hmc_info
*hmc_info
,
23 enum i40e_sd_entry_type type
,
26 enum i40e_status_code ret_code
= I40E_SUCCESS
;
27 struct i40e_hmc_sd_entry
*sd_entry
;
28 enum i40e_memory_type mem_type
;
29 bool dma_mem_alloc_done
= false;
30 struct i40e_dma_mem mem
;
33 if (NULL
== hmc_info
->sd_table
.sd_entry
) {
34 ret_code
= I40E_ERR_BAD_PTR
;
35 DEBUGOUT("i40e_add_sd_table_entry: bad sd_entry\n");
39 if (sd_index
>= hmc_info
->sd_table
.sd_cnt
) {
40 ret_code
= I40E_ERR_INVALID_SD_INDEX
;
41 DEBUGOUT("i40e_add_sd_table_entry: bad sd_index\n");
45 sd_entry
= &hmc_info
->sd_table
.sd_entry
[sd_index
];
46 if (!sd_entry
->valid
) {
47 if (I40E_SD_TYPE_PAGED
== type
) {
48 mem_type
= i40e_mem_pd
;
49 alloc_len
= I40E_HMC_PAGED_BP_SIZE
;
51 mem_type
= i40e_mem_bp_jumbo
;
52 alloc_len
= direct_mode_sz
;
55 /* allocate a 4K pd page or 2M backing page */
56 ret_code
= i40e_allocate_dma_mem(hw
, &mem
, mem_type
, alloc_len
,
57 I40E_HMC_PD_BP_BUF_ALIGNMENT
);
60 dma_mem_alloc_done
= true;
61 if (I40E_SD_TYPE_PAGED
== type
) {
62 ret_code
= i40e_allocate_virt_mem(hw
,
63 &sd_entry
->u
.pd_table
.pd_entry_virt_mem
,
64 sizeof(struct i40e_hmc_pd_entry
) * 512);
67 sd_entry
->u
.pd_table
.pd_entry
=
68 (struct i40e_hmc_pd_entry
*)
69 sd_entry
->u
.pd_table
.pd_entry_virt_mem
.va
;
70 i40e_memcpy(&sd_entry
->u
.pd_table
.pd_page_addr
,
71 &mem
, sizeof(struct i40e_dma_mem
),
72 I40E_NONDMA_TO_NONDMA
);
74 i40e_memcpy(&sd_entry
->u
.bp
.addr
,
75 &mem
, sizeof(struct i40e_dma_mem
),
76 I40E_NONDMA_TO_NONDMA
);
77 sd_entry
->u
.bp
.sd_pd_index
= sd_index
;
79 /* initialize the sd entry */
80 hmc_info
->sd_table
.sd_entry
[sd_index
].entry_type
= type
;
82 /* increment the ref count */
83 I40E_INC_SD_REFCNT(&hmc_info
->sd_table
);
85 /* Increment backing page reference count */
86 if (I40E_SD_TYPE_DIRECT
== sd_entry
->entry_type
)
87 I40E_INC_BP_REFCNT(&sd_entry
->u
.bp
);
89 if (I40E_SUCCESS
!= ret_code
)
90 if (dma_mem_alloc_done
)
91 i40e_free_dma_mem(hw
, &mem
);
97 * i40e_add_pd_table_entry - Adds page descriptor to the specified table
98 * @hw: pointer to our HW structure
99 * @hmc_info: pointer to the HMC configuration information structure
100 * @pd_index: which page descriptor index to manipulate
101 * @rsrc_pg: if not NULL, use preallocated page instead of allocating new one.
104 * 1. Initializes the pd entry
105 * 2. Adds pd_entry in the pd_table
106 * 3. Mark the entry valid in i40e_hmc_pd_entry structure
107 * 4. Initializes the pd_entry's ref count to 1
109 * 1. The memory for pd should be pinned down, physically contiguous and
110 * aligned on 4K boundary and zeroed memory.
111 * 2. It should be 4K in size.
113 enum i40e_status_code
i40e_add_pd_table_entry(struct i40e_hw
*hw
,
114 struct i40e_hmc_info
*hmc_info
,
116 struct i40e_dma_mem
*rsrc_pg
)
118 enum i40e_status_code ret_code
= I40E_SUCCESS
;
119 struct i40e_hmc_pd_table
*pd_table
;
120 struct i40e_hmc_pd_entry
*pd_entry
;
121 struct i40e_dma_mem mem
;
122 struct i40e_dma_mem
*page
= &mem
;
123 u32 sd_idx
, rel_pd_idx
;
127 if (pd_index
/ I40E_HMC_PD_CNT_IN_SD
>= hmc_info
->sd_table
.sd_cnt
) {
128 ret_code
= I40E_ERR_INVALID_PAGE_DESC_INDEX
;
129 DEBUGOUT("i40e_add_pd_table_entry: bad pd_index\n");
133 /* find corresponding sd */
134 sd_idx
= (pd_index
/ I40E_HMC_PD_CNT_IN_SD
);
135 if (I40E_SD_TYPE_PAGED
!=
136 hmc_info
->sd_table
.sd_entry
[sd_idx
].entry_type
)
139 rel_pd_idx
= (pd_index
% I40E_HMC_PD_CNT_IN_SD
);
140 pd_table
= &hmc_info
->sd_table
.sd_entry
[sd_idx
].u
.pd_table
;
141 pd_entry
= &pd_table
->pd_entry
[rel_pd_idx
];
142 if (!pd_entry
->valid
) {
144 pd_entry
->rsrc_pg
= true;
147 /* allocate a 4K backing page */
148 ret_code
= i40e_allocate_dma_mem(hw
, page
, i40e_mem_bp
,
149 I40E_HMC_PAGED_BP_SIZE
,
150 I40E_HMC_PD_BP_BUF_ALIGNMENT
);
153 pd_entry
->rsrc_pg
= false;
156 i40e_memcpy(&pd_entry
->bp
.addr
, page
,
157 sizeof(struct i40e_dma_mem
), I40E_NONDMA_TO_NONDMA
);
158 pd_entry
->bp
.sd_pd_index
= pd_index
;
159 pd_entry
->bp
.entry_type
= I40E_SD_TYPE_PAGED
;
160 /* Set page address and valid bit */
161 page_desc
= page
->pa
| 0x1;
163 pd_addr
= (u64
*)pd_table
->pd_page_addr
.va
;
164 pd_addr
+= rel_pd_idx
;
166 /* Add the backing page physical address in the pd entry */
167 i40e_memcpy(pd_addr
, &page_desc
, sizeof(u64
),
170 pd_entry
->sd_index
= sd_idx
;
171 pd_entry
->valid
= true;
172 I40E_INC_PD_REFCNT(pd_table
);
174 I40E_INC_BP_REFCNT(&pd_entry
->bp
);
180 * i40e_remove_pd_bp - remove a backing page from a page descriptor
181 * @hw: pointer to our HW structure
182 * @hmc_info: pointer to the HMC configuration information structure
183 * @idx: the page index
186 * 1. Marks the entry in pd tabe (for paged address mode) or in sd table
187 * (for direct address mode) invalid.
188 * 2. Write to register PMPDINV to invalidate the backing page in FV cache
189 * 3. Decrement the ref count for the pd _entry
191 * 1. Caller can deallocate the memory used by backing storage after this
194 enum i40e_status_code
i40e_remove_pd_bp(struct i40e_hw
*hw
,
195 struct i40e_hmc_info
*hmc_info
,
198 enum i40e_status_code ret_code
= I40E_SUCCESS
;
199 struct i40e_hmc_pd_entry
*pd_entry
;
200 struct i40e_hmc_pd_table
*pd_table
;
201 struct i40e_hmc_sd_entry
*sd_entry
;
202 u32 sd_idx
, rel_pd_idx
;
205 /* calculate index */
206 sd_idx
= idx
/ I40E_HMC_PD_CNT_IN_SD
;
207 rel_pd_idx
= idx
% I40E_HMC_PD_CNT_IN_SD
;
208 if (sd_idx
>= hmc_info
->sd_table
.sd_cnt
) {
209 ret_code
= I40E_ERR_INVALID_PAGE_DESC_INDEX
;
210 DEBUGOUT("i40e_remove_pd_bp: bad idx\n");
213 sd_entry
= &hmc_info
->sd_table
.sd_entry
[sd_idx
];
214 if (I40E_SD_TYPE_PAGED
!= sd_entry
->entry_type
) {
215 ret_code
= I40E_ERR_INVALID_SD_TYPE
;
216 DEBUGOUT("i40e_remove_pd_bp: wrong sd_entry type\n");
219 /* get the entry and decrease its ref counter */
220 pd_table
= &hmc_info
->sd_table
.sd_entry
[sd_idx
].u
.pd_table
;
221 pd_entry
= &pd_table
->pd_entry
[rel_pd_idx
];
222 I40E_DEC_BP_REFCNT(&pd_entry
->bp
);
223 if (pd_entry
->bp
.ref_cnt
)
226 /* mark the entry invalid */
227 pd_entry
->valid
= false;
228 I40E_DEC_PD_REFCNT(pd_table
);
229 pd_addr
= (u64
*)pd_table
->pd_page_addr
.va
;
230 pd_addr
+= rel_pd_idx
;
231 i40e_memset(pd_addr
, 0, sizeof(u64
), I40E_DMA_MEM
);
232 I40E_INVALIDATE_PF_HMC_PD(hw
, sd_idx
, idx
);
234 /* free memory here */
235 if (!pd_entry
->rsrc_pg
)
236 ret_code
= i40e_free_dma_mem(hw
, &(pd_entry
->bp
.addr
));
237 if (I40E_SUCCESS
!= ret_code
)
239 if (!pd_table
->ref_cnt
)
240 i40e_free_virt_mem(hw
, &pd_table
->pd_entry_virt_mem
);
246 * i40e_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry
247 * @hmc_info: pointer to the HMC configuration information structure
248 * @idx: the page index
250 enum i40e_status_code
i40e_prep_remove_sd_bp(struct i40e_hmc_info
*hmc_info
,
253 enum i40e_status_code ret_code
= I40E_SUCCESS
;
254 struct i40e_hmc_sd_entry
*sd_entry
;
256 /* get the entry and decrease its ref counter */
257 sd_entry
= &hmc_info
->sd_table
.sd_entry
[idx
];
258 I40E_DEC_BP_REFCNT(&sd_entry
->u
.bp
);
259 if (sd_entry
->u
.bp
.ref_cnt
) {
260 ret_code
= I40E_ERR_NOT_READY
;
263 I40E_DEC_SD_REFCNT(&hmc_info
->sd_table
);
265 /* mark the entry invalid */
266 sd_entry
->valid
= false;
272 * i40e_remove_sd_bp_new - Removes a backing page from a segment descriptor
273 * @hw: pointer to our hw struct
274 * @hmc_info: pointer to the HMC configuration information structure
275 * @idx: the page index
276 * @is_pf: used to distinguish between VF and PF
278 enum i40e_status_code
i40e_remove_sd_bp_new(struct i40e_hw
*hw
,
279 struct i40e_hmc_info
*hmc_info
,
282 struct i40e_hmc_sd_entry
*sd_entry
;
285 return I40E_NOT_SUPPORTED
;
287 /* get the entry and decrease its ref counter */
288 sd_entry
= &hmc_info
->sd_table
.sd_entry
[idx
];
289 I40E_CLEAR_PF_SD_ENTRY(hw
, idx
, I40E_SD_TYPE_DIRECT
);
291 return i40e_free_dma_mem(hw
, &(sd_entry
->u
.bp
.addr
));
295 * i40e_prep_remove_pd_page - Prepares to remove a PD page from sd entry.
296 * @hmc_info: pointer to the HMC configuration information structure
297 * @idx: segment descriptor index to find the relevant page descriptor
299 enum i40e_status_code
i40e_prep_remove_pd_page(struct i40e_hmc_info
*hmc_info
,
302 enum i40e_status_code ret_code
= I40E_SUCCESS
;
303 struct i40e_hmc_sd_entry
*sd_entry
;
305 sd_entry
= &hmc_info
->sd_table
.sd_entry
[idx
];
307 if (sd_entry
->u
.pd_table
.ref_cnt
) {
308 ret_code
= I40E_ERR_NOT_READY
;
312 /* mark the entry invalid */
313 sd_entry
->valid
= false;
315 I40E_DEC_SD_REFCNT(&hmc_info
->sd_table
);
321 * i40e_remove_pd_page_new - Removes a PD page from sd entry.
322 * @hw: pointer to our hw struct
323 * @hmc_info: pointer to the HMC configuration information structure
324 * @idx: segment descriptor index to find the relevant page descriptor
325 * @is_pf: used to distinguish between VF and PF
327 enum i40e_status_code
i40e_remove_pd_page_new(struct i40e_hw
*hw
,
328 struct i40e_hmc_info
*hmc_info
,
331 struct i40e_hmc_sd_entry
*sd_entry
;
334 return I40E_NOT_SUPPORTED
;
336 sd_entry
= &hmc_info
->sd_table
.sd_entry
[idx
];
337 I40E_CLEAR_PF_SD_ENTRY(hw
, idx
, I40E_SD_TYPE_PAGED
);
339 return i40e_free_dma_mem(hw
, &(sd_entry
->u
.pd_table
.pd_page_addr
));