/* SPDK NVMe/TCP transport unit tests (nvme_tcp_ut.c). */
1 | /*- |
2 | * BSD LICENSE | |
3 | * | |
4 | * Copyright (c) Intel Corporation. | |
5 | * All rights reserved. | |
6 | * | |
7 | * Redistribution and use in source and binary forms, with or without | |
8 | * modification, are permitted provided that the following conditions | |
9 | * are met: | |
10 | * | |
11 | * * Redistributions of source code must retain the above copyright | |
12 | * notice, this list of conditions and the following disclaimer. | |
13 | * * Redistributions in binary form must reproduce the above copyright | |
14 | * notice, this list of conditions and the following disclaimer in | |
15 | * the documentation and/or other materials provided with the | |
16 | * distribution. | |
17 | * * Neither the name of Intel Corporation nor the names of its | |
18 | * contributors may be used to endorse or promote products derived | |
19 | * from this software without specific prior written permission. | |
20 | * | |
21 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
22 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
23 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |
24 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |
25 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
26 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |
27 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
28 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
29 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
30 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
31 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
32 | */ | |
33 | ||
34 | #include "spdk/stdinc.h" | |
35 | ||
36 | #include "spdk_cunit.h" | |
37 | ||
38 | #include "common/lib/test_sock.c" | |
39 | ||
40 | #include "nvme/nvme_tcp.c" | |
41 | #include "common/lib/nvme/common_stubs.h" | |
42 | ||
/* Register the "nvme" log component so SPDK_*LOG macros in the code under test resolve. */
SPDK_LOG_REGISTER_COMPONENT("nvme", SPDK_LOG_NVME);

/* Stub out qpair request submission; always reports success (returns 0). */
DEFINE_STUB(nvme_qpair_submit_request,
	    int, (struct spdk_nvme_qpair *qpair, struct nvme_request *req), 0);

/* Stub out socket priority configuration; always reports success (returns 0). */
DEFINE_STUB(spdk_sock_set_priority,
	    int, (struct spdk_sock *sock, int priority), 0);

/* Stub out poll-group removal; always reports success (returns 0). */
DEFINE_STUB(spdk_nvme_poll_group_remove, int, (struct spdk_nvme_poll_group *group,
		struct spdk_nvme_qpair *qpair), 0);
54 | static void | |
55 | test_nvme_tcp_pdu_set_data_buf(void) | |
56 | { | |
57 | struct nvme_tcp_pdu pdu = {}; | |
58 | struct iovec iov[NVME_TCP_MAX_SGL_DESCRIPTORS] = {}; | |
59 | uint32_t data_len; | |
60 | uint64_t i; | |
61 | ||
62 | /* 1st case: input is a single SGL entry. */ | |
63 | iov[0].iov_base = (void *)0xDEADBEEF; | |
64 | iov[0].iov_len = 4096; | |
65 | ||
66 | nvme_tcp_pdu_set_data_buf(&pdu, iov, 1, 1024, 512); | |
67 | ||
68 | CU_ASSERT(pdu.data_iovcnt == 1); | |
69 | CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF + 1024); | |
70 | CU_ASSERT(pdu.data_iov[0].iov_len == 512); | |
71 | ||
72 | /* 2nd case: simulate split on multiple SGL entries. */ | |
73 | iov[0].iov_base = (void *)0xDEADBEEF; | |
74 | iov[0].iov_len = 4096; | |
75 | iov[1].iov_base = (void *)0xFEEDBEEF; | |
76 | iov[1].iov_len = 512 * 7; | |
77 | iov[2].iov_base = (void *)0xF00DF00D; | |
78 | iov[2].iov_len = 4096 * 2; | |
79 | ||
80 | nvme_tcp_pdu_set_data_buf(&pdu, iov, 3, 0, 2048); | |
81 | ||
82 | CU_ASSERT(pdu.data_iovcnt == 1); | |
83 | CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF); | |
84 | CU_ASSERT(pdu.data_iov[0].iov_len == 2048); | |
85 | ||
86 | nvme_tcp_pdu_set_data_buf(&pdu, iov, 3, 2048, 2048 + 512 * 3); | |
87 | ||
88 | CU_ASSERT(pdu.data_iovcnt == 2); | |
89 | CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF + 2048); | |
90 | CU_ASSERT(pdu.data_iov[0].iov_len == 2048); | |
91 | CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF); | |
92 | CU_ASSERT(pdu.data_iov[1].iov_len == 512 * 3); | |
93 | ||
94 | nvme_tcp_pdu_set_data_buf(&pdu, iov, 3, 4096 + 512 * 3, 512 * 4 + 4096 * 2); | |
95 | ||
96 | CU_ASSERT(pdu.data_iovcnt == 2); | |
97 | CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xFEEDBEEF + 512 * 3); | |
98 | CU_ASSERT(pdu.data_iov[0].iov_len == 512 * 4); | |
99 | CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xF00DF00D); | |
100 | CU_ASSERT(pdu.data_iov[1].iov_len == 4096 * 2); | |
101 | ||
102 | /* 3rd case: Number of input SGL entries is equal to the number of PDU SGL | |
103 | * entries. | |
104 | */ | |
105 | data_len = 0; | |
106 | for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) { | |
107 | iov[i].iov_base = (void *)(0xDEADBEEF + i); | |
108 | iov[i].iov_len = 512 * (i + 1); | |
109 | data_len += 512 * (i + 1); | |
110 | } | |
111 | ||
112 | nvme_tcp_pdu_set_data_buf(&pdu, iov, NVME_TCP_MAX_SGL_DESCRIPTORS, 0, data_len); | |
113 | ||
114 | CU_ASSERT(pdu.data_iovcnt == NVME_TCP_MAX_SGL_DESCRIPTORS); | |
115 | for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) { | |
116 | CU_ASSERT((uint64_t)pdu.data_iov[i].iov_base == 0xDEADBEEF + i); | |
117 | CU_ASSERT(pdu.data_iov[i].iov_len == 512 * (i + 1)); | |
118 | } | |
119 | } | |
120 | ||
121 | static void | |
122 | test_nvme_tcp_build_iovs(void) | |
123 | { | |
124 | const uintptr_t pdu_iov_len = 4096; | |
125 | struct nvme_tcp_pdu pdu = {}; | |
126 | struct iovec iovs[5] = {}; | |
127 | uint32_t mapped_length = 0; | |
128 | int rc; | |
129 | ||
130 | pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD; | |
131 | pdu.hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd); | |
132 | pdu.hdr.common.plen = pdu.hdr.common.hlen + SPDK_NVME_TCP_DIGEST_LEN + pdu_iov_len * 2 + | |
133 | SPDK_NVME_TCP_DIGEST_LEN; | |
134 | pdu.data_len = pdu_iov_len * 2; | |
135 | pdu.padding_len = 0; | |
136 | ||
137 | pdu.data_iov[0].iov_base = (void *)0xDEADBEEF; | |
138 | pdu.data_iov[0].iov_len = pdu_iov_len; | |
139 | pdu.data_iov[1].iov_base = (void *)(0xDEADBEEF + pdu_iov_len); | |
140 | pdu.data_iov[1].iov_len = pdu_iov_len; | |
141 | pdu.data_iovcnt = 2; | |
142 | ||
143 | rc = nvme_tcp_build_iovs(iovs, 5, &pdu, true, true, &mapped_length); | |
144 | CU_ASSERT(rc == 4); | |
145 | CU_ASSERT(iovs[0].iov_base == (void *)&pdu.hdr.raw); | |
146 | CU_ASSERT(iovs[0].iov_len == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN); | |
147 | CU_ASSERT(iovs[1].iov_base == (void *)0xDEADBEEF); | |
148 | CU_ASSERT(iovs[1].iov_len == pdu_iov_len); | |
149 | CU_ASSERT(iovs[2].iov_base == (void *)(0xDEADBEEF + pdu_iov_len)); | |
150 | CU_ASSERT(iovs[2].iov_len == pdu_iov_len); | |
151 | CU_ASSERT(iovs[3].iov_base == (void *)pdu.data_digest); | |
152 | CU_ASSERT(iovs[3].iov_len == SPDK_NVME_TCP_DIGEST_LEN); | |
153 | CU_ASSERT(mapped_length == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN + | |
154 | pdu_iov_len * 2 + SPDK_NVME_TCP_DIGEST_LEN); | |
155 | ||
156 | /* Add a new data_iov entry, update pdu iov count and data length */ | |
157 | pdu.data_iov[2].iov_base = (void *)(0xBAADF00D); | |
158 | pdu.data_iov[2].iov_len = 123; | |
159 | pdu.data_iovcnt = 3; | |
160 | pdu.data_len += 123; | |
161 | pdu.hdr.common.plen += 123; | |
162 | ||
163 | rc = nvme_tcp_build_iovs(iovs, 5, &pdu, true, true, &mapped_length); | |
164 | CU_ASSERT(rc == 5); | |
165 | CU_ASSERT(iovs[0].iov_base == (void *)&pdu.hdr.raw); | |
166 | CU_ASSERT(iovs[0].iov_len == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN); | |
167 | CU_ASSERT(iovs[1].iov_base == (void *)0xDEADBEEF); | |
168 | CU_ASSERT(iovs[1].iov_len == pdu_iov_len); | |
169 | CU_ASSERT(iovs[2].iov_base == (void *)(0xDEADBEEF + pdu_iov_len)); | |
170 | CU_ASSERT(iovs[2].iov_len == pdu_iov_len); | |
171 | CU_ASSERT(iovs[3].iov_base == (void *)(0xBAADF00D)); | |
172 | CU_ASSERT(iovs[3].iov_len == 123); | |
173 | CU_ASSERT(iovs[4].iov_base == (void *)pdu.data_digest); | |
174 | CU_ASSERT(iovs[4].iov_len == SPDK_NVME_TCP_DIGEST_LEN); | |
175 | CU_ASSERT(mapped_length == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN + | |
176 | pdu_iov_len * 2 + SPDK_NVME_TCP_DIGEST_LEN + 123); | |
177 | } | |
178 | ||
/* Minimal stand-in for a bdev I/O: the SGL state consumed by the
 * reset_sgl/next_sge callbacks below.
 */
struct nvme_tcp_ut_bdev_io {
	struct iovec iovs[NVME_TCP_MAX_SGL_DESCRIPTORS];	/* source SGL entries handed out by next_sge */
	int iovpos;						/* index of the next entry to hand out */
};
183 | ||
184 | /* essentially a simplification of bdev_nvme_next_sge and bdev_nvme_reset_sgl */ | |
185 | static void | |
186 | nvme_tcp_ut_reset_sgl(void *cb_arg, uint32_t offset) | |
187 | { | |
188 | struct nvme_tcp_ut_bdev_io *bio = cb_arg; | |
189 | struct iovec *iov; | |
190 | ||
191 | for (bio->iovpos = 0; bio->iovpos < NVME_TCP_MAX_SGL_DESCRIPTORS; bio->iovpos++) { | |
192 | iov = &bio->iovs[bio->iovpos]; | |
193 | /* Offset must be aligned with the start of any SGL entry */ | |
194 | if (offset == 0) { | |
195 | break; | |
196 | } | |
197 | ||
198 | SPDK_CU_ASSERT_FATAL(offset >= iov->iov_len); | |
199 | offset -= iov->iov_len; | |
200 | } | |
201 | ||
202 | SPDK_CU_ASSERT_FATAL(offset == 0); | |
203 | SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_TCP_MAX_SGL_DESCRIPTORS); | |
204 | } | |
205 | ||
206 | static int | |
207 | nvme_tcp_ut_next_sge(void *cb_arg, void **address, uint32_t *length) | |
208 | { | |
209 | struct nvme_tcp_ut_bdev_io *bio = cb_arg; | |
210 | struct iovec *iov; | |
211 | ||
212 | SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_TCP_MAX_SGL_DESCRIPTORS); | |
213 | ||
214 | iov = &bio->iovs[bio->iovpos]; | |
215 | ||
216 | *address = iov->iov_base; | |
217 | *length = iov->iov_len; | |
218 | bio->iovpos++; | |
219 | ||
220 | return 0; | |
221 | } | |
222 | ||
223 | static void | |
224 | test_nvme_tcp_build_sgl_request(void) | |
225 | { | |
226 | struct nvme_tcp_qpair tqpair; | |
227 | struct spdk_nvme_ctrlr ctrlr = {0}; | |
228 | struct nvme_tcp_req tcp_req = {0}; | |
229 | struct nvme_request req = {{0}}; | |
230 | struct nvme_tcp_ut_bdev_io bio; | |
231 | uint64_t i; | |
232 | int rc; | |
233 | ||
234 | ctrlr.max_sges = NVME_TCP_MAX_SGL_DESCRIPTORS; | |
235 | tqpair.qpair.ctrlr = &ctrlr; | |
236 | tcp_req.req = &req; | |
237 | ||
238 | req.payload.reset_sgl_fn = nvme_tcp_ut_reset_sgl; | |
239 | req.payload.next_sge_fn = nvme_tcp_ut_next_sge; | |
240 | req.payload.contig_or_cb_arg = &bio; | |
241 | req.qpair = &tqpair.qpair; | |
242 | ||
243 | for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) { | |
244 | bio.iovs[i].iov_base = (void *)(0xFEEDB000 + i * 0x1000); | |
245 | bio.iovs[i].iov_len = 0; | |
246 | } | |
247 | ||
248 | /* Test case 1: Single SGL. Expected: PASS */ | |
249 | bio.iovpos = 0; | |
250 | req.payload_offset = 0; | |
251 | req.payload_size = 0x1000; | |
252 | bio.iovs[0].iov_len = 0x1000; | |
253 | rc = nvme_tcp_build_sgl_request(&tqpair, &tcp_req); | |
254 | SPDK_CU_ASSERT_FATAL(rc == 0); | |
255 | CU_ASSERT(bio.iovpos == 1); | |
256 | CU_ASSERT((uint64_t)tcp_req.iov[0].iov_base == (uint64_t)bio.iovs[0].iov_base); | |
257 | CU_ASSERT(tcp_req.iov[0].iov_len == bio.iovs[0].iov_len); | |
258 | CU_ASSERT(tcp_req.iovcnt == 1); | |
259 | ||
260 | /* Test case 2: Multiple SGL. Expected: PASS */ | |
261 | bio.iovpos = 0; | |
262 | req.payload_offset = 0; | |
263 | req.payload_size = 0x4000; | |
264 | for (i = 0; i < 4; i++) { | |
265 | bio.iovs[i].iov_len = 0x1000; | |
266 | } | |
267 | rc = nvme_tcp_build_sgl_request(&tqpair, &tcp_req); | |
268 | SPDK_CU_ASSERT_FATAL(rc == 0); | |
269 | CU_ASSERT(bio.iovpos == 4); | |
270 | CU_ASSERT(tcp_req.iovcnt == 4); | |
271 | for (i = 0; i < 4; i++) { | |
272 | CU_ASSERT(tcp_req.iov[i].iov_len == bio.iovs[i].iov_len); | |
273 | CU_ASSERT((uint64_t)tcp_req.iov[i].iov_base == (uint64_t)bio.iovs[i].iov_base); | |
274 | } | |
275 | ||
276 | /* Test case 3: Payload is bigger than SGL. Expected: FAIL */ | |
277 | bio.iovpos = 0; | |
278 | req.payload_offset = 0; | |
279 | req.payload_size = 0x17000; | |
280 | for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) { | |
281 | bio.iovs[i].iov_len = 0x1000; | |
282 | } | |
283 | rc = nvme_tcp_build_sgl_request(&tqpair, &tcp_req); | |
284 | SPDK_CU_ASSERT_FATAL(rc != 0); | |
285 | CU_ASSERT(bio.iovpos == NVME_TCP_MAX_SGL_DESCRIPTORS); | |
286 | for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) { | |
287 | CU_ASSERT(tcp_req.iov[i].iov_len == bio.iovs[i].iov_len); | |
288 | CU_ASSERT((uint64_t)tcp_req.iov[i].iov_base == (uint64_t)bio.iovs[i].iov_base); | |
289 | } | |
290 | } | |
291 | ||
/*
 * Verify nvme_tcp_pdu_set_data_buf() when a DIF context is attached: the
 * requested (offset, length) are in data bytes, while the buffer addresses
 * and lengths must account for the 8 metadata bytes interleaved after every
 * data block.
 */
static void
test_nvme_tcp_pdu_set_data_buf_with_md(void)
{
	struct nvme_tcp_pdu pdu = {};
	struct iovec iovs[7] = {};
	struct spdk_dif_ctx dif_ctx = {};
	int rc;

	pdu.dif_ctx = &dif_ctx;

	/* Extended LBA format: 520-byte blocks carrying 8 bytes of metadata
	 * each; DIF type is disabled, so only the layout matters here.
	 */
	rc = spdk_dif_ctx_init(&dif_ctx, 520, 8, true, false, SPDK_DIF_DISABLE, 0,
			       0, 0, 0, 0, 0);
	CU_ASSERT(rc == 0);

	/* Single iovec case */
	iovs[0].iov_base = (void *)0xDEADBEEF;
	iovs[0].iov_len = 2080;

	/* 500 data bytes from offset 0 fit inside the first block: no
	 * metadata is crossed, so buffer length equals data length. */
	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 1, 0, 500);

	CU_ASSERT(dif_ctx.data_offset == 0);
	CU_ASSERT(pdu.data_len == 500);
	CU_ASSERT(pdu.data_iovcnt == 1);
	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 500);

	/* 1000 data bytes from offset 500 cross two block boundaries, so the
	 * buffer span grows by 2 * 8 metadata bytes (1016 total). */
	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 1, 500, 1000);

	CU_ASSERT(dif_ctx.data_offset == 500);
	CU_ASSERT(pdu.data_len == 1000);
	CU_ASSERT(pdu.data_iovcnt == 1);
	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)(0xDEADBEEF + 500));
	CU_ASSERT(pdu.data_iov[0].iov_len == 1016);

	/* Final 548 data bytes: start address is shifted by the 16 metadata
	 * bytes already passed, and the span includes 2 more 8-byte tags. */
	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 1, 1500, 548);

	CU_ASSERT(dif_ctx.data_offset == 1500);
	CU_ASSERT(pdu.data_len == 548);
	CU_ASSERT(pdu.data_iovcnt == 1);
	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)(0xDEADBEEF + 1516));
	CU_ASSERT(pdu.data_iov[0].iov_len == 564);

	/* Multiple iovecs case: the same 2048 data bytes + metadata, but
	 * scattered across seven irregularly sized iovecs. */
	iovs[0].iov_base = (void *)0xDEADBEEF;
	iovs[0].iov_len = 256;
	iovs[1].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x1000));
	iovs[1].iov_len = 256 + 1;
	iovs[2].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x2000));
	iovs[2].iov_len = 4;
	iovs[3].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x3000));
	iovs[3].iov_len = 3 + 123;
	iovs[4].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x4000));
	iovs[4].iov_len = 389 + 6;
	iovs[5].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x5000));
	iovs[5].iov_len = 2 + 512 + 8 + 432;
	iovs[6].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x6000));
	iovs[6].iov_len = 80 + 8;

	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 7, 0, 500);

	CU_ASSERT(dif_ctx.data_offset == 0);
	CU_ASSERT(pdu.data_len == 500);
	CU_ASSERT(pdu.data_iovcnt == 2);
	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 256);
	CU_ASSERT(pdu.data_iov[1].iov_base == (void *)(0xDEADBEEF + 0x1000));
	CU_ASSERT(pdu.data_iov[1].iov_len == 244);

	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 7, 500, 1000);

	CU_ASSERT(dif_ctx.data_offset == 500);
	CU_ASSERT(pdu.data_len == 1000);
	CU_ASSERT(pdu.data_iovcnt == 5);
	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)(0xDEADBEEF + 0x1000 + 244));
	CU_ASSERT(pdu.data_iov[0].iov_len == 13);
	CU_ASSERT(pdu.data_iov[1].iov_base == (void *)(0xDEADBEEF + 0x2000));
	CU_ASSERT(pdu.data_iov[1].iov_len == 4);
	CU_ASSERT(pdu.data_iov[2].iov_base == (void *)(0xDEADBEEF + 0x3000));
	CU_ASSERT(pdu.data_iov[2].iov_len == 3 + 123);
	CU_ASSERT(pdu.data_iov[3].iov_base == (void *)(0xDEADBEEF + 0x4000));
	CU_ASSERT(pdu.data_iov[3].iov_len == 395);
	CU_ASSERT(pdu.data_iov[4].iov_base == (void *)(0xDEADBEEF + 0x5000));
	CU_ASSERT(pdu.data_iov[4].iov_len == 478);

	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 7, 1500, 548);

	CU_ASSERT(dif_ctx.data_offset == 1500);
	CU_ASSERT(pdu.data_len == 548);
	CU_ASSERT(pdu.data_iovcnt == 2);
	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)(0xDEADBEEF + 0x5000 + 478));
	CU_ASSERT(pdu.data_iov[0].iov_len == 476);
	CU_ASSERT(pdu.data_iov[1].iov_base == (void *)(0xDEADBEEF + 0x6000));
	CU_ASSERT(pdu.data_iov[1].iov_len == 88);
}
386 | ||
/*
 * Verify nvme_tcp_build_iovs() with a DIF context: one contiguous extended-LBA
 * buffer ((512 + 8) * 8 bytes) must be emitted as eight 512-byte data iovecs,
 * each skipping the 8 metadata bytes that follow it in the buffer.
 */
static void
test_nvme_tcp_build_iovs_with_md(void)
{
	struct nvme_tcp_pdu pdu = {};
	struct iovec iovs[11] = {};
	struct spdk_dif_ctx dif_ctx = {};
	uint32_t mapped_length = 0;
	int rc;

	/* Extended LBA format: 520-byte blocks with 8 metadata bytes each;
	 * DIF type disabled — only the data/metadata layout is exercised. */
	rc = spdk_dif_ctx_init(&dif_ctx, 520, 8, true, false, SPDK_DIF_DISABLE, 0,
			       0, 0, 0, 0, 0);
	CU_ASSERT(rc == 0);

	pdu.dif_ctx = &dif_ctx;

	/* CapsuleCmd PDU: header digest + 8 * 512 data bytes + data digest. */
	pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	pdu.hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
	pdu.hdr.common.plen = pdu.hdr.common.hlen + SPDK_NVME_TCP_DIGEST_LEN + 512 * 8 +
			      SPDK_NVME_TCP_DIGEST_LEN;
	pdu.data_len = 512 * 8;
	pdu.padding_len = 0;

	pdu.data_iov[0].iov_base = (void *)0xDEADBEEF;
	pdu.data_iov[0].iov_len = (512 + 8) * 8;
	pdu.data_iovcnt = 1;

	/* Expect 10 iovecs: header(+digest), 8 data blocks, data digest. */
	rc = nvme_tcp_build_iovs(iovs, 11, &pdu, true, true, &mapped_length);
	CU_ASSERT(rc == 10);
	CU_ASSERT(iovs[0].iov_base == (void *)&pdu.hdr.raw);
	CU_ASSERT(iovs[0].iov_len == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN);
	/* Each data iovec starts 520 bytes after the previous one (512 data
	 * + 8 skipped metadata) but carries only 512 bytes. */
	CU_ASSERT(iovs[1].iov_base == (void *)0xDEADBEEF);
	CU_ASSERT(iovs[1].iov_len == 512);
	CU_ASSERT(iovs[2].iov_base == (void *)(0xDEADBEEF + 520));
	CU_ASSERT(iovs[2].iov_len == 512);
	CU_ASSERT(iovs[3].iov_base == (void *)(0xDEADBEEF + 520 * 2));
	CU_ASSERT(iovs[3].iov_len == 512);
	CU_ASSERT(iovs[4].iov_base == (void *)(0xDEADBEEF + 520 * 3));
	CU_ASSERT(iovs[4].iov_len == 512);
	CU_ASSERT(iovs[5].iov_base == (void *)(0xDEADBEEF + 520 * 4));
	CU_ASSERT(iovs[5].iov_len == 512);
	CU_ASSERT(iovs[6].iov_base == (void *)(0xDEADBEEF + 520 * 5));
	CU_ASSERT(iovs[6].iov_len == 512);
	CU_ASSERT(iovs[7].iov_base == (void *)(0xDEADBEEF + 520 * 6));
	CU_ASSERT(iovs[7].iov_len == 512);
	CU_ASSERT(iovs[8].iov_base == (void *)(0xDEADBEEF + 520 * 7));
	CU_ASSERT(iovs[8].iov_len == 512);
	CU_ASSERT(iovs[9].iov_base == (void *)pdu.data_digest);
	CU_ASSERT(iovs[9].iov_len == SPDK_NVME_TCP_DIGEST_LEN);
	/* Mapped length counts data bytes only, not the skipped metadata. */
	CU_ASSERT(mapped_length == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN +
		  512 * 8 + SPDK_NVME_TCP_DIGEST_LEN);
}
438 | ||
439 | int main(int argc, char **argv) | |
440 | { | |
441 | CU_pSuite suite = NULL; | |
442 | unsigned int num_failures; | |
443 | ||
444 | CU_set_error_action(CUEA_ABORT); | |
445 | CU_initialize_registry(); | |
446 | ||
447 | suite = CU_add_suite("nvme_tcp", NULL, NULL); | |
448 | CU_ADD_TEST(suite, test_nvme_tcp_pdu_set_data_buf); | |
449 | CU_ADD_TEST(suite, test_nvme_tcp_build_iovs); | |
450 | CU_ADD_TEST(suite, test_nvme_tcp_build_sgl_request); | |
451 | CU_ADD_TEST(suite, test_nvme_tcp_pdu_set_data_buf_with_md); | |
452 | CU_ADD_TEST(suite, test_nvme_tcp_build_iovs_with_md); | |
453 | ||
454 | CU_basic_set_mode(CU_BRM_VERBOSE); | |
455 | CU_basic_run_tests(); | |
456 | num_failures = CU_get_number_of_failures(); | |
457 | CU_cleanup_registry(); | |
458 | return num_failures; | |
459 | } |