]> git.proxmox.com Git - ceph.git/blob - ceph/src/spdk/test/unit/lib/idxd/idxd.c/idxd_ut.c
update source to Ceph Pacific 16.2.2
[ceph.git] / ceph / src / spdk / test / unit / lib / idxd / idxd.c / idxd_ut.c
1 /*-
2 * BSD LICENSE
3 *
4 * Copyright (c) Intel Corporation.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 #include "spdk_cunit.h"
35 #include "spdk_internal/mock.h"
36 #include "spdk_internal/idxd.h"
37 #include "common/lib/test_env.c"
38 #include "idxd/idxd.c"
39
40 #define FAKE_REG_SIZE 0x800
41 #define NUM_GROUPS 4
42 #define NUM_WQ_PER_GROUP 1
43 #define NUM_ENGINES_PER_GROUP 1
44 #define TOTAL_WQS (NUM_GROUPS * NUM_WQ_PER_GROUP)
45 #define TOTAL_ENGINES (NUM_GROUPS * NUM_ENGINES_PER_GROUP)
46
47 int
48 spdk_pci_enumerate(struct spdk_pci_driver *driver, spdk_pci_enum_cb enum_cb, void *enum_ctx)
49 {
50 return -1;
51 }
52
53 int
54 spdk_pci_device_map_bar(struct spdk_pci_device *dev, uint32_t bar,
55 void **mapped_addr, uint64_t *phys_addr, uint64_t *size)
56 {
57 *mapped_addr = NULL;
58 *phys_addr = 0;
59 *size = 0;
60 return 0;
61 }
62
63 int
64 spdk_pci_device_unmap_bar(struct spdk_pci_device *dev, uint32_t bar, void *addr)
65 {
66 return 0;
67 }
68
69 int
70 spdk_pci_device_cfg_read32(struct spdk_pci_device *dev, uint32_t *value,
71 uint32_t offset)
72 {
73 *value = 0xFFFFFFFFu;
74 return 0;
75 }
76
77 int
78 spdk_pci_device_cfg_write32(struct spdk_pci_device *dev, uint32_t value,
79 uint32_t offset)
80 {
81 return 0;
82 }
83
/*
 * Substitute a no-op for the movdir64b instruction wrapper so the code
 * under test can "submit" descriptors without touching real hardware.
 */
#define movdir64b mock_movdir64b
static inline void
mock_movdir64b(void *dst, const void *src)
{
	(void)dst;
	(void)src;
}
90
#define WQ_CFG_OFFSET 0x500
#define TOTAL_WQE_SIZE 0x40
/*
 * Verify idxd_wq_config(): the total WQ space must be split evenly across
 * the configured queues, each queue's in-memory wqcfg must carry the
 * expected mode/size/priority fields, and the raw WQCFG dwords written to
 * the (fake) MMIO register region must match the `expected` table.
 */
static int
test_idxd_wq_config(void)
{
	struct spdk_idxd_device idxd = {};
	union idxd_wqcfg wqcfg = {};
	/* Raw WQCFG dwords idxd_wq_config() is expected to emit for every WQ. */
	uint32_t expected[8] = {0x10, 0, 0x11, 0x11e, 0, 0, 0x40000000, 0};
	uint32_t wq_size;
	int rc, i, j;

	/* Zeroed buffer stands in for the device's BAR0 register space. */
	idxd.reg_base = calloc(1, FAKE_REG_SIZE);
	SPDK_CU_ASSERT_FATAL(idxd.reg_base != NULL);

	/* Fake the capability registers the code under test reads. */
	g_dev_cfg = &g_dev_cfg0;
	idxd.registers.wqcap.total_wq_size = TOTAL_WQE_SIZE;
	idxd.registers.wqcap.num_wqs = TOTAL_WQS;
	idxd.registers.gencap.max_batch_shift = LOG2_WQ_MAX_BATCH;
	idxd.registers.gencap.max_xfer_shift = LOG2_WQ_MAX_XFER;
	idxd.wqcfg_offset = WQ_CFG_OFFSET;
	/* Each queue should get an equal share of the device's WQ space. */
	wq_size = idxd.registers.wqcap.total_wq_size / g_dev_cfg->total_wqs;

	rc = idxd_wq_config(&idxd);
	CU_ASSERT(rc == 0);
	for (i = 0; i < g_dev_cfg->total_wqs; i++) {
		CU_ASSERT(idxd.queues[i].wqcfg.wq_size == wq_size);
		CU_ASSERT(idxd.queues[i].wqcfg.mode == WQ_MODE_DEDICATED);
		CU_ASSERT(idxd.queues[i].wqcfg.max_batch_shift == LOG2_WQ_MAX_BATCH);
		CU_ASSERT(idxd.queues[i].wqcfg.max_xfer_shift == LOG2_WQ_MAX_XFER);
		CU_ASSERT(idxd.queues[i].wqcfg.wq_state == WQ_ENABLED);
		CU_ASSERT(idxd.queues[i].wqcfg.priority == WQ_PRIORITY_1);
		CU_ASSERT(idxd.queues[i].idxd == &idxd);
		/* Queues are assigned to groups round-robin. */
		CU_ASSERT(idxd.queues[i].group == &idxd.groups[i % g_dev_cfg->num_groups]);
	}

	/*
	 * Read the raw config back out of the fake MMIO region; each WQ's
	 * entry is WQCFG_NUM_DWORDS (8) dwords = 32 bytes apart.
	 */
	for (i = 0 ; i < idxd.registers.wqcap.num_wqs; i++) {
		for (j = 0 ; j < WQCFG_NUM_DWORDS; j++) {
			wqcfg.raw[j] = spdk_mmio_read_4((uint32_t *)(idxd.reg_base + idxd.wqcfg_offset + i * 32 + j *
							4));
			CU_ASSERT(wqcfg.raw[j] == expected[j]);
		}
	}

	/* queues array is allocated by idxd_wq_config(). */
	free(idxd.queues);
	free(idxd.reg_base);

	return 0;
}
139
#define GRP_CFG_OFFSET 0x400
#define MAX_TOKENS 0x40
/*
 * Verify idxd_group_config(): each group must be assigned its own WQ and
 * engine (recorded as per-group bitmaps in the fake GRPCFG registers),
 * and the device's tokens must be divided evenly across the groups.
 */
static int
test_idxd_group_config(void)
{
	struct spdk_idxd_device idxd = {};
	uint64_t wqs[NUM_GROUPS] = {};
	uint64_t engines[NUM_GROUPS] = {};
	union idxd_group_flags flags[NUM_GROUPS] = {};
	int rc, i;
	uint64_t base_offset;

	/* Zeroed buffer stands in for the device's BAR0 register space. */
	idxd.reg_base = calloc(1, FAKE_REG_SIZE);
	SPDK_CU_ASSERT_FATAL(idxd.reg_base != NULL);

	/* Fake the capability registers the code under test reads. */
	g_dev_cfg = &g_dev_cfg0;
	idxd.registers.groupcap.num_groups = NUM_GROUPS;
	idxd.registers.enginecap.num_engines = TOTAL_ENGINES;
	idxd.registers.wqcap.num_wqs = TOTAL_WQS;
	idxd.registers.groupcap.total_tokens = MAX_TOKENS;
	idxd.grpcfg_offset = GRP_CFG_OFFSET;

	rc = idxd_group_config(&idxd);
	CU_ASSERT(rc == 0);
	/* Read each group's GRPCFG entry back; entries are 64 bytes apart. */
	for (i = 0 ; i < idxd.registers.groupcap.num_groups; i++) {
		base_offset = idxd.grpcfg_offset + i * 64;

		wqs[i] = spdk_mmio_read_8((uint64_t *)(idxd.reg_base + base_offset));
		engines[i] = spdk_mmio_read_8((uint64_t *)(idxd.reg_base + base_offset + CFG_ENGINE_OFFSET));
		flags[i].raw = spdk_mmio_read_8((uint64_t *)(idxd.reg_base + base_offset + CFG_FLAG_OFFSET));
	}
	/* wqe and engine arrays are indexed by group id and are bitmaps of assigned elements. */
	CU_ASSERT(wqs[0] == 0x1);
	CU_ASSERT(engines[0] == 0x1);
	CU_ASSERT(wqs[1] == 0x2);
	CU_ASSERT(engines[1] == 0x2);
	CU_ASSERT(flags[0].tokens_allowed == MAX_TOKENS / NUM_GROUPS);
	CU_ASSERT(flags[1].tokens_allowed == MAX_TOKENS / NUM_GROUPS);

	/* groups allocated by code under test. */
	free(idxd.groups);
	free(idxd.reg_base);

	return 0;
}
185
186 static int
187 test_idxd_reset_dev(void)
188 {
189 struct spdk_idxd_device idxd = {};
190 union idxd_cmdsts_reg *fake_cmd_status_reg;
191 int rc;
192
193 idxd.reg_base = calloc(1, FAKE_REG_SIZE);
194 SPDK_CU_ASSERT_FATAL(idxd.reg_base != NULL);
195 fake_cmd_status_reg = idxd.reg_base + IDXD_CMDSTS_OFFSET;
196
197 /* Test happy path */
198 rc = idxd_reset_dev(&idxd);
199 CU_ASSERT(rc == 0);
200
201 /* Test error reported path */
202 fake_cmd_status_reg->err = 1;
203 rc = idxd_reset_dev(&idxd);
204 CU_ASSERT(rc == -EINVAL);
205
206 free(idxd.reg_base);
207
208 return 0;
209 }
210
211 static int
212 test_idxd_wait_cmd(void)
213 {
214 struct spdk_idxd_device idxd = {};
215 int timeout = 1;
216 union idxd_cmdsts_reg *fake_cmd_status_reg;
217 int rc;
218
219 idxd.reg_base = calloc(1, FAKE_REG_SIZE);
220 SPDK_CU_ASSERT_FATAL(idxd.reg_base != NULL);
221 fake_cmd_status_reg = idxd.reg_base + IDXD_CMDSTS_OFFSET;
222
223 /* Test happy path. */
224 rc = idxd_wait_cmd(&idxd, timeout);
225 CU_ASSERT(rc == 0);
226
227 /* Setup up our fake register to set the error bit. */
228 fake_cmd_status_reg->err = 1;
229 rc = idxd_wait_cmd(&idxd, timeout);
230 CU_ASSERT(rc == -EINVAL);
231 fake_cmd_status_reg->err = 0;
232
233 /* Setup up our fake register to set the active bit. */
234 fake_cmd_status_reg->active = 1;
235 rc = idxd_wait_cmd(&idxd, timeout);
236 CU_ASSERT(rc == -EBUSY);
237
238 free(idxd.reg_base);
239
240 return 0;
241 }
242
243 static int
244 test_spdk_idxd_set_config(void)
245 {
246
247 g_dev_cfg = NULL;
248 spdk_idxd_set_config(0);
249 SPDK_CU_ASSERT_FATAL(g_dev_cfg != NULL);
250 CU_ASSERT(memcmp(&g_dev_cfg0, g_dev_cfg, sizeof(struct device_config)) == 0);
251
252 return 0;
253 }
254
255 static int
256 test_spdk_idxd_reconfigure_chan(void)
257 {
258 struct spdk_idxd_io_channel chan = {};
259 int rc;
260 uint32_t test_ring_size = 8;
261 uint32_t num_channels = 2;
262
263 chan.ring_ctrl.ring_slots = spdk_bit_array_create(test_ring_size);
264 chan.ring_ctrl.ring_size = test_ring_size;
265 chan.ring_ctrl.completions = spdk_zmalloc(test_ring_size * sizeof(struct idxd_hw_desc), 0, NULL,
266 SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
267 SPDK_CU_ASSERT_FATAL(chan.ring_ctrl.completions != NULL);
268
269 rc = spdk_idxd_reconfigure_chan(&chan, num_channels);
270 CU_ASSERT(rc == 0);
271 CU_ASSERT(chan.ring_ctrl.max_ring_slots == test_ring_size / num_channels);
272
273 spdk_bit_array_free(&chan.ring_ctrl.ring_slots);
274 spdk_free(chan.ring_ctrl.completions);
275 return 0;
276 }
277
278 int main(int argc, char **argv)
279 {
280 CU_pSuite suite = NULL;
281 unsigned int num_failures;
282
283 CU_set_error_action(CUEA_ABORT);
284 CU_initialize_registry();
285
286 suite = CU_add_suite("idxd", NULL, NULL);
287
288 CU_ADD_TEST(suite, test_spdk_idxd_reconfigure_chan);
289 CU_ADD_TEST(suite, test_spdk_idxd_set_config);
290 CU_ADD_TEST(suite, test_idxd_wait_cmd);
291 CU_ADD_TEST(suite, test_idxd_reset_dev);
292 CU_ADD_TEST(suite, test_idxd_group_config);
293 CU_ADD_TEST(suite, test_idxd_wq_config);
294
295 CU_basic_set_mode(CU_BRM_VERBOSE);
296 CU_basic_run_tests();
297 num_failures = CU_get_number_of_failures();
298 CU_cleanup_registry();
299 return num_failures;
300 }