4 * Copyright (c) Intel Corporation.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 #include "spdk_internal/copy_engine.h"
42 #include "spdk/conf.h"
44 #include "spdk/event.h"
45 #include "spdk/io_channel.h"
46 #include "spdk/ioat.h"
/* Upper bound on I/OAT channels tracked and on whitelist entries. */
#define IOAT_MAX_CHANNELS 64

/*
 * One attached I/OAT DMA channel plus its allocation state.
 * Instances live on the global g_devices list.
 * NOTE(review): the struct opening and is_allocated member were lost in
 * extraction; is_allocated is reconstructed from its uses in
 * ioat_allocate_device()/ioat_free_device().
 */
struct ioat_device {
	struct spdk_ioat_chan *ioat;
	bool is_allocated;
	/** linked list pointer for device list */
	TAILQ_ENTRY(ioat_device) tailq;
};
/* Global list of all attached I/OAT devices; guarded by g_ioat_mutex. */
static TAILQ_HEAD(, ioat_device) g_devices = TAILQ_HEAD_INITIALIZER(g_devices);

/* Serializes allocation state changes on g_devices entries. */
static pthread_mutex_t g_ioat_mutex = PTHREAD_MUTEX_INITIALIZER;
/*
 * Per-I/O-channel context: the I/OAT channel servicing this SPDK channel,
 * the owning device record, and the poller that drives completions.
 */
struct ioat_io_channel {
	struct spdk_ioat_chan	*ioat_ch;
	struct ioat_device	*ioat_dev;
	struct spdk_poller	*poller;
};
67 ioat_find_dev_by_whitelist_bdf(const struct spdk_pci_addr
*pci_addr
,
68 const struct spdk_pci_addr
*whitelist
,
69 int num_whitelist_devices
)
73 for (i
= 0; i
< num_whitelist_devices
; i
++) {
74 if (spdk_pci_addr_compare(pci_addr
, &whitelist
[i
]) == 0) {
81 static struct ioat_device
*
82 ioat_allocate_device(void)
84 struct ioat_device
*dev
;
86 pthread_mutex_lock(&g_ioat_mutex
);
87 TAILQ_FOREACH(dev
, &g_devices
, tailq
) {
88 if (!dev
->is_allocated
) {
89 dev
->is_allocated
= true;
90 pthread_mutex_unlock(&g_ioat_mutex
);
94 pthread_mutex_unlock(&g_ioat_mutex
);
100 ioat_free_device(struct ioat_device
*dev
)
102 pthread_mutex_lock(&g_ioat_mutex
);
103 dev
->is_allocated
= false;
104 pthread_mutex_unlock(&g_ioat_mutex
);
108 spdk_copy_completion_cb cb
;
/* Forward declarations needed by the module-register macro below. */
static int copy_engine_ioat_init(void);
static void copy_engine_ioat_exit(void);
115 copy_engine_ioat_get_ctx_size(void)
117 return sizeof(struct ioat_task
) + sizeof(struct spdk_copy_task
);
120 SPDK_COPY_MODULE_REGISTER(copy_engine_ioat_init
, copy_engine_ioat_exit
, NULL
,
121 copy_engine_ioat_get_ctx_size
)
124 copy_engine_ioat_exit(void)
126 struct ioat_device
*dev
;
128 while (!TAILQ_EMPTY(&g_devices
)) {
129 dev
= TAILQ_FIRST(&g_devices
);
130 TAILQ_REMOVE(&g_devices
, dev
, tailq
);
131 spdk_ioat_detach(dev
->ioat
);
132 ioat_free_device(dev
);
139 ioat_done(void *cb_arg
)
141 struct spdk_copy_task
*copy_req
;
142 struct ioat_task
*ioat_task
= cb_arg
;
144 copy_req
= (struct spdk_copy_task
*)
145 ((uintptr_t)ioat_task
-
146 offsetof(struct spdk_copy_task
, offload_ctx
));
148 ioat_task
->cb(copy_req
, 0);
152 ioat_copy_submit(void *cb_arg
, struct spdk_io_channel
*ch
, void *dst
, void *src
, uint64_t nbytes
,
153 spdk_copy_completion_cb cb
)
155 struct ioat_task
*ioat_task
= (struct ioat_task
*)cb_arg
;
156 struct ioat_io_channel
*ioat_ch
= spdk_io_channel_get_ctx(ch
);
158 assert(ioat_ch
->ioat_ch
!= NULL
);
162 return spdk_ioat_submit_copy(ioat_ch
->ioat_ch
, ioat_task
, ioat_done
, dst
, src
, nbytes
);
166 ioat_copy_submit_fill(void *cb_arg
, struct spdk_io_channel
*ch
, void *dst
, uint8_t fill
,
167 uint64_t nbytes
, spdk_copy_completion_cb cb
)
169 struct ioat_task
*ioat_task
= (struct ioat_task
*)cb_arg
;
170 struct ioat_io_channel
*ioat_ch
= spdk_io_channel_get_ctx(ch
);
171 uint64_t fill64
= 0x0101010101010101ULL
* fill
;
173 assert(ioat_ch
->ioat_ch
!= NULL
);
177 return spdk_ioat_submit_fill(ioat_ch
->ioat_ch
, ioat_task
, ioat_done
, dst
, fill64
, nbytes
);
/*
 * Poller callback: drain completions on one I/OAT channel.
 * NOTE(review): the signature was lost in extraction; the name and the
 * void *arg shape are reconstructed from the spdk_poller_register() call in
 * ioat_create_cb(), which passes ioat_poll with ch->ioat_ch as its arg.
 */
static void
ioat_poll(void *arg)
{
	struct spdk_ioat_chan *chan = arg;

	spdk_ioat_process_events(chan);
}
188 static struct spdk_io_channel
*ioat_get_io_channel(uint32_t priority
);
190 static struct spdk_copy_engine ioat_copy_engine
= {
191 .copy
= ioat_copy_submit
,
192 .fill
= ioat_copy_submit_fill
,
193 .get_io_channel
= ioat_get_io_channel
,
197 ioat_create_cb(void *io_device
, uint32_t priority
, void *ctx_buf
, void *unique_ctx
)
199 struct ioat_io_channel
*ch
= ctx_buf
;
200 struct ioat_device
*ioat_dev
;
202 ioat_dev
= ioat_allocate_device();
203 if (ioat_dev
== NULL
) {
207 ch
->ioat_dev
= ioat_dev
;
208 ch
->ioat_ch
= ioat_dev
->ioat
;
209 spdk_poller_register(&ch
->poller
, ioat_poll
, ch
->ioat_ch
,
210 spdk_env_get_current_core(), 0);
215 ioat_destroy_cb(void *io_device
, void *ctx_buf
)
217 struct ioat_io_channel
*ch
= ctx_buf
;
219 ioat_free_device(ch
->ioat_dev
);
220 spdk_poller_unregister(&ch
->poller
, NULL
);
223 static struct spdk_io_channel
*
224 ioat_get_io_channel(uint32_t priority
)
226 return spdk_get_io_channel(&ioat_copy_engine
, priority
, false, NULL
);
229 struct ioat_probe_ctx
{
230 int num_whitelist_devices
;
231 struct spdk_pci_addr whitelist
[IOAT_MAX_CHANNELS
];
235 probe_cb(void *cb_ctx
, struct spdk_pci_device
*pci_dev
)
237 struct ioat_probe_ctx
*ctx
= cb_ctx
;
238 struct spdk_pci_addr pci_addr
= spdk_pci_device_get_addr(pci_dev
);
240 SPDK_NOTICELOG(" Found matching device at %04x:%02x:%02x.%x vendor:0x%04x device:0x%04x\n",
245 spdk_pci_device_get_vendor_id(pci_dev
),
246 spdk_pci_device_get_device_id(pci_dev
));
248 if (ctx
->num_whitelist_devices
> 0 &&
249 !ioat_find_dev_by_whitelist_bdf(&pci_addr
, ctx
->whitelist
, ctx
->num_whitelist_devices
)) {
253 /* Claim the device in case conflict with other process */
254 if (spdk_pci_device_claim(&pci_addr
) != 0) {
262 attach_cb(void *cb_ctx
, struct spdk_pci_device
*pci_dev
, struct spdk_ioat_chan
*ioat
)
264 struct ioat_device
*dev
;
266 dev
= spdk_zmalloc(sizeof(*dev
), 0, NULL
);
268 SPDK_ERRLOG("Failed to allocate device struct\n");
273 TAILQ_INSERT_TAIL(&g_devices
, dev
, tailq
);
277 copy_engine_ioat_init(void)
279 struct spdk_conf_section
*sp
= spdk_conf_find_section(NULL
, "Ioat");
282 struct ioat_probe_ctx probe_ctx
= {};
285 if (spdk_conf_section_get_boolval(sp
, "Disable", false)) {
290 /*Init the whitelist*/
291 for (i
= 0; i
< IOAT_MAX_CHANNELS
; i
++) {
292 pci_bdf
= spdk_conf_section_get_nmval(sp
, "Whitelist", i
, 0);
296 if (spdk_pci_addr_parse(&probe_ctx
.whitelist
[probe_ctx
.num_whitelist_devices
], pci_bdf
) < 0) {
297 SPDK_ERRLOG("Invalid Ioat Whitelist address %s\n", pci_bdf
);
300 probe_ctx
.num_whitelist_devices
++;
304 if (spdk_ioat_probe(&probe_ctx
, probe_cb
, attach_cb
) != 0) {
305 SPDK_ERRLOG("spdk_ioat_probe() failed\n");
309 SPDK_NOTICELOG("Ioat Copy Engine Offload Enabled\n");
310 spdk_copy_engine_register(&ioat_copy_engine
);
311 spdk_io_device_register(&ioat_copy_engine
, ioat_create_cb
, ioat_destroy_cb
,
312 sizeof(struct ioat_io_channel
));