]> git.proxmox.com Git - ceph.git/blob - ceph/src/spdk/lib/copy/ioat/copy_engine_ioat.c
add subtree-ish sources for 12.0.3
[ceph.git] / ceph / src / spdk / lib / copy / ioat / copy_engine_ioat.c
1 /*-
2 * BSD LICENSE
3 *
4 * Copyright (c) Intel Corporation.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 #include <assert.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <pthread.h>
38
39 #include "spdk_internal/copy_engine.h"
40
41 #include "spdk/env.h"
42 #include "spdk/conf.h"
43 #include "spdk/log.h"
44 #include "spdk/event.h"
45 #include "spdk/io_channel.h"
46 #include "spdk/ioat.h"
47
48 #define IOAT_MAX_CHANNELS 64
49
/* One discovered ioat DMA channel and its allocation state. Devices are
 * attached once at probe time and handed out to I/O channels on demand. */
struct ioat_device {
	/* Underlying ioat hardware channel handle (owned; detached at exit). */
	struct spdk_ioat_chan *ioat;
	/* true while an ioat_io_channel owns this device; guarded by g_ioat_mutex. */
	bool is_allocated;
	/** linked list pointer for device list */
	TAILQ_ENTRY(ioat_device) tailq;
};
56
/* All ioat devices found during probe; membership is fixed after init,
 * but the per-device is_allocated flag is protected by g_ioat_mutex. */
static TAILQ_HEAD(, ioat_device) g_devices = TAILQ_HEAD_INITIALIZER(g_devices);
static pthread_mutex_t g_ioat_mutex = PTHREAD_MUTEX_INITIALIZER;
59
/* Per-I/O-channel context: one exclusively owned ioat device plus the
 * poller that drains its completion queue. */
struct ioat_io_channel {
	/* Convenience copy of ioat_dev->ioat used on the submit path. */
	struct spdk_ioat_chan *ioat_ch;
	/* Device claimed from g_devices; released in ioat_destroy_cb. */
	struct ioat_device *ioat_dev;
	/* Periodically calls spdk_ioat_process_events() for this channel. */
	struct spdk_poller *poller;
};
65
66 static int
67 ioat_find_dev_by_whitelist_bdf(const struct spdk_pci_addr *pci_addr,
68 const struct spdk_pci_addr *whitelist,
69 int num_whitelist_devices)
70 {
71 int i;
72
73 for (i = 0; i < num_whitelist_devices; i++) {
74 if (spdk_pci_addr_compare(pci_addr, &whitelist[i]) == 0) {
75 return 1;
76 }
77 }
78 return 0;
79 }
80
81 static struct ioat_device *
82 ioat_allocate_device(void)
83 {
84 struct ioat_device *dev;
85
86 pthread_mutex_lock(&g_ioat_mutex);
87 TAILQ_FOREACH(dev, &g_devices, tailq) {
88 if (!dev->is_allocated) {
89 dev->is_allocated = true;
90 pthread_mutex_unlock(&g_ioat_mutex);
91 return dev;
92 }
93 }
94 pthread_mutex_unlock(&g_ioat_mutex);
95
96 return NULL;
97 }
98
99 static void
100 ioat_free_device(struct ioat_device *dev)
101 {
102 pthread_mutex_lock(&g_ioat_mutex);
103 dev->is_allocated = false;
104 pthread_mutex_unlock(&g_ioat_mutex);
105 }
106
/* Per-request driver context, carved out of the spdk_copy_task's
 * offload_ctx area (see copy_engine_ioat_get_ctx_size / ioat_done). */
struct ioat_task {
	/* Completion callback supplied by the generic copy engine caller. */
	spdk_copy_completion_cb cb;
};
110
111 static int copy_engine_ioat_init(void);
112 static void copy_engine_ioat_exit(void);
113
114 static size_t
115 copy_engine_ioat_get_ctx_size(void)
116 {
117 return sizeof(struct ioat_task) + sizeof(struct spdk_copy_task);
118 }
119
/* Register this driver with the copy engine framework. The third argument
 * (config-dump callback) is intentionally unset. */
SPDK_COPY_MODULE_REGISTER(copy_engine_ioat_init, copy_engine_ioat_exit, NULL,
			  copy_engine_ioat_get_ctx_size)
122
123 static void
124 copy_engine_ioat_exit(void)
125 {
126 struct ioat_device *dev;
127
128 while (!TAILQ_EMPTY(&g_devices)) {
129 dev = TAILQ_FIRST(&g_devices);
130 TAILQ_REMOVE(&g_devices, dev, tailq);
131 spdk_ioat_detach(dev->ioat);
132 ioat_free_device(dev);
133 spdk_free(dev);
134 }
135 return;
136 }
137
138 static void
139 ioat_done(void *cb_arg)
140 {
141 struct spdk_copy_task *copy_req;
142 struct ioat_task *ioat_task = cb_arg;
143
144 copy_req = (struct spdk_copy_task *)
145 ((uintptr_t)ioat_task -
146 offsetof(struct spdk_copy_task, offload_ctx));
147
148 ioat_task->cb(copy_req, 0);
149 }
150
151 static int64_t
152 ioat_copy_submit(void *cb_arg, struct spdk_io_channel *ch, void *dst, void *src, uint64_t nbytes,
153 spdk_copy_completion_cb cb)
154 {
155 struct ioat_task *ioat_task = (struct ioat_task *)cb_arg;
156 struct ioat_io_channel *ioat_ch = spdk_io_channel_get_ctx(ch);
157
158 assert(ioat_ch->ioat_ch != NULL);
159
160 ioat_task->cb = cb;
161
162 return spdk_ioat_submit_copy(ioat_ch->ioat_ch, ioat_task, ioat_done, dst, src, nbytes);
163 }
164
165 static int64_t
166 ioat_copy_submit_fill(void *cb_arg, struct spdk_io_channel *ch, void *dst, uint8_t fill,
167 uint64_t nbytes, spdk_copy_completion_cb cb)
168 {
169 struct ioat_task *ioat_task = (struct ioat_task *)cb_arg;
170 struct ioat_io_channel *ioat_ch = spdk_io_channel_get_ctx(ch);
171 uint64_t fill64 = 0x0101010101010101ULL * fill;
172
173 assert(ioat_ch->ioat_ch != NULL);
174
175 ioat_task->cb = cb;
176
177 return spdk_ioat_submit_fill(ioat_ch->ioat_ch, ioat_task, ioat_done, dst, fill64, nbytes);
178 }
179
/* Poller body: drain completed descriptors on the channel registered
 * in ioat_create_cb, invoking their ioat_done callbacks. */
static void
ioat_poll(void *arg)
{
	spdk_ioat_process_events((struct spdk_ioat_chan *)arg);
}
187
188 static struct spdk_io_channel *ioat_get_io_channel(uint32_t priority);
189
/* Operation table handed to the generic copy engine layer (registered
 * from copy_engine_ioat_init). */
static struct spdk_copy_engine ioat_copy_engine = {
	.copy = ioat_copy_submit,
	.fill = ioat_copy_submit_fill,
	.get_io_channel = ioat_get_io_channel,
};
195
196 static int
197 ioat_create_cb(void *io_device, uint32_t priority, void *ctx_buf, void *unique_ctx)
198 {
199 struct ioat_io_channel *ch = ctx_buf;
200 struct ioat_device *ioat_dev;
201
202 ioat_dev = ioat_allocate_device();
203 if (ioat_dev == NULL) {
204 return -1;
205 }
206
207 ch->ioat_dev = ioat_dev;
208 ch->ioat_ch = ioat_dev->ioat;
209 spdk_poller_register(&ch->poller, ioat_poll, ch->ioat_ch,
210 spdk_env_get_current_core(), 0);
211 return 0;
212 }
213
214 static void
215 ioat_destroy_cb(void *io_device, void *ctx_buf)
216 {
217 struct ioat_io_channel *ch = ctx_buf;
218
219 ioat_free_device(ch->ioat_dev);
220 spdk_poller_unregister(&ch->poller, NULL);
221 }
222
223 static struct spdk_io_channel *
224 ioat_get_io_channel(uint32_t priority)
225 {
226 return spdk_get_io_channel(&ioat_copy_engine, priority, false, NULL);
227 }
228
/* Probe-time context: optional whitelist of PCI addresses parsed from the
 * "Ioat" config section. num_whitelist_devices == 0 means "accept all". */
struct ioat_probe_ctx {
	int num_whitelist_devices;
	struct spdk_pci_addr whitelist[IOAT_MAX_CHANNELS];
};
233
234 static bool
235 probe_cb(void *cb_ctx, struct spdk_pci_device *pci_dev)
236 {
237 struct ioat_probe_ctx *ctx = cb_ctx;
238 struct spdk_pci_addr pci_addr = spdk_pci_device_get_addr(pci_dev);
239
240 SPDK_NOTICELOG(" Found matching device at %04x:%02x:%02x.%x vendor:0x%04x device:0x%04x\n",
241 pci_addr.domain,
242 pci_addr.bus,
243 pci_addr.dev,
244 pci_addr.func,
245 spdk_pci_device_get_vendor_id(pci_dev),
246 spdk_pci_device_get_device_id(pci_dev));
247
248 if (ctx->num_whitelist_devices > 0 &&
249 !ioat_find_dev_by_whitelist_bdf(&pci_addr, ctx->whitelist, ctx->num_whitelist_devices)) {
250 return false;
251 }
252
253 /* Claim the device in case conflict with other process */
254 if (spdk_pci_device_claim(&pci_addr) != 0) {
255 return false;
256 }
257
258 return true;
259 }
260
261 static void
262 attach_cb(void *cb_ctx, struct spdk_pci_device *pci_dev, struct spdk_ioat_chan *ioat)
263 {
264 struct ioat_device *dev;
265
266 dev = spdk_zmalloc(sizeof(*dev), 0, NULL);
267 if (dev == NULL) {
268 SPDK_ERRLOG("Failed to allocate device struct\n");
269 return;
270 }
271
272 dev->ioat = ioat;
273 TAILQ_INSERT_TAIL(&g_devices, dev, tailq);
274 }
275
276 static int
277 copy_engine_ioat_init(void)
278 {
279 struct spdk_conf_section *sp = spdk_conf_find_section(NULL, "Ioat");
280 const char *pci_bdf;
281 int i;
282 struct ioat_probe_ctx probe_ctx = {};
283
284 if (sp != NULL) {
285 if (spdk_conf_section_get_boolval(sp, "Disable", false)) {
286 /* Disable Ioat */
287 return 0;
288 }
289
290 /*Init the whitelist*/
291 for (i = 0; i < IOAT_MAX_CHANNELS; i++) {
292 pci_bdf = spdk_conf_section_get_nmval(sp, "Whitelist", i, 0);
293 if (!pci_bdf)
294 break;
295
296 if (spdk_pci_addr_parse(&probe_ctx.whitelist[probe_ctx.num_whitelist_devices], pci_bdf) < 0) {
297 SPDK_ERRLOG("Invalid Ioat Whitelist address %s\n", pci_bdf);
298 return -1;
299 }
300 probe_ctx.num_whitelist_devices++;
301 }
302 }
303
304 if (spdk_ioat_probe(&probe_ctx, probe_cb, attach_cb) != 0) {
305 SPDK_ERRLOG("spdk_ioat_probe() failed\n");
306 return -1;
307 }
308
309 SPDK_NOTICELOG("Ioat Copy Engine Offload Enabled\n");
310 spdk_copy_engine_register(&ioat_copy_engine);
311 spdk_io_device_register(&ioat_copy_engine, ioat_create_cb, ioat_destroy_cb,
312 sizeof(struct ioat_io_channel));
313
314 return 0;
315 }