]>
Commit | Line | Data |
---|---|---|
1d09f67e TL |
1 | /* |
2 | * Licensed to the Apache Software Foundation (ASF) under one | |
3 | * or more contributor license agreements. See the NOTICE file | |
4 | * distributed with this work for additional information | |
5 | * regarding copyright ownership. The ASF licenses this file | |
6 | * to you under the Apache License, Version 2.0 (the | |
7 | * "License"); you may not use this file except in compliance | |
8 | * with the License. You may obtain a copy of the License at | |
9 | * | |
10 | * http://www.apache.org/licenses/LICENSE-2.0 | |
11 | * | |
12 | * Unless required by applicable law or agreed to in writing, | |
13 | * software distributed under the License is distributed on an | |
14 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY | |
15 | * KIND, either express or implied. See the License for the | |
16 | * specific language governing permissions and limitations | |
17 | * under the License. | |
18 | */ | |
19 | ||
20 | #include <arrow-glib/buffer.hpp> | |
21 | #include <arrow-glib/error.hpp> | |
22 | #include <arrow-glib/input-stream.hpp> | |
23 | #include <arrow-glib/ipc-options.hpp> | |
24 | #include <arrow-glib/output-stream.hpp> | |
25 | #include <arrow-glib/readable.hpp> | |
26 | #include <arrow-glib/record-batch.hpp> | |
27 | #include <arrow-glib/schema.hpp> | |
28 | ||
29 | #include <arrow-cuda-glib/cuda.hpp> | |
30 | ||
31 | G_BEGIN_DECLS | |
32 | ||
33 | /** | |
34 | * SECTION: cuda | |
35 | * @section_id: cuda-classes | |
36 | * @title: CUDA related classes | |
37 | * @include: arrow-cuda-glib/arrow-cuda-glib.h | |
38 | * | |
39 | * The following classes provide CUDA support for Apache Arrow data. | |
40 | * | |
41 | * #GArrowCUDADeviceManager is the starting point. You need at | |
42 | * least one #GArrowCUDAContext to process Apache Arrow data on | |
43 | * NVIDIA GPU. | |
44 | * | |
45 | * #GArrowCUDAContext is a class to keep context for one GPU. You | |
46 | * need to create #GArrowCUDAContext for each GPU that you want to | |
47 | * use. You can create #GArrowCUDAContext by | |
48 | * garrow_cuda_device_manager_get_context(). | |
49 | * | |
50 | * #GArrowCUDABuffer is a class for data on GPU. You can copy data | |
51 | * on GPU to/from CPU by garrow_cuda_buffer_copy_to_host() and | |
52 | * garrow_cuda_buffer_copy_from_host(). You can share data on GPU | |
53 | * with other processes by garrow_cuda_buffer_export() and | |
54 | * garrow_cuda_buffer_new_ipc(). | |
55 | * | |
56 | * #GArrowCUDAHostBuffer is a class for data on CPU that is | |
57 | * directly accessible from GPU. | |
58 | * | |
59 | * #GArrowCUDAIPCMemoryHandle is a class to share data on GPU with | |
60 | * other processes. You can export your data on GPU to other processes | |
61 | * by garrow_cuda_buffer_export() and | |
62 | * garrow_cuda_ipc_memory_handle_new(). You can import other | |
63 | * process data on GPU by garrow_cuda_ipc_memory_handle_new() and | |
64 | * garrow_cuda_buffer_new_ipc(). | |
65 | * | |
66 | * #GArrowCUDABufferInputStream is a class to read data in | |
67 | * #GArrowCUDABuffer. | |
68 | * | |
69 | * #GArrowCUDABufferOutputStream is a class to write data into | |
70 | * #GArrowCUDABuffer. | |
71 | */ | |
72 | ||
G_DEFINE_TYPE(GArrowCUDADeviceManager,
              garrow_cuda_device_manager,
              G_TYPE_OBJECT)

/* No per-instance state: every method of this class re-fetches
 * arrow::cuda::CudaDeviceManager::Instance(), so the GObject is only
 * a handle for the process-wide Arrow CUDA device manager. */
static void
garrow_cuda_device_manager_init(GArrowCUDADeviceManager *object)
{
}

static void
garrow_cuda_device_manager_class_init(GArrowCUDADeviceManagerClass *klass)
{
}
86 | ||
87 | /** | |
88 | * garrow_cuda_device_manager_new: | |
89 | * @error: (nullable): Return location for a #GError or %NULL. | |
90 | * | |
91 | * Returns: A newly created #GArrowCUDADeviceManager on success, | |
92 | * %NULL on error. | |
93 | * | |
94 | * Since: 0.8.0 | |
95 | */ | |
96 | GArrowCUDADeviceManager * | |
97 | garrow_cuda_device_manager_new(GError **error) | |
98 | { | |
99 | auto arrow_manager = arrow::cuda::CudaDeviceManager::Instance(); | |
100 | if (garrow::check(error, arrow_manager, "[cuda][device-manager][new]")) { | |
101 | auto manager = g_object_new(GARROW_CUDA_TYPE_DEVICE_MANAGER, | |
102 | NULL); | |
103 | return GARROW_CUDA_DEVICE_MANAGER(manager); | |
104 | } else { | |
105 | return NULL; | |
106 | } | |
107 | } | |
108 | ||
109 | /** | |
110 | * garrow_cuda_device_manager_get_context: | |
111 | * @manager: A #GArrowCUDADeviceManager. | |
112 | * @gpu_number: A GPU device number for the target context. | |
113 | * @error: (nullable): Return location for a #GError or %NULL. | |
114 | * | |
115 | * Returns: (transfer full): A newly created #GArrowCUDAContext on | |
116 | * success, %NULL on error. Contexts for the same GPU device number | |
117 | * share the same data internally. | |
118 | * | |
119 | * Since: 0.8.0 | |
120 | */ | |
121 | GArrowCUDAContext * | |
122 | garrow_cuda_device_manager_get_context(GArrowCUDADeviceManager *manager, | |
123 | gint gpu_number, | |
124 | GError **error) | |
125 | { | |
126 | auto arrow_manager = arrow::cuda::CudaDeviceManager::Instance(); | |
127 | auto arrow_cuda_context = (*arrow_manager)->GetContext(gpu_number); | |
128 | if (garrow::check(error, arrow_cuda_context, | |
129 | "[cuda][device-manager][get-context]]")) { | |
130 | return garrow_cuda_context_new_raw(&(*arrow_cuda_context)); | |
131 | } else { | |
132 | return NULL; | |
133 | } | |
134 | } | |
135 | ||
/**
 * garrow_cuda_device_manager_get_n_devices:
 * @manager: A #GArrowCUDADeviceManager.
 *
 * Returns: The number of GPU devices.
 *
 * Since: 0.8.0
 */
gsize
garrow_cuda_device_manager_get_n_devices(GArrowCUDADeviceManager *manager)
{
  /* Instance() is dereferenced without an error check; @manager can
     only have been created after a successful Instance() call. */
  auto arrow_manager = arrow::cuda::CudaDeviceManager::Instance();
  return (*arrow_manager)->num_devices();
}
150 | ||
151 | ||
/* Per-instance state: a shared reference to the wrapped Arrow CUDA
 * context. Constructed with placement new in init() and destroyed
 * explicitly in finalize(). */
typedef struct GArrowCUDAContextPrivate_ {
  std::shared_ptr<arrow::cuda::CudaContext> context;
} GArrowCUDAContextPrivate;

enum {
  PROP_CONTEXT = 1
};

G_DEFINE_TYPE_WITH_PRIVATE(GArrowCUDAContext,
                           garrow_cuda_context,
                           G_TYPE_OBJECT)

/* Shorthand to fetch the typed private struct from a GObject pointer. */
#define GARROW_CUDA_CONTEXT_GET_PRIVATE(object)         \
  static_cast<GArrowCUDAContextPrivate *>(              \
    garrow_cuda_context_get_instance_private(           \
      GARROW_CUDA_CONTEXT(object)))
168 | ||
static void
garrow_cuda_context_finalize(GObject *object)
{
  auto priv = GARROW_CUDA_CONTEXT_GET_PRIVATE(object);

  /* The shared_ptr was placement-new'd in init(), so run its
     destructor explicitly; GObject only frees the raw bytes. */
  priv->context.~shared_ptr();

  G_OBJECT_CLASS(garrow_cuda_context_parent_class)->finalize(object);
}
178 | ||
static void
garrow_cuda_context_set_property(GObject *object,
                                 guint prop_id,
                                 const GValue *value,
                                 GParamSpec *pspec)
{
  auto priv = GARROW_CUDA_CONTEXT_GET_PRIVATE(object);

  switch (prop_id) {
  case PROP_CONTEXT:
    /* The pointer value is a std::shared_ptr<CudaContext>* supplied
       by garrow_cuda_context_new_raw(); copy it so this instance
       holds its own reference. */
    priv->context =
      *static_cast<std::shared_ptr<arrow::cuda::CudaContext> *>(g_value_get_pointer(value));
    break;
  default:
    G_OBJECT_WARN_INVALID_PROPERTY_ID(object, prop_id, pspec);
    break;
  }
}
197 | ||
/* The "context" property is write-only (G_PARAM_WRITABLE), so any
 * read attempt is reported as an invalid property access. */
static void
garrow_cuda_context_get_property(GObject *object,
                                 guint prop_id,
                                 GValue *value,
                                 GParamSpec *pspec)
{
  switch (prop_id) {
  default:
    G_OBJECT_WARN_INVALID_PROPERTY_ID(object, prop_id, pspec);
    break;
  }
}
210 | ||
static void
garrow_cuda_context_init(GArrowCUDAContext *object)
{
  auto priv = GARROW_CUDA_CONTEXT_GET_PRIVATE(object);
  /* GObject private storage is raw memory: construct the shared_ptr
     in place here; finalize() runs the matching destructor. */
  new(&priv->context) std::shared_ptr<arrow::cuda::CudaContext>;
}
217 | ||
static void
garrow_cuda_context_class_init(GArrowCUDAContextClass *klass)
{
  GParamSpec *spec;

  auto gobject_class = G_OBJECT_CLASS(klass);

  gobject_class->finalize = garrow_cuda_context_finalize;
  gobject_class->set_property = garrow_cuda_context_set_property;
  gobject_class->get_property = garrow_cuda_context_get_property;

  /**
   * GArrowCUDAContext:context:
   *
   * The wrapped std::shared_ptr<arrow::cuda::CudaContext>.
   * Write-only and construct-only: it can only be supplied via
   * g_object_new() by garrow_cuda_context_new_raw().
   *
   * Since: 0.8.0
   */
  spec = g_param_spec_pointer("context",
                              "Context",
                              "The raw std::shared_ptr<arrow::cuda::CudaContext>",
                              static_cast<GParamFlags>(G_PARAM_WRITABLE |
                                                       G_PARAM_CONSTRUCT_ONLY));
  g_object_class_install_property(gobject_class, PROP_CONTEXT, spec);
}
241 | ||
242 | /** | |
243 | * garrow_cuda_context_get_allocated_size: | |
244 | * @context: A #GArrowCUDAContext. | |
245 | * | |
246 | * Returns: The allocated memory by this context in bytes. | |
247 | * | |
248 | * Since: 0.8.0 | |
249 | */ | |
250 | gint64 | |
251 | garrow_cuda_context_get_allocated_size(GArrowCUDAContext *context) | |
252 | { | |
253 | auto arrow_context = garrow_cuda_context_get_raw(context); | |
254 | return arrow_context->bytes_allocated(); | |
255 | } | |
256 | ||
257 | ||
G_DEFINE_TYPE(GArrowCUDABuffer,
              garrow_cuda_buffer,
              GARROW_TYPE_BUFFER)

/* No extra per-instance state: the wrapped Arrow buffer is stored by
 * the GArrowBuffer parent class via its "buffer" property. */
static void
garrow_cuda_buffer_init(GArrowCUDABuffer *object)
{
}

static void
garrow_cuda_buffer_class_init(GArrowCUDABufferClass *klass)
{
}
271 | ||
272 | /** | |
273 | * garrow_cuda_buffer_new: | |
274 | * @context: A #GArrowCUDAContext. | |
275 | * @size: The number of bytes to be allocated on GPU device for this context. | |
276 | * @error: (nullable): Return location for a #GError or %NULL. | |
277 | * | |
278 | * Returns: (transfer full): A newly created #GArrowCUDABuffer on | |
279 | * success, %NULL on error. | |
280 | * | |
281 | * Since: 0.8.0 | |
282 | */ | |
283 | GArrowCUDABuffer * | |
284 | garrow_cuda_buffer_new(GArrowCUDAContext *context, | |
285 | gint64 size, | |
286 | GError **error) | |
287 | { | |
288 | auto arrow_context = garrow_cuda_context_get_raw(context); | |
289 | auto arrow_buffer = arrow_context->Allocate(size); | |
290 | if (garrow::check(error, arrow_buffer, "[cuda][buffer][new]")) { | |
291 | return garrow_cuda_buffer_new_raw(&(*arrow_buffer)); | |
292 | } else { | |
293 | return NULL; | |
294 | } | |
295 | } | |
296 | ||
297 | /** | |
298 | * garrow_cuda_buffer_new_ipc: | |
299 | * @context: A #GArrowCUDAContext. | |
300 | * @handle: A #GArrowCUDAIPCMemoryHandle to be communicated. | |
301 | * @error: (nullable): Return location for a #GError or %NULL. | |
302 | * | |
303 | * Returns: (transfer full): A newly created #GArrowCUDABuffer on | |
304 | * success, %NULL on error. The buffer has data from the IPC target. | |
305 | * | |
306 | * Since: 0.8.0 | |
307 | */ | |
308 | GArrowCUDABuffer * | |
309 | garrow_cuda_buffer_new_ipc(GArrowCUDAContext *context, | |
310 | GArrowCUDAIPCMemoryHandle *handle, | |
311 | GError **error) | |
312 | { | |
313 | auto arrow_context = garrow_cuda_context_get_raw(context); | |
314 | auto arrow_handle = garrow_cuda_ipc_memory_handle_get_raw(handle); | |
315 | auto arrow_buffer = arrow_context->OpenIpcBuffer(*arrow_handle); | |
316 | if (garrow::check(error, arrow_buffer, "[cuda][buffer][new-ipc]")) { | |
317 | return garrow_cuda_buffer_new_raw(&(*arrow_buffer)); | |
318 | } else { | |
319 | return NULL; | |
320 | } | |
321 | } | |
322 | ||
/**
 * garrow_cuda_buffer_new_record_batch:
 * @context: A #GArrowCUDAContext.
 * @record_batch: A #GArrowRecordBatch to be serialized.
 * @error: (nullable): Return location for a #GError or %NULL.
 *
 * Returns: (transfer full): A newly created #GArrowCUDABuffer on
 * success, %NULL on error. The buffer has serialized record batch
 * data.
 *
 * Since: 0.8.0
 */
GArrowCUDABuffer *
garrow_cuda_buffer_new_record_batch(GArrowCUDAContext *context,
                                    GArrowRecordBatch *record_batch,
                                    GError **error)
{
  auto arrow_context = garrow_cuda_context_get_raw(context);
  auto arrow_record_batch = garrow_record_batch_get_raw(record_batch);
  /* Serializes the record batch in IPC format directly into a newly
     allocated device buffer owned by @context. */
  auto arrow_buffer = arrow::cuda::SerializeRecordBatch(*arrow_record_batch,
                                                        arrow_context.get());
  if (garrow::check(error, arrow_buffer, "[cuda][buffer][new-record-batch]")) {
    return garrow_cuda_buffer_new_raw(&(*arrow_buffer));
  } else {
    return NULL;
  }
}
350 | ||
/**
 * garrow_cuda_buffer_copy_to_host:
 * @buffer: A #GArrowCUDABuffer.
 * @position: The offset of memory on GPU device to be copied.
 * @size: The size of memory on GPU device to be copied in bytes.
 * @error: (nullable): Return location for a #GError or %NULL.
 *
 * Returns: (transfer full): A #GBytes that have copied memory on CPU
 * host on success, %NULL on error.
 *
 * Since: 0.8.0
 */
GBytes *
garrow_cuda_buffer_copy_to_host(GArrowCUDABuffer *buffer,
                                gint64 position,
                                gint64 size,
                                GError **error)
{
  auto arrow_buffer = garrow_cuda_buffer_get_raw(buffer);
  /* Host-side staging buffer; ownership is transferred to the GBytes
     on success (g_bytes_new_take), freed manually on failure. */
  auto data = static_cast<uint8_t *>(g_malloc(size));
  auto status = arrow_buffer->CopyToHost(position, size, data);
  if (garrow_error_check(error, status, "[cuda][buffer][copy-to-host]")) {
    return g_bytes_new_take(data, size);
  } else {
    g_free(data);
    return NULL;
  }
}
379 | ||
380 | /** | |
381 | * garrow_cuda_buffer_copy_from_host: | |
382 | * @buffer: A #GArrowCUDABuffer. | |
383 | * @data: (array length=size): Data on CPU host to be copied. | |
384 | * @size: The size of data on CPU host to be copied in bytes. | |
385 | * @error: (nullable): Return location for a #GError or %NULL. | |
386 | * | |
387 | * Returns: %TRUE on success, %FALSE if there was an error. | |
388 | * | |
389 | * Since: 0.8.0 | |
390 | */ | |
391 | gboolean | |
392 | garrow_cuda_buffer_copy_from_host(GArrowCUDABuffer *buffer, | |
393 | const guint8 *data, | |
394 | gint64 size, | |
395 | GError **error) | |
396 | { | |
397 | auto arrow_buffer = garrow_cuda_buffer_get_raw(buffer); | |
398 | auto status = arrow_buffer->CopyFromHost(0, data, size); | |
399 | return garrow_error_check(error, | |
400 | status, | |
401 | "[cuda][buffer][copy-from-host]"); | |
402 | } | |
403 | ||
404 | /** | |
405 | * garrow_cuda_buffer_export: | |
406 | * @buffer: A #GArrowCUDABuffer. | |
407 | * @error: (nullable): Return location for a #GError or %NULL. | |
408 | * | |
409 | * Returns: (transfer full): A newly created | |
410 | * #GArrowCUDAIPCMemoryHandle to handle the exported buffer on | |
411 | * success, %NULL on error | |
412 | * | |
413 | * Since: 0.8.0 | |
414 | */ | |
415 | GArrowCUDAIPCMemoryHandle * | |
416 | garrow_cuda_buffer_export(GArrowCUDABuffer *buffer, GError **error) | |
417 | { | |
418 | auto arrow_buffer = garrow_cuda_buffer_get_raw(buffer); | |
419 | auto arrow_handle = arrow_buffer->ExportForIpc(); | |
420 | if (garrow::check(error, arrow_handle, "[cuda][buffer][export-for-ipc]")) { | |
421 | return garrow_cuda_ipc_memory_handle_new_raw(&(*arrow_handle)); | |
422 | } else { | |
423 | return NULL; | |
424 | } | |
425 | } | |
426 | ||
/**
 * garrow_cuda_buffer_get_context:
 * @buffer: A #GArrowCUDABuffer.
 *
 * Returns: (transfer full): A newly created #GArrowCUDAContext for the
 * buffer. Contexts for the same buffer share the same data internally.
 *
 * Since: 0.8.0
 */
GArrowCUDAContext *
garrow_cuda_buffer_get_context(GArrowCUDABuffer *buffer)
{
  auto arrow_buffer = garrow_cuda_buffer_get_raw(buffer);
  /* context() returns a shared_ptr; the wrapper copies it, so the
     local's lifetime ending here is fine. */
  auto arrow_context = arrow_buffer->context();
  return garrow_cuda_context_new_raw(&arrow_context);
}
443 | ||
444 | /** | |
445 | * garrow_cuda_buffer_read_record_batch: | |
446 | * @buffer: A #GArrowCUDABuffer. | |
447 | * @schema: A #GArrowSchema for record batch. | |
448 | * @options: (nullable): A #GArrowReadOptions. | |
449 | * @error: (nullable): Return location for a #GError or %NULL. | |
450 | * | |
451 | * Returns: (transfer full): A newly created #GArrowRecordBatch on | |
452 | * success, %NULL on error. The record batch data is located on GPU. | |
453 | * | |
454 | * Since: 0.8.0 | |
455 | */ | |
456 | GArrowRecordBatch * | |
457 | garrow_cuda_buffer_read_record_batch(GArrowCUDABuffer *buffer, | |
458 | GArrowSchema *schema, | |
459 | GArrowReadOptions *options, | |
460 | GError **error) | |
461 | { | |
462 | auto arrow_buffer = garrow_cuda_buffer_get_raw(buffer); | |
463 | auto arrow_schema = garrow_schema_get_raw(schema); | |
464 | ||
465 | if (options) { | |
466 | auto arrow_options = garrow_read_options_get_raw(options); | |
467 | auto arrow_dictionary_memo = | |
468 | garrow_read_options_get_dictionary_memo_raw(options); | |
469 | auto arrow_record_batch = | |
470 | arrow::cuda::ReadRecordBatch(arrow_schema, | |
471 | arrow_dictionary_memo, | |
472 | arrow_buffer, | |
473 | arrow_options->memory_pool); | |
474 | if (garrow::check(error, arrow_record_batch, | |
475 | "[cuda][buffer][read-record-batch]")) { | |
476 | return garrow_record_batch_new_raw(&(*arrow_record_batch)); | |
477 | } else { | |
478 | return NULL; | |
479 | } | |
480 | } else { | |
481 | auto arrow_pool = arrow::default_memory_pool(); | |
482 | auto arrow_record_batch = | |
483 | arrow::cuda::ReadRecordBatch(arrow_schema, | |
484 | nullptr, | |
485 | arrow_buffer, | |
486 | arrow_pool); | |
487 | if (garrow::check(error, arrow_record_batch, | |
488 | "[cuda][buffer][read-record-batch]")) { | |
489 | return garrow_record_batch_new_raw(&(*arrow_record_batch)); | |
490 | } else { | |
491 | return NULL; | |
492 | } | |
493 | } | |
494 | } | |
495 | ||
496 | ||
G_DEFINE_TYPE(GArrowCUDAHostBuffer,
              garrow_cuda_host_buffer,
              GARROW_TYPE_MUTABLE_BUFFER)

/* No extra per-instance state: the wrapped Arrow host buffer is held
 * by the GArrowBuffer ancestor via its "buffer" property. */
static void
garrow_cuda_host_buffer_init(GArrowCUDAHostBuffer *object)
{
}

static void
garrow_cuda_host_buffer_class_init(GArrowCUDAHostBufferClass *klass)
{
}
510 | ||
/**
 * garrow_cuda_host_buffer_new:
 * @gpu_number: A GPU device number for the target context.
 * @size: The number of bytes to be allocated on CPU host.
 * @error: (nullable): Return location for a #GError or %NULL.
 *
 * Returns: A newly created #GArrowCUDAHostBuffer on success,
 * %NULL on error. The allocated memory is accessible from the GPU
 * device for @gpu_number.
 *
 * Since: 0.8.0
 */
GArrowCUDAHostBuffer *
garrow_cuda_host_buffer_new(gint gpu_number, gint64 size, GError **error)
{
  auto arrow_manager = arrow::cuda::CudaDeviceManager::Instance();
  /* Allocates page-locked host memory visible to the given device. */
  auto arrow_buffer = (*arrow_manager)->AllocateHost(gpu_number, size);
  if (garrow::check(error, arrow_buffer, "[cuda][host-buffer][new]")) {
    return garrow_cuda_host_buffer_new_raw(&(*arrow_buffer));
  } else {
    return NULL;
  }
}
534 | ||
535 | ||
/* Per-instance state: a shared reference to the wrapped Arrow CUDA
 * IPC memory handle. */
typedef struct GArrowCUDAIPCMemoryHandlePrivate_ {
  std::shared_ptr<arrow::cuda::CudaIpcMemHandle> ipc_memory_handle;
} GArrowCUDAIPCMemoryHandlePrivate;

enum {
  PROP_IPC_MEMORY_HANDLE = 1
};

G_DEFINE_TYPE_WITH_PRIVATE(GArrowCUDAIPCMemoryHandle,
                           garrow_cuda_ipc_memory_handle,
                           G_TYPE_OBJECT)

/* Shorthand to fetch the typed private struct from a GObject pointer. */
#define GARROW_CUDA_IPC_MEMORY_HANDLE_GET_PRIVATE(object)       \
  static_cast<GArrowCUDAIPCMemoryHandlePrivate *>(              \
    garrow_cuda_ipc_memory_handle_get_instance_private(         \
      GARROW_CUDA_IPC_MEMORY_HANDLE(object)))
552 | ||
static void
garrow_cuda_ipc_memory_handle_finalize(GObject *object)
{
  auto priv = GARROW_CUDA_IPC_MEMORY_HANDLE_GET_PRIVATE(object);

  /* NOTE(review): unlike GArrowCUDAContext, init() never
     placement-news this shared_ptr; the code relies on the
     zero-filled private memory behaving as an empty shared_ptr, and
     releases the reference here by null assignment instead of an
     explicit destructor call — confirm this matches the project's
     intended convention. */
  priv->ipc_memory_handle = nullptr;

  G_OBJECT_CLASS(garrow_cuda_ipc_memory_handle_parent_class)->finalize(object);
}
562 | ||
static void
garrow_cuda_ipc_memory_handle_set_property(GObject *object,
                                           guint prop_id,
                                           const GValue *value,
                                           GParamSpec *pspec)
{
  auto priv = GARROW_CUDA_IPC_MEMORY_HANDLE_GET_PRIVATE(object);

  switch (prop_id) {
  case PROP_IPC_MEMORY_HANDLE:
    /* The pointer value is a std::shared_ptr<CudaIpcMemHandle>*
       supplied by garrow_cuda_ipc_memory_handle_new_raw(); copy it
       so this instance holds its own reference. */
    priv->ipc_memory_handle =
      *static_cast<std::shared_ptr<arrow::cuda::CudaIpcMemHandle> *>(g_value_get_pointer(value));
    break;
  default:
    G_OBJECT_WARN_INVALID_PROPERTY_ID(object, prop_id, pspec);
    break;
  }
}
581 | ||
/* The "ipc-memory-handle" property is write-only (G_PARAM_WRITABLE),
 * so any read attempt is reported as an invalid property access. */
static void
garrow_cuda_ipc_memory_handle_get_property(GObject *object,
                                           guint prop_id,
                                           GValue *value,
                                           GParamSpec *pspec)
{
  switch (prop_id) {
  default:
    G_OBJECT_WARN_INVALID_PROPERTY_ID(object, prop_id, pspec);
    break;
  }
}
594 | ||
/* Intentionally empty: the shared_ptr member lives in zero-filled
 * private memory and is only assigned via the construct-only
 * property (see set_property() and the NOTE in finalize()). */
static void
garrow_cuda_ipc_memory_handle_init(GArrowCUDAIPCMemoryHandle *object)
{
}

static void
garrow_cuda_ipc_memory_handle_class_init(GArrowCUDAIPCMemoryHandleClass *klass)
{
  GParamSpec *spec;

  auto gobject_class = G_OBJECT_CLASS(klass);

  gobject_class->finalize = garrow_cuda_ipc_memory_handle_finalize;
  gobject_class->set_property = garrow_cuda_ipc_memory_handle_set_property;
  gobject_class->get_property = garrow_cuda_ipc_memory_handle_get_property;

  /**
   * GArrowCUDAIPCMemoryHandle:ipc-memory-handle:
   *
   * The wrapped std::shared_ptr<arrow::cuda::CudaIpcMemHandle>.
   * Write-only and construct-only.
   *
   * Since: 0.8.0
   */
  spec = g_param_spec_pointer("ipc-memory-handle",
                              "IPC Memory Handle",
                              "The raw std::shared_ptr<arrow::cuda::CudaIpcMemHandle>",
                              static_cast<GParamFlags>(G_PARAM_WRITABLE |
                                                       G_PARAM_CONSTRUCT_ONLY));
  g_object_class_install_property(gobject_class, PROP_IPC_MEMORY_HANDLE, spec);
}
623 | ||
/**
 * garrow_cuda_ipc_memory_handle_new:
 * @data: (array length=size): A serialized #GArrowCUDAIPCMemoryHandle.
 * @size: The size of data.
 * @error: (nullable): Return location for a #GError or %NULL.
 *
 * Returns: (transfer full): A newly created #GArrowCUDAIPCMemoryHandle
 * on success, %NULL on error.
 *
 * Since: 0.8.0
 */
GArrowCUDAIPCMemoryHandle *
garrow_cuda_ipc_memory_handle_new(const guint8 *data,
                                  gsize size,
                                  GError **error)
{
  /* @size is not passed down: FromBuffer() only takes the data
     pointer. It is kept in the signature for (array length=size)
     introspection metadata. */
  auto arrow_handle = arrow::cuda::CudaIpcMemHandle::FromBuffer(data);
  if (garrow::check(error, arrow_handle, "[cuda][ipc-memory-handle][new]")) {
    return garrow_cuda_ipc_memory_handle_new_raw(&(*arrow_handle));
  } else {
    return NULL;
  }
}
647 | ||
648 | /** | |
649 | * garrow_cuda_ipc_memory_handle_serialize: | |
650 | * @handle: A #GArrowCUDAIPCMemoryHandle. | |
651 | * @error: (nullable): Return location for a #GError or %NULL. | |
652 | * | |
653 | * Returns: (transfer full): A newly created #GArrowBuffer on success, | |
654 | * %NULL on error. The buffer has serialized @handle. The serialized | |
655 | * @handle can be deserialized by garrow_cuda_ipc_memory_handle_new() | |
656 | * in other process. | |
657 | * | |
658 | * Since: 0.8.0 | |
659 | */ | |
660 | GArrowBuffer * | |
661 | garrow_cuda_ipc_memory_handle_serialize(GArrowCUDAIPCMemoryHandle *handle, | |
662 | GError **error) | |
663 | { | |
664 | auto arrow_handle = garrow_cuda_ipc_memory_handle_get_raw(handle); | |
665 | auto arrow_buffer = arrow_handle->Serialize(arrow::default_memory_pool()); | |
666 | if (garrow::check(error, arrow_buffer, | |
667 | "[cuda][ipc-memory-handle][serialize]")) { | |
668 | return garrow_buffer_new_raw(&(*arrow_buffer)); | |
669 | } else { | |
670 | return NULL; | |
671 | } | |
672 | } | |
673 | ||
/* GArrowReadable interface hook: wrap a read result buffer as a
 * GArrowCUDABuffer instead of a plain GArrowBuffer, since reads from
 * a CUDA buffer reader yield device memory. */
static GArrowBuffer *
garrow_cuda_buffer_input_stream_buffer_new_raw_readable_interface(std::shared_ptr<arrow::Buffer> *arrow_buffer)
{
  /* reinterpret_cast between shared_ptr specializations: assumes the
     underlying buffer really is an arrow::cuda::CudaBuffer. */
  auto arrow_cuda_buffer =
    reinterpret_cast<std::shared_ptr<arrow::cuda::CudaBuffer> *>(arrow_buffer);
  auto cuda_buffer = garrow_cuda_buffer_new_raw(arrow_cuda_buffer);
  return GARROW_BUFFER(cuda_buffer);
}

/* GArrowReadable interface hook: expose the wrapped input stream as
 * an arrow::io::Readable. */
static std::shared_ptr<arrow::io::Readable>
garrow_cuda_buffer_input_stream_get_raw_readable_interface(GArrowReadable *readable)
{
  auto input_stream = GARROW_INPUT_STREAM(readable);
  auto arrow_input_stream = garrow_input_stream_get_raw(input_stream);
  return arrow_input_stream;
}

static void
garrow_cuda_buffer_input_stream_readable_interface_init(GArrowReadableInterface *iface)
{
  iface->buffer_new_raw =
    garrow_cuda_buffer_input_stream_buffer_new_raw_readable_interface;
  iface->get_raw =
    garrow_cuda_buffer_input_stream_get_raw_readable_interface;
}
699 | ||
G_DEFINE_TYPE_WITH_CODE(
  GArrowCUDABufferInputStream,
  garrow_cuda_buffer_input_stream,
  GARROW_TYPE_BUFFER_INPUT_STREAM,
  G_IMPLEMENT_INTERFACE(
    GARROW_TYPE_READABLE,
    garrow_cuda_buffer_input_stream_readable_interface_init))

/* No extra per-instance state: the wrapped reader is held by the
 * parent input stream class via its "input-stream" property. */
static void
garrow_cuda_buffer_input_stream_init(GArrowCUDABufferInputStream *object)
{
}

static void
garrow_cuda_buffer_input_stream_class_init(GArrowCUDABufferInputStreamClass *klass)
{
}
717 | ||
/**
 * garrow_cuda_buffer_input_stream_new:
 * @buffer: A #GArrowCUDABuffer.
 *
 * Returns: (transfer full): A newly created
 * #GArrowCUDABufferInputStream.
 *
 * Since: 0.8.0
 */
GArrowCUDABufferInputStream *
garrow_cuda_buffer_input_stream_new(GArrowCUDABuffer *buffer)
{
  auto arrow_buffer = garrow_cuda_buffer_get_raw(buffer);
  /* The reader shares ownership of the device buffer, so @buffer may
     be released by the caller after this returns. */
  auto arrow_reader =
    std::make_shared<arrow::cuda::CudaBufferReader>(arrow_buffer);
  return garrow_cuda_buffer_input_stream_new_raw(&arrow_reader);
}
735 | ||
736 | ||
G_DEFINE_TYPE(GArrowCUDABufferOutputStream,
              garrow_cuda_buffer_output_stream,
              GARROW_TYPE_OUTPUT_STREAM)

/* No extra per-instance state: the wrapped writer is held by the
 * parent output stream class via its "output-stream" property. */
static void
garrow_cuda_buffer_output_stream_init(GArrowCUDABufferOutputStream *object)
{
}

static void
garrow_cuda_buffer_output_stream_class_init(GArrowCUDABufferOutputStreamClass *klass)
{
}
750 | ||
751 | /** | |
752 | * garrow_cuda_buffer_output_stream_new: | |
753 | * @buffer: A #GArrowCUDABuffer. | |
754 | * | |
755 | * Returns: (transfer full): A newly created | |
756 | * #GArrowCUDABufferOutputStream. | |
757 | * | |
758 | * Since: 0.8.0 | |
759 | */ | |
760 | GArrowCUDABufferOutputStream * | |
761 | garrow_cuda_buffer_output_stream_new(GArrowCUDABuffer *buffer) | |
762 | { | |
763 | auto arrow_buffer = garrow_cuda_buffer_get_raw(buffer); | |
764 | auto arrow_writer = | |
765 | std::make_shared<arrow::cuda::CudaBufferWriter>(arrow_buffer); | |
766 | return garrow_cuda_buffer_output_stream_new_raw(&arrow_writer); | |
767 | } | |
768 | ||
/**
 * garrow_cuda_buffer_output_stream_set_buffer_size:
 * @stream: A #GArrowCUDABufferOutputStream.
 * @size: A size of CPU buffer in bytes.
 * @error: (nullable): Return location for a #GError or %NULL.
 *
 * Returns: %TRUE on success, %FALSE if there was an error.
 *
 * Sets CPU buffer size to limit `cudaMemcpy()` calls. If CPU buffer
 * size is `0`, buffering is disabled.
 *
 * The default is `0`.
 *
 * Since: 0.8.0
 */
gboolean
garrow_cuda_buffer_output_stream_set_buffer_size(GArrowCUDABufferOutputStream *stream,
                                                 gint64 size,
                                                 GError **error)
{
  auto arrow_stream = garrow_cuda_buffer_output_stream_get_raw(stream);
  auto status = arrow_stream->SetBufferSize(size);
  return garrow_error_check(error,
                            status,
                            "[cuda][buffer-output-stream][set-buffer-size]");
}
795 | ||
/**
 * garrow_cuda_buffer_output_stream_get_buffer_size:
 * @stream: A #GArrowCUDABufferOutputStream.
 *
 * Returns: The CPU buffer size in bytes.
 *
 * See garrow_cuda_buffer_output_stream_set_buffer_size() for CPU
 * buffer size details.
 *
 * Since: 0.8.0
 */
gint64
garrow_cuda_buffer_output_stream_get_buffer_size(GArrowCUDABufferOutputStream *stream)
{
  auto arrow_stream = garrow_cuda_buffer_output_stream_get_raw(stream);
  return arrow_stream->buffer_size();
}
813 | ||
/**
 * garrow_cuda_buffer_output_stream_get_buffered_size:
 * @stream: A #GArrowCUDABufferOutputStream.
 *
 * Returns: The size of buffered data in bytes.
 *
 * Since: 0.8.0
 */
gint64
garrow_cuda_buffer_output_stream_get_buffered_size(GArrowCUDABufferOutputStream *stream)
{
  auto arrow_stream = garrow_cuda_buffer_output_stream_get_raw(stream);
  /* Data written but not yet flushed to the device buffer. */
  return arrow_stream->num_bytes_buffered();
}
828 | ||
829 | ||
830 | G_END_DECLS | |
831 | ||
/* Wraps a raw Arrow CUDA context in a new GObject. The pointed-to
 * shared_ptr is copied by set_property(); the caller keeps ownership
 * of *arrow_context. */
GArrowCUDAContext *
garrow_cuda_context_new_raw(std::shared_ptr<arrow::cuda::CudaContext> *arrow_context)
{
  return GARROW_CUDA_CONTEXT(g_object_new(GARROW_CUDA_TYPE_CONTEXT,
                                          "context", arrow_context,
                                          NULL));
}

/* Returns the wrapped Arrow CUDA context, or nullptr if @context is
 * NULL. */
std::shared_ptr<arrow::cuda::CudaContext>
garrow_cuda_context_get_raw(GArrowCUDAContext *context)
{
  if (!context)
    return nullptr;

  auto priv = GARROW_CUDA_CONTEXT_GET_PRIVATE(context);
  return priv->context;
}
849 | ||
/* Wraps a raw Arrow CUDA IPC memory handle in a new GObject. The
 * pointed-to shared_ptr is copied by set_property(); the caller keeps
 * ownership of *arrow_handle. */
GArrowCUDAIPCMemoryHandle *
garrow_cuda_ipc_memory_handle_new_raw(std::shared_ptr<arrow::cuda::CudaIpcMemHandle> *arrow_handle)
{
  auto handle = g_object_new(GARROW_CUDA_TYPE_IPC_MEMORY_HANDLE,
                             "ipc-memory-handle", arrow_handle,
                             NULL);
  return GARROW_CUDA_IPC_MEMORY_HANDLE(handle);
}

/* Returns the wrapped Arrow CUDA IPC memory handle, or nullptr if
 * @handle is NULL. */
std::shared_ptr<arrow::cuda::CudaIpcMemHandle>
garrow_cuda_ipc_memory_handle_get_raw(GArrowCUDAIPCMemoryHandle *handle)
{
  if (!handle)
    return nullptr;

  auto priv = GARROW_CUDA_IPC_MEMORY_HANDLE_GET_PRIVATE(handle);
  return priv->ipc_memory_handle;
}
868 | ||
/* Wraps a raw Arrow CUDA buffer in a new GObject; storage is handled
 * by the GArrowBuffer parent class via its "buffer" property. */
GArrowCUDABuffer *
garrow_cuda_buffer_new_raw(std::shared_ptr<arrow::cuda::CudaBuffer> *arrow_buffer)
{
  return GARROW_CUDA_BUFFER(g_object_new(GARROW_CUDA_TYPE_BUFFER,
                                         "buffer", arrow_buffer,
                                         NULL));
}

/* Returns the wrapped Arrow buffer downcast to CudaBuffer, or nullptr
 * if @buffer is NULL. static_pointer_cast is unchecked: a
 * GArrowCUDABuffer always holds a CudaBuffer. */
std::shared_ptr<arrow::cuda::CudaBuffer>
garrow_cuda_buffer_get_raw(GArrowCUDABuffer *buffer)
{
  if (!buffer)
    return nullptr;

  auto arrow_buffer = garrow_buffer_get_raw(GARROW_BUFFER(buffer));
  return std::static_pointer_cast<arrow::cuda::CudaBuffer>(arrow_buffer);
}
886 | ||
/* Wraps a raw Arrow CUDA host buffer in a new GObject; storage is
 * handled by the GArrowBuffer ancestor via its "buffer" property. */
GArrowCUDAHostBuffer *
garrow_cuda_host_buffer_new_raw(std::shared_ptr<arrow::cuda::CudaHostBuffer> *arrow_buffer)
{
  auto buffer = g_object_new(GARROW_CUDA_TYPE_HOST_BUFFER,
                             "buffer", arrow_buffer,
                             NULL);
  return GARROW_CUDA_HOST_BUFFER(buffer);
}

/* Returns the wrapped Arrow buffer downcast to CudaHostBuffer, or
 * nullptr if @buffer is NULL. static_pointer_cast is unchecked: a
 * GArrowCUDAHostBuffer always holds a CudaHostBuffer. */
std::shared_ptr<arrow::cuda::CudaHostBuffer>
garrow_cuda_host_buffer_get_raw(GArrowCUDAHostBuffer *buffer)
{
  if (!buffer)
    return nullptr;

  auto arrow_buffer = garrow_buffer_get_raw(GARROW_BUFFER(buffer));
  return std::static_pointer_cast<arrow::cuda::CudaHostBuffer>(arrow_buffer);
}
905 | ||
/* Wraps a raw Arrow CUDA buffer reader in a new GObject; storage is
 * handled by the parent input stream class. */
GArrowCUDABufferInputStream *
garrow_cuda_buffer_input_stream_new_raw(std::shared_ptr<arrow::cuda::CudaBufferReader> *arrow_reader)
{
  auto input_stream = g_object_new(GARROW_CUDA_TYPE_BUFFER_INPUT_STREAM,
                                   "input-stream", arrow_reader,
                                   NULL);
  return GARROW_CUDA_BUFFER_INPUT_STREAM(input_stream);
}

/* Returns the wrapped stream downcast to CudaBufferReader, or nullptr
 * if @input_stream is NULL. static_pointer_cast is unchecked: this
 * class always holds a CudaBufferReader. */
std::shared_ptr<arrow::cuda::CudaBufferReader>
garrow_cuda_buffer_input_stream_get_raw(GArrowCUDABufferInputStream *input_stream)
{
  if (!input_stream)
    return nullptr;

  auto arrow_reader =
    garrow_input_stream_get_raw(GARROW_INPUT_STREAM(input_stream));
  return std::static_pointer_cast<arrow::cuda::CudaBufferReader>(arrow_reader);
}
925 | ||
/* Wraps a raw Arrow CUDA buffer writer in a new GObject; storage is
 * handled by the parent output stream class. */
GArrowCUDABufferOutputStream *
garrow_cuda_buffer_output_stream_new_raw(std::shared_ptr<arrow::cuda::CudaBufferWriter> *arrow_writer)
{
  auto output_stream = g_object_new(GARROW_CUDA_TYPE_BUFFER_OUTPUT_STREAM,
                                    "output-stream", arrow_writer,
                                    NULL);
  return GARROW_CUDA_BUFFER_OUTPUT_STREAM(output_stream);
}

/* Returns the wrapped stream downcast to CudaBufferWriter, or nullptr
 * if @output_stream is NULL. static_pointer_cast is unchecked: this
 * class always holds a CudaBufferWriter. */
std::shared_ptr<arrow::cuda::CudaBufferWriter>
garrow_cuda_buffer_output_stream_get_raw(GArrowCUDABufferOutputStream *output_stream)
{
  if (!output_stream)
    return nullptr;

  auto arrow_writer =
    garrow_output_stream_get_raw(GARROW_OUTPUT_STREAM(output_stream));
  return std::static_pointer_cast<arrow::cuda::CudaBufferWriter>(arrow_writer);
}