]>
Commit | Line | Data |
---|---|---|
7c673cae FG |
1 | //---------------------------------------------------------------------------// |
2 | // Copyright (c) 2013 Kyle Lutz <kyle.r.lutz@gmail.com> | |
3 | // | |
4 | // Distributed under the Boost Software License, Version 1.0 | |
5 | // See accompanying file LICENSE_1_0.txt or copy at | |
6 | // http://www.boost.org/LICENSE_1_0.txt | |
7 | // | |
8 | // See http://boostorg.github.com/compute for more information. | |
9 | //---------------------------------------------------------------------------// | |
10 | ||
11 | #ifndef BOOST_COMPUTE_COMMAND_QUEUE_HPP | |
12 | #define BOOST_COMPUTE_COMMAND_QUEUE_HPP | |
13 | ||
#include <cstddef>
#include <cstring>
#include <algorithm>
#include <vector>

#include <boost/assert.hpp>

#include <boost/compute/config.hpp>
#include <boost/compute/event.hpp>
#include <boost/compute/buffer.hpp>
#include <boost/compute/device.hpp>
#include <boost/compute/kernel.hpp>
#include <boost/compute/context.hpp>
#include <boost/compute/exception.hpp>
#include <boost/compute/image/image1d.hpp>
#include <boost/compute/image/image2d.hpp>
#include <boost/compute/image/image3d.hpp>
#include <boost/compute/image/image_object.hpp>
#include <boost/compute/utility/extents.hpp>
#include <boost/compute/utility/wait_list.hpp>
#include <boost/compute/detail/get_object_info.hpp>
#include <boost/compute/detail/assert_cl_success.hpp>
#include <boost/compute/detail/diagnostic.hpp>
35 | ||
36 | namespace boost { | |
37 | namespace compute { | |
38 | namespace detail { | |
39 | ||
// Trampoline for invoking a nullary user function from OpenCL
// (the name suggests use with clEnqueueNativeKernel — the caller is
// outside this chunk; verify at the call site). The user's function
// pointer is stored by value behind \p user_func_ptr; it is read back
// with memcpy because converting a data pointer (void*) directly to a
// function pointer is not portable C++.
inline void BOOST_COMPUTE_CL_CALLBACK
nullary_native_kernel_trampoline(void *user_func_ptr)
{
    void (*user_func)();
    std::memcpy(&user_func, user_func_ptr, sizeof(user_func));
    user_func();
}
47 | ||
48 | } // end detail namespace | |
49 | ||
50 | /// \class command_queue | |
51 | /// \brief A command queue. | |
52 | /// | |
53 | /// Command queues provide the interface for interacting with compute | |
54 | /// devices. The command_queue class provides methods to copy data to | |
55 | /// and from a compute device as well as execute compute kernels. | |
56 | /// | |
57 | /// Command queues are created for a compute device within a compute | |
58 | /// context. | |
59 | /// | |
60 | /// For example, to create a context and command queue for the default device | |
61 | /// on the system (this is the normal set up code used by almost all OpenCL | |
62 | /// programs): | |
63 | /// \code | |
64 | /// #include <boost/compute/core.hpp> | |
65 | /// | |
66 | /// // get the default compute device | |
67 | /// boost::compute::device device = boost::compute::system::default_device(); | |
68 | /// | |
69 | /// // set up a compute context and command queue | |
70 | /// boost::compute::context context(device); | |
71 | /// boost::compute::command_queue queue(context, device); | |
72 | /// \endcode | |
73 | /// | |
74 | /// The default command queue for the system can be obtained with the | |
75 | /// system::default_queue() method. | |
76 | /// | |
77 | /// \see buffer, context, kernel | |
78 | class command_queue | |
79 | { | |
80 | public: | |
    /// Command queue creation properties. The enumerator values map
    /// directly to the corresponding CL_QUEUE_* property bits.
    enum properties {
        enable_profiling = CL_QUEUE_PROFILING_ENABLE,
        enable_out_of_order_execution = CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE
    };

    /// Flags for buffer/image map operations. The enumerator values
    /// map directly to the corresponding CL_MAP_* flags.
    enum map_flags {
        map_read = CL_MAP_READ,
        map_write = CL_MAP_WRITE
        #ifdef CL_VERSION_1_2
        ,
        // only available when compiled against OpenCL 1.2 or later
        map_write_invalidate_region = CL_MAP_WRITE_INVALIDATE_REGION
        #endif
    };
94 | ||
    /// Creates a null command queue.
    ///
    /// A null queue owns no OpenCL resources; the only safe operations
    /// on it are assignment and destruction.
    command_queue()
        : m_queue(0)
    {
    }
100 | ||
    /// Creates a command queue wrapping an existing OpenCL command
    /// queue handle.
    ///
    /// If \p retain is \c true (the default) the reference count for
    /// \p queue is incremented, so the caller's reference is unaffected
    /// when this object is destroyed.
    explicit command_queue(cl_command_queue queue, bool retain = true)
        : m_queue(queue)
    {
        if(m_queue && retain){
            clRetainCommandQueue(m_queue);
        }
    }
108 | ||
    /// Creates a command queue in \p context for \p device with
    /// \p properties.
    ///
    /// Throws \c opencl_error if queue creation fails.
    ///
    /// \see_opencl_ref{clCreateCommandQueue}
    command_queue(const context &context,
                  const device &device,
                  cl_command_queue_properties properties = 0)
    {
        BOOST_ASSERT(device.id() != 0);

        cl_int error = 0;

#ifdef CL_VERSION_2_0
        // clCreateCommandQueue() is deprecated for OpenCL 2.0+ devices;
        // use clCreateCommandQueueWithProperties() with a
        // zero-terminated {key, value} property list instead.
        if (device.check_version(2, 0)){
            std::vector<cl_queue_properties> queue_properties;
            if(properties){
                queue_properties.push_back(CL_QUEUE_PROPERTIES);
                queue_properties.push_back(cl_queue_properties(properties));
                queue_properties.push_back(cl_queue_properties(0));
            }

            // pass a null pointer when no properties were requested
            const cl_queue_properties *queue_properties_ptr =
                queue_properties.empty() ? 0 : &queue_properties[0];

            m_queue = clCreateCommandQueueWithProperties(
                context, device.id(), queue_properties_ptr, &error
            );
        } else
#endif
        {
            // Suppress deprecated declarations warning
            BOOST_COMPUTE_DISABLE_DEPRECATED_DECLARATIONS();
            m_queue = clCreateCommandQueue(
                context, device.id(), properties, &error
            );
            BOOST_COMPUTE_ENABLE_DEPRECATED_DECLARATIONS();
        }

        if(!m_queue){
            BOOST_THROW_EXCEPTION(opencl_error(error));
        }
    }
151 | ||
    /// Creates a new command queue object as a copy of \p other.
    ///
    /// Both objects refer to the same underlying OpenCL queue; its
    /// reference count is incremented.
    command_queue(const command_queue &other)
        : m_queue(other.m_queue)
    {
        if(m_queue){
            clRetainCommandQueue(m_queue);
        }
    }
160 | ||
    /// Copies the command queue object from \p other to \c *this.
    ///
    /// Releases the currently-held queue (if any) and retains the
    /// queue held by \p other. Self-assignment is a no-op.
    command_queue& operator=(const command_queue &other)
    {
        if(this != &other){
            if(m_queue){
                clReleaseCommandQueue(m_queue);
            }

            m_queue = other.m_queue;

            if(m_queue){
                clRetainCommandQueue(m_queue);
            }
        }

        return *this;
    }
178 | ||
#ifndef BOOST_COMPUTE_NO_RVALUE_REFERENCES
    /// Move-constructs a new command queue object from \p other.
    ///
    /// Ownership of the handle is transferred; \p other is left null.
    command_queue(command_queue&& other) BOOST_NOEXCEPT
        : m_queue(other.m_queue)
    {
        other.m_queue = 0;
    }

    /// Move-assigns the command queue from \p other to \c *this.
    ///
    /// The currently-held queue (if any) is released; \p other is left
    /// null afterwards.
    command_queue& operator=(command_queue&& other) BOOST_NOEXCEPT
    {
        if(m_queue){
            clReleaseCommandQueue(m_queue);
        }

        m_queue = other.m_queue;
        other.m_queue = 0;

        return *this;
    }
#endif // BOOST_COMPUTE_NO_RVALUE_REFERENCES
200 | ||
    /// Destroys the command queue.
    ///
    /// Releases the underlying handle (if non-null); the assertion
    /// macro reports a failing release in debug builds only.
    ///
    /// \see_opencl_ref{clReleaseCommandQueue}
    ~command_queue()
    {
        if(m_queue){
            BOOST_COMPUTE_ASSERT_CL_SUCCESS(
                clReleaseCommandQueue(m_queue)
            );
        }
    }
212 | ||
    /// Returns the underlying OpenCL command queue.
    ///
    /// A non-const reference is returned from a const member (via
    /// const_cast) so the handle's address can be passed to OpenCL
    /// APIs; callers must not release or overwrite the handle.
    cl_command_queue& get() const
    {
        return const_cast<cl_command_queue &>(m_queue);
    }
218 | ||
    /// Returns the device that the command queue issues commands to
    /// (queried via CL_QUEUE_DEVICE).
    device get_device() const
    {
        return device(get_info<cl_device_id>(CL_QUEUE_DEVICE));
    }
224 | ||
    /// Returns the context for the command queue (queried via
    /// CL_QUEUE_CONTEXT).
    context get_context() const
    {
        return context(get_info<cl_context>(CL_QUEUE_CONTEXT));
    }
230 | ||
    /// Returns information about the command queue.
    ///
    /// \tparam T the type of the requested info value
    /// \param info one of the CL_QUEUE_* query enumerators
    ///
    /// \see_opencl_ref{clGetCommandQueueInfo}
    template<class T>
    T get_info(cl_command_queue_info info) const
    {
        return detail::get_object_info<T>(clGetCommandQueueInfo, m_queue, info);
    }

    /// \overload
    /// Compile-time variant: the result type is deduced from the
    /// query enumerator. Defined elsewhere via specialization.
    template<int Enum>
    typename detail::get_object_info_type<command_queue, Enum>::type
    get_info() const;
244 | ||
    /// Returns the properties bitfield the queue was created with
    /// (queried via CL_QUEUE_PROPERTIES).
    cl_command_queue_properties get_properties() const
    {
        return get_info<cl_command_queue_properties>(CL_QUEUE_PROPERTIES);
    }
250 | ||
251 | /// Enqueues a command to read data from \p buffer to host memory. | |
252 | /// | |
253 | /// \see_opencl_ref{clEnqueueReadBuffer} | |
254 | /// | |
255 | /// \see copy() | |
256 | event enqueue_read_buffer(const buffer &buffer, | |
257 | size_t offset, | |
258 | size_t size, | |
259 | void *host_ptr, | |
260 | const wait_list &events = wait_list()) | |
261 | { | |
262 | BOOST_ASSERT(m_queue != 0); | |
263 | BOOST_ASSERT(size <= buffer.size()); | |
264 | BOOST_ASSERT(buffer.get_context() == this->get_context()); | |
265 | BOOST_ASSERT(host_ptr != 0); | |
266 | ||
267 | event event_; | |
268 | ||
269 | cl_int ret = clEnqueueReadBuffer( | |
270 | m_queue, | |
271 | buffer.get(), | |
272 | CL_TRUE, | |
273 | offset, | |
274 | size, | |
275 | host_ptr, | |
276 | events.size(), | |
277 | events.get_event_ptr(), | |
278 | &event_.get() | |
279 | ); | |
280 | ||
281 | if(ret != CL_SUCCESS){ | |
282 | BOOST_THROW_EXCEPTION(opencl_error(ret)); | |
283 | } | |
284 | ||
285 | return event_; | |
286 | } | |
287 | ||
288 | /// Enqueues a command to read data from \p buffer to host memory. The | |
289 | /// copy will be performed asynchronously. | |
290 | /// | |
291 | /// \see_opencl_ref{clEnqueueReadBuffer} | |
292 | /// | |
293 | /// \see copy_async() | |
294 | event enqueue_read_buffer_async(const buffer &buffer, | |
295 | size_t offset, | |
296 | size_t size, | |
297 | void *host_ptr, | |
298 | const wait_list &events = wait_list()) | |
299 | { | |
300 | BOOST_ASSERT(m_queue != 0); | |
301 | BOOST_ASSERT(size <= buffer.size()); | |
302 | BOOST_ASSERT(buffer.get_context() == this->get_context()); | |
303 | BOOST_ASSERT(host_ptr != 0); | |
304 | ||
305 | event event_; | |
306 | ||
307 | cl_int ret = clEnqueueReadBuffer( | |
308 | m_queue, | |
309 | buffer.get(), | |
310 | CL_FALSE, | |
311 | offset, | |
312 | size, | |
313 | host_ptr, | |
314 | events.size(), | |
315 | events.get_event_ptr(), | |
316 | &event_.get() | |
317 | ); | |
318 | ||
319 | if(ret != CL_SUCCESS){ | |
320 | BOOST_THROW_EXCEPTION(opencl_error(ret)); | |
321 | } | |
322 | ||
323 | return event_; | |
324 | } | |
325 | ||
#if defined(CL_VERSION_1_1) || defined(BOOST_COMPUTE_DOXYGEN_INVOKED)
    /// Enqueues a command to read a rectangular region from \p buffer to
    /// host memory. The call blocks until the read has completed.
    ///
    /// Origins and region are in bytes for the first dimension; a pitch
    /// of zero lets OpenCL compute it from the region.
    ///
    /// \see_opencl_ref{clEnqueueReadBufferRect}
    ///
    /// \opencl_version_warning{1,1}
    event enqueue_read_buffer_rect(const buffer &buffer,
                                   const size_t buffer_origin[3],
                                   const size_t host_origin[3],
                                   const size_t region[3],
                                   size_t buffer_row_pitch,
                                   size_t buffer_slice_pitch,
                                   size_t host_row_pitch,
                                   size_t host_slice_pitch,
                                   void *host_ptr,
                                   const wait_list &events = wait_list())
    {
        BOOST_ASSERT(m_queue != 0);
        BOOST_ASSERT(buffer.get_context() == this->get_context());
        BOOST_ASSERT(host_ptr != 0);

        event event_;

        cl_int ret = clEnqueueReadBufferRect(
            m_queue,
            buffer.get(),
            CL_TRUE,
            buffer_origin,
            host_origin,
            region,
            buffer_row_pitch,
            buffer_slice_pitch,
            host_row_pitch,
            host_slice_pitch,
            host_ptr,
            events.size(),
            events.get_event_ptr(),
            &event_.get()
        );

        if(ret != CL_SUCCESS){
            BOOST_THROW_EXCEPTION(opencl_error(ret));
        }

        return event_;
    }

    /// Enqueues a command to read a rectangular region from \p buffer to
    /// host memory. The copy will be performed asynchronously;
    /// \p host_ptr must stay valid until the returned event completes.
    ///
    /// \see_opencl_ref{clEnqueueReadBufferRect}
    ///
    /// \opencl_version_warning{1,1}
    event enqueue_read_buffer_rect_async(const buffer &buffer,
                                         const size_t buffer_origin[3],
                                         const size_t host_origin[3],
                                         const size_t region[3],
                                         size_t buffer_row_pitch,
                                         size_t buffer_slice_pitch,
                                         size_t host_row_pitch,
                                         size_t host_slice_pitch,
                                         void *host_ptr,
                                         const wait_list &events = wait_list())
    {
        BOOST_ASSERT(m_queue != 0);
        BOOST_ASSERT(buffer.get_context() == this->get_context());
        BOOST_ASSERT(host_ptr != 0);

        event event_;

        cl_int ret = clEnqueueReadBufferRect(
            m_queue,
            buffer.get(),
            CL_FALSE,
            buffer_origin,
            host_origin,
            region,
            buffer_row_pitch,
            buffer_slice_pitch,
            host_row_pitch,
            host_slice_pitch,
            host_ptr,
            events.size(),
            events.get_event_ptr(),
            &event_.get()
        );

        if(ret != CL_SUCCESS){
            BOOST_THROW_EXCEPTION(opencl_error(ret));
        }

        return event_;
    }
#endif // CL_VERSION_1_1
421 | ||
422 | /// Enqueues a command to write data from host memory to \p buffer. | |
423 | /// | |
424 | /// \see_opencl_ref{clEnqueueWriteBuffer} | |
425 | /// | |
426 | /// \see copy() | |
427 | event enqueue_write_buffer(const buffer &buffer, | |
428 | size_t offset, | |
429 | size_t size, | |
430 | const void *host_ptr, | |
431 | const wait_list &events = wait_list()) | |
432 | { | |
433 | BOOST_ASSERT(m_queue != 0); | |
434 | BOOST_ASSERT(size <= buffer.size()); | |
435 | BOOST_ASSERT(buffer.get_context() == this->get_context()); | |
436 | BOOST_ASSERT(host_ptr != 0); | |
437 | ||
438 | event event_; | |
439 | ||
440 | cl_int ret = clEnqueueWriteBuffer( | |
441 | m_queue, | |
442 | buffer.get(), | |
443 | CL_TRUE, | |
444 | offset, | |
445 | size, | |
446 | host_ptr, | |
447 | events.size(), | |
448 | events.get_event_ptr(), | |
449 | &event_.get() | |
450 | ); | |
451 | ||
452 | if(ret != CL_SUCCESS){ | |
453 | BOOST_THROW_EXCEPTION(opencl_error(ret)); | |
454 | } | |
455 | ||
456 | return event_; | |
457 | } | |
458 | ||
459 | /// Enqueues a command to write data from host memory to \p buffer. | |
460 | /// The copy is performed asynchronously. | |
461 | /// | |
462 | /// \see_opencl_ref{clEnqueueWriteBuffer} | |
463 | /// | |
464 | /// \see copy_async() | |
465 | event enqueue_write_buffer_async(const buffer &buffer, | |
466 | size_t offset, | |
467 | size_t size, | |
468 | const void *host_ptr, | |
469 | const wait_list &events = wait_list()) | |
470 | { | |
471 | BOOST_ASSERT(m_queue != 0); | |
472 | BOOST_ASSERT(size <= buffer.size()); | |
473 | BOOST_ASSERT(buffer.get_context() == this->get_context()); | |
474 | BOOST_ASSERT(host_ptr != 0); | |
475 | ||
476 | event event_; | |
477 | ||
478 | cl_int ret = clEnqueueWriteBuffer( | |
479 | m_queue, | |
480 | buffer.get(), | |
481 | CL_FALSE, | |
482 | offset, | |
483 | size, | |
484 | host_ptr, | |
485 | events.size(), | |
486 | events.get_event_ptr(), | |
487 | &event_.get() | |
488 | ); | |
489 | ||
490 | if(ret != CL_SUCCESS){ | |
491 | BOOST_THROW_EXCEPTION(opencl_error(ret)); | |
492 | } | |
493 | ||
494 | return event_; | |
495 | } | |
496 | ||
#if defined(CL_VERSION_1_1) || defined(BOOST_COMPUTE_DOXYGEN_INVOKED)
    /// Enqueues a command to write a rectangular region from host memory
    /// to \p buffer. The call blocks until the write has completed.
    ///
    /// \see_opencl_ref{clEnqueueWriteBufferRect}
    ///
    /// \opencl_version_warning{1,1}
    event enqueue_write_buffer_rect(const buffer &buffer,
                                    const size_t buffer_origin[3],
                                    const size_t host_origin[3],
                                    const size_t region[3],
                                    size_t buffer_row_pitch,
                                    size_t buffer_slice_pitch,
                                    size_t host_row_pitch,
                                    size_t host_slice_pitch,
                                    void *host_ptr,
                                    const wait_list &events = wait_list())
    {
        BOOST_ASSERT(m_queue != 0);
        BOOST_ASSERT(buffer.get_context() == this->get_context());
        BOOST_ASSERT(host_ptr != 0);

        event event_;

        cl_int ret = clEnqueueWriteBufferRect(
            m_queue,
            buffer.get(),
            CL_TRUE,
            buffer_origin,
            host_origin,
            region,
            buffer_row_pitch,
            buffer_slice_pitch,
            host_row_pitch,
            host_slice_pitch,
            host_ptr,
            events.size(),
            events.get_event_ptr(),
            &event_.get()
        );

        if(ret != CL_SUCCESS){
            BOOST_THROW_EXCEPTION(opencl_error(ret));
        }

        return event_;
    }

    /// Enqueues a command to write a rectangular region from host memory
    /// to \p buffer. The copy is performed asynchronously; \p host_ptr
    /// must stay valid until the returned event completes.
    ///
    /// \see_opencl_ref{clEnqueueWriteBufferRect}
    ///
    /// \opencl_version_warning{1,1}
    event enqueue_write_buffer_rect_async(const buffer &buffer,
                                          const size_t buffer_origin[3],
                                          const size_t host_origin[3],
                                          const size_t region[3],
                                          size_t buffer_row_pitch,
                                          size_t buffer_slice_pitch,
                                          size_t host_row_pitch,
                                          size_t host_slice_pitch,
                                          void *host_ptr,
                                          const wait_list &events = wait_list())
    {
        BOOST_ASSERT(m_queue != 0);
        BOOST_ASSERT(buffer.get_context() == this->get_context());
        BOOST_ASSERT(host_ptr != 0);

        event event_;

        cl_int ret = clEnqueueWriteBufferRect(
            m_queue,
            buffer.get(),
            CL_FALSE,
            buffer_origin,
            host_origin,
            region,
            buffer_row_pitch,
            buffer_slice_pitch,
            host_row_pitch,
            host_slice_pitch,
            host_ptr,
            events.size(),
            events.get_event_ptr(),
            &event_.get()
        );

        if(ret != CL_SUCCESS){
            BOOST_THROW_EXCEPTION(opencl_error(ret));
        }

        return event_;
    }
#endif // CL_VERSION_1_1
592 | ||
    /// Enqueues a command to copy \p size bytes of data from
    /// \p src_buffer (starting at \p src_offset) to \p dst_buffer
    /// (starting at \p dst_offset). The copy is device-side and
    /// non-blocking; use the returned event to synchronize.
    ///
    /// \see_opencl_ref{clEnqueueCopyBuffer}
    ///
    /// \see copy()
    event enqueue_copy_buffer(const buffer &src_buffer,
                              const buffer &dst_buffer,
                              size_t src_offset,
                              size_t dst_offset,
                              size_t size,
                              const wait_list &events = wait_list())
    {
        BOOST_ASSERT(m_queue != 0);
        BOOST_ASSERT(src_offset + size <= src_buffer.size());
        BOOST_ASSERT(dst_offset + size <= dst_buffer.size());
        BOOST_ASSERT(src_buffer.get_context() == this->get_context());
        BOOST_ASSERT(dst_buffer.get_context() == this->get_context());

        event event_;

        cl_int ret = clEnqueueCopyBuffer(
            m_queue,
            src_buffer.get(),
            dst_buffer.get(),
            src_offset,
            dst_offset,
            size,
            events.size(),
            events.get_event_ptr(),
            &event_.get()
        );

        if(ret != CL_SUCCESS){
            BOOST_THROW_EXCEPTION(opencl_error(ret));
        }

        return event_;
    }
632 | ||
#if defined(CL_VERSION_1_1) || defined(BOOST_COMPUTE_DOXYGEN_INVOKED)
    /// Enqueues a command to copy a rectangular region from
    /// \p src_buffer to \p dst_buffer.
    ///
    /// NOTE(review): by argument position, \p buffer_row_pitch /
    /// \p buffer_slice_pitch are the *source* pitches and
    /// \p host_row_pitch / \p host_slice_pitch are the *destination*
    /// pitches of clEnqueueCopyBufferRect(); the parameter names are
    /// misleading but kept for interface compatibility.
    ///
    /// \see_opencl_ref{clEnqueueCopyBufferRect}
    ///
    /// \opencl_version_warning{1,1}
    event enqueue_copy_buffer_rect(const buffer &src_buffer,
                                   const buffer &dst_buffer,
                                   const size_t src_origin[3],
                                   const size_t dst_origin[3],
                                   const size_t region[3],
                                   size_t buffer_row_pitch,
                                   size_t buffer_slice_pitch,
                                   size_t host_row_pitch,
                                   size_t host_slice_pitch,
                                   const wait_list &events = wait_list())
    {
        BOOST_ASSERT(m_queue != 0);
        BOOST_ASSERT(src_buffer.get_context() == this->get_context());
        BOOST_ASSERT(dst_buffer.get_context() == this->get_context());

        event event_;

        cl_int ret = clEnqueueCopyBufferRect(
            m_queue,
            src_buffer.get(),
            dst_buffer.get(),
            src_origin,
            dst_origin,
            region,
            buffer_row_pitch,
            buffer_slice_pitch,
            host_row_pitch,
            host_slice_pitch,
            events.size(),
            events.get_event_ptr(),
            &event_.get()
        );

        if(ret != CL_SUCCESS){
            BOOST_THROW_EXCEPTION(opencl_error(ret));
        }

        return event_;
    }
#endif // CL_VERSION_1_1
680 | ||
#if defined(CL_VERSION_1_2) || defined(BOOST_COMPUTE_DOXYGEN_INVOKED)
    /// Enqueues a command to fill \p buffer with \p pattern.
    ///
    /// \p pattern points to \p pattern_size bytes that are repeated over
    /// the byte range [\p offset, \p offset + \p size); per the OpenCL
    /// spec, \p offset and \p size must be multiples of \p pattern_size.
    ///
    /// \see_opencl_ref{clEnqueueFillBuffer}
    ///
    /// \opencl_version_warning{1,2}
    ///
    /// \see fill()
    event enqueue_fill_buffer(const buffer &buffer,
                              const void *pattern,
                              size_t pattern_size,
                              size_t offset,
                              size_t size,
                              const wait_list &events = wait_list())
    {
        BOOST_ASSERT(m_queue != 0);
        BOOST_ASSERT(offset + size <= buffer.size());
        BOOST_ASSERT(buffer.get_context() == this->get_context());

        event event_;

        cl_int ret = clEnqueueFillBuffer(
            m_queue,
            buffer.get(),
            pattern,
            pattern_size,
            offset,
            size,
            events.size(),
            events.get_event_ptr(),
            &event_.get()
        );

        if(ret != CL_SUCCESS){
            BOOST_THROW_EXCEPTION(opencl_error(ret));
        }

        return event_;
    }
#endif // CL_VERSION_1_2
721 | ||
    /// Enqueues a command to map \p buffer into the host address space.
    /// The call blocks until the mapping is complete (CL_TRUE). The
    /// event associated with the map operation is returned through the
    /// \p map_buffer_event parameter.
    ///
    /// Returns a host pointer to the mapped region; unmap it with
    /// enqueue_unmap_buffer().
    ///
    /// \see_opencl_ref{clEnqueueMapBuffer}
    void* enqueue_map_buffer(const buffer &buffer,
                             cl_map_flags flags,
                             size_t offset,
                             size_t size,
                             event &map_buffer_event,
                             const wait_list &events = wait_list())
    {
        BOOST_ASSERT(m_queue != 0);
        BOOST_ASSERT(offset + size <= buffer.size());
        BOOST_ASSERT(buffer.get_context() == this->get_context());

        cl_int ret = 0;
        void *pointer = clEnqueueMapBuffer(
            m_queue,
            buffer.get(),
            CL_TRUE,
            flags,
            offset,
            size,
            events.size(),
            events.get_event_ptr(),
            &map_buffer_event.get(),
            &ret
        );

        if(ret != CL_SUCCESS){
            BOOST_THROW_EXCEPTION(opencl_error(ret));
        }

        return pointer;
    }
758 | ||
759 | /// \overload | |
760 | void* enqueue_map_buffer(const buffer &buffer, | |
761 | cl_map_flags flags, | |
762 | size_t offset, | |
763 | size_t size, | |
764 | const wait_list &events = wait_list()) | |
765 | { | |
766 | event event_; | |
767 | return enqueue_map_buffer(buffer, flags, offset, size, event_, events); | |
768 | } | |
769 | ||
    /// Enqueues a command to map \p buffer into the host address space.
    /// Map operation is performed asynchronously. The pointer to the mapped
    /// region cannot be used until the map operation has completed.
    ///
    /// Event associated with map operation is returned through
    /// \p map_buffer_event parameter.
    ///
    /// \see_opencl_ref{clEnqueueMapBuffer}
    void* enqueue_map_buffer_async(const buffer &buffer,
                                   cl_map_flags flags,
                                   size_t offset,
                                   size_t size,
                                   event &map_buffer_event,
                                   const wait_list &events = wait_list())
    {
        BOOST_ASSERT(m_queue != 0);
        BOOST_ASSERT(offset + size <= buffer.size());
        BOOST_ASSERT(buffer.get_context() == this->get_context());

        cl_int ret = 0;
        // CL_FALSE: non-blocking map; wait on map_buffer_event before
        // dereferencing the returned pointer
        void *pointer = clEnqueueMapBuffer(
            m_queue,
            buffer.get(),
            CL_FALSE,
            flags,
            offset,
            size,
            events.size(),
            events.get_event_ptr(),
            &map_buffer_event.get(),
            &ret
        );

        if(ret != CL_SUCCESS){
            BOOST_THROW_EXCEPTION(opencl_error(ret));
        }

        return pointer;
    }
809 | ||
810 | /// Enqueues a command to unmap \p buffer from the host memory space. | |
811 | /// | |
812 | /// \see_opencl_ref{clEnqueueUnmapMemObject} | |
813 | event enqueue_unmap_buffer(const buffer &buffer, | |
814 | void *mapped_ptr, | |
815 | const wait_list &events = wait_list()) | |
816 | { | |
817 | BOOST_ASSERT(buffer.get_context() == this->get_context()); | |
818 | ||
819 | return enqueue_unmap_mem_object(buffer.get(), mapped_ptr, events); | |
820 | } | |
821 | ||
    /// Enqueues a command to unmap \p mem from the host memory space.
    /// \p mapped_ptr must be a pointer previously returned by a map
    /// operation on \p mem.
    ///
    /// \see_opencl_ref{clEnqueueUnmapMemObject}
    event enqueue_unmap_mem_object(cl_mem mem,
                                   void *mapped_ptr,
                                   const wait_list &events = wait_list())
    {
        BOOST_ASSERT(m_queue != 0);

        event event_;

        cl_int ret = clEnqueueUnmapMemObject(
            m_queue,
            mem,
            mapped_ptr,
            events.size(),
            events.get_event_ptr(),
            &event_.get()
        );

        if(ret != CL_SUCCESS){
            BOOST_THROW_EXCEPTION(opencl_error(ret));
        }

        return event_;
    }
848 | ||
    /// Enqueues a command to read data from \p image to host memory.
    /// The call blocks until the read has completed (CL_TRUE).
    ///
    /// \p origin and \p region must point to three elements each; a
    /// pitch of zero lets OpenCL compute it from the region.
    ///
    /// \see_opencl_ref{clEnqueueReadImage}
    event enqueue_read_image(const image_object& image,
                             const size_t *origin,
                             const size_t *region,
                             size_t row_pitch,
                             size_t slice_pitch,
                             void *host_ptr,
                             const wait_list &events = wait_list())
    {
        BOOST_ASSERT(m_queue != 0);

        event event_;

        cl_int ret = clEnqueueReadImage(
            m_queue,
            image.get(),
            CL_TRUE,
            origin,
            region,
            row_pitch,
            slice_pitch,
            host_ptr,
            events.size(),
            events.get_event_ptr(),
            &event_.get()
        );

        if(ret != CL_SUCCESS){
            BOOST_THROW_EXCEPTION(opencl_error(ret));
        }

        return event_;
    }
884 | ||
    /// \overload
    ///
    /// N-dimensional convenience overload: pads \p origin with zeros
    /// and \p region with ones up to the three dimensions required by
    /// clEnqueueReadImage().
    template<size_t N>
    event enqueue_read_image(const image_object& image,
                             const extents<N> origin,
                             const extents<N> region,
                             void *host_ptr,
                             size_t row_pitch = 0,
                             size_t slice_pitch = 0,
                             const wait_list &events = wait_list())
    {
        BOOST_ASSERT(image.get_context() == this->get_context());

        size_t origin3[3] = { 0, 0, 0 };
        size_t region3[3] = { 1, 1, 1 };

        std::copy(origin.data(), origin.data() + N, origin3);
        std::copy(region.data(), region.data() + N, region3);

        return enqueue_read_image(
            image, origin3, region3, row_pitch, slice_pitch, host_ptr, events
        );
    }
907 | ||
    /// Enqueues a command to write data from host memory to \p image.
    /// The call blocks until the write has completed (CL_TRUE).
    ///
    /// \p origin and \p region must point to three elements each; a
    /// pitch of zero lets OpenCL compute it from the region.
    ///
    /// \see_opencl_ref{clEnqueueWriteImage}
    event enqueue_write_image(image_object& image,
                              const size_t *origin,
                              const size_t *region,
                              const void *host_ptr,
                              size_t input_row_pitch = 0,
                              size_t input_slice_pitch = 0,
                              const wait_list &events = wait_list())
    {
        BOOST_ASSERT(m_queue != 0);

        event event_;

        cl_int ret = clEnqueueWriteImage(
            m_queue,
            image.get(),
            CL_TRUE,
            origin,
            region,
            input_row_pitch,
            input_slice_pitch,
            host_ptr,
            events.size(),
            events.get_event_ptr(),
            &event_.get()
        );

        if(ret != CL_SUCCESS){
            BOOST_THROW_EXCEPTION(opencl_error(ret));
        }

        return event_;
    }
943 | ||
    /// \overload
    ///
    /// N-dimensional convenience overload: pads \p origin with zeros
    /// and \p region with ones up to the three dimensions required by
    /// clEnqueueWriteImage().
    template<size_t N>
    event enqueue_write_image(image_object& image,
                              const extents<N> origin,
                              const extents<N> region,
                              const void *host_ptr,
                              const size_t input_row_pitch = 0,
                              const size_t input_slice_pitch = 0,
                              const wait_list &events = wait_list())
    {
        BOOST_ASSERT(image.get_context() == this->get_context());

        size_t origin3[3] = { 0, 0, 0 };
        size_t region3[3] = { 1, 1, 1 };

        std::copy(origin.data(), origin.data() + N, origin3);
        std::copy(region.data(), region.data() + N, region3);

        return enqueue_write_image(
            image, origin3, region3, host_ptr, input_row_pitch, input_slice_pitch, events
        );
    }
966 | ||
    /// Enqueues a command to map \p image into the host address space.
    /// The call blocks until the mapping is complete (CL_TRUE).
    ///
    /// Event associated with map operation is returned through
    /// \p map_image_event parameter; the row and slice pitches of the
    /// mapped region are written to \p output_row_pitch and
    /// \p output_slice_pitch.
    ///
    /// \see_opencl_ref{clEnqueueMapImage}
    void* enqueue_map_image(const image_object &image,
                            cl_map_flags flags,
                            const size_t *origin,
                            const size_t *region,
                            size_t &output_row_pitch,
                            size_t &output_slice_pitch,
                            event &map_image_event,
                            const wait_list &events = wait_list())
    {
        BOOST_ASSERT(m_queue != 0);
        BOOST_ASSERT(image.get_context() == this->get_context());

        cl_int ret = 0;
        void *pointer = clEnqueueMapImage(
            m_queue,
            image.get(),
            CL_TRUE,
            flags,
            origin,
            region,
            &output_row_pitch,
            &output_slice_pitch,
            events.size(),
            events.get_event_ptr(),
            &map_image_event.get(),
            &ret
        );

        if(ret != CL_SUCCESS){
            BOOST_THROW_EXCEPTION(opencl_error(ret));
        }

        return pointer;
    }
1007 | ||
1008 | /// \overload | |
1009 | void* enqueue_map_image(const image_object &image, | |
1010 | cl_map_flags flags, | |
1011 | const size_t *origin, | |
1012 | const size_t *region, | |
1013 | size_t &output_row_pitch, | |
1014 | size_t &output_slice_pitch, | |
1015 | const wait_list &events = wait_list()) | |
1016 | { | |
1017 | event event_; | |
1018 | return enqueue_map_image( | |
1019 | image, flags, origin, region, | |
1020 | output_row_pitch, output_slice_pitch, event_, events | |
1021 | ); | |
1022 | } | |
1023 | ||
1024 | /// \overload | |
1025 | template<size_t N> | |
1026 | void* enqueue_map_image(image_object& image, | |
1027 | cl_map_flags flags, | |
1028 | const extents<N> origin, | |
1029 | const extents<N> region, | |
1030 | size_t &output_row_pitch, | |
1031 | size_t &output_slice_pitch, | |
1032 | event &map_image_event, | |
1033 | const wait_list &events = wait_list()) | |
1034 | { | |
1035 | BOOST_ASSERT(image.get_context() == this->get_context()); | |
1036 | ||
1037 | size_t origin3[3] = { 0, 0, 0 }; | |
1038 | size_t region3[3] = { 1, 1, 1 }; | |
1039 | ||
1040 | std::copy(origin.data(), origin.data() + N, origin3); | |
1041 | std::copy(region.data(), region.data() + N, region3); | |
1042 | ||
1043 | return enqueue_map_image( | |
1044 | image, flags, origin3, region3, | |
1045 | output_row_pitch, output_slice_pitch, map_image_event, events | |
1046 | ); | |
1047 | } | |
1048 | ||
1049 | /// \overload | |
1050 | template<size_t N> | |
1051 | void* enqueue_map_image(image_object& image, | |
1052 | cl_map_flags flags, | |
1053 | const extents<N> origin, | |
1054 | const extents<N> region, | |
1055 | size_t &output_row_pitch, | |
1056 | size_t &output_slice_pitch, | |
1057 | const wait_list &events = wait_list()) | |
1058 | { | |
1059 | event event_; | |
1060 | return enqueue_map_image( | |
1061 | image, flags, origin, region, | |
1062 | output_row_pitch, output_slice_pitch, event_, events | |
1063 | ); | |
1064 | } | |
1065 | ||
    /// Enqueues a command to map \p image into the host address space.
    /// Map operation is performed asynchronously. The pointer to the mapped
    /// region cannot be used until the map operation has completed.
    ///
    /// Event associated with map operation is returned through
    /// \p map_image_event parameter.
    ///
    /// \see_opencl_ref{clEnqueueMapImage}
    void* enqueue_map_image_async(const image_object &image,
                                  cl_map_flags flags,
                                  const size_t *origin,
                                  const size_t *region,
                                  size_t &output_row_pitch,
                                  size_t &output_slice_pitch,
                                  event &map_image_event,
                                  const wait_list &events = wait_list())
    {
        BOOST_ASSERT(m_queue != 0);
        BOOST_ASSERT(image.get_context() == this->get_context());

        cl_int ret = 0;
        void *pointer = clEnqueueMapImage(
            m_queue,
            image.get(),
            CL_FALSE, // non-blocking map; wait on map_image_event before use
            flags,
            origin,
            region,
            &output_row_pitch,
            &output_slice_pitch,
            events.size(),
            events.get_event_ptr(),
            &map_image_event.get(),
            &ret
        );

        if(ret != CL_SUCCESS){
            BOOST_THROW_EXCEPTION(opencl_error(ret));
        }

        return pointer;
    }
1108 | ||
1109 | /// \overload | |
1110 | template<size_t N> | |
1111 | void* enqueue_map_image_async(image_object& image, | |
1112 | cl_map_flags flags, | |
1113 | const extents<N> origin, | |
1114 | const extents<N> region, | |
1115 | size_t &output_row_pitch, | |
1116 | size_t &output_slice_pitch, | |
1117 | event &map_image_event, | |
1118 | const wait_list &events = wait_list()) | |
1119 | { | |
1120 | BOOST_ASSERT(image.get_context() == this->get_context()); | |
1121 | ||
1122 | size_t origin3[3] = { 0, 0, 0 }; | |
1123 | size_t region3[3] = { 1, 1, 1 }; | |
1124 | ||
1125 | std::copy(origin.data(), origin.data() + N, origin3); | |
1126 | std::copy(region.data(), region.data() + N, region3); | |
1127 | ||
1128 | return enqueue_map_image_async( | |
1129 | image, flags, origin3, region3, | |
1130 | output_row_pitch, output_slice_pitch, map_image_event, events | |
1131 | ); | |
1132 | } | |
1133 | ||
1134 | /// Enqueues a command to unmap \p image from the host memory space. | |
1135 | /// | |
1136 | /// \see_opencl_ref{clEnqueueUnmapMemObject} | |
1137 | event enqueue_unmap_image(const image_object &image, | |
1138 | void *mapped_ptr, | |
1139 | const wait_list &events = wait_list()) | |
1140 | { | |
1141 | BOOST_ASSERT(image.get_context() == this->get_context()); | |
1142 | ||
1143 | return enqueue_unmap_mem_object(image.get(), mapped_ptr, events); | |
1144 | } | |
1145 | ||
1146 | /// Enqueues a command to copy data from \p src_image to \p dst_image. | |
1147 | /// | |
1148 | /// \see_opencl_ref{clEnqueueCopyImage} | |
1149 | event enqueue_copy_image(const image_object& src_image, | |
1150 | image_object& dst_image, | |
1151 | const size_t *src_origin, | |
1152 | const size_t *dst_origin, | |
1153 | const size_t *region, | |
1154 | const wait_list &events = wait_list()) | |
1155 | { | |
1156 | BOOST_ASSERT(m_queue != 0); | |
1157 | ||
1158 | event event_; | |
1159 | ||
1160 | cl_int ret = clEnqueueCopyImage( | |
1161 | m_queue, | |
1162 | src_image.get(), | |
1163 | dst_image.get(), | |
1164 | src_origin, | |
1165 | dst_origin, | |
1166 | region, | |
1167 | events.size(), | |
1168 | events.get_event_ptr(), | |
1169 | &event_.get() | |
1170 | ); | |
1171 | ||
1172 | if(ret != CL_SUCCESS){ | |
1173 | BOOST_THROW_EXCEPTION(opencl_error(ret)); | |
1174 | } | |
1175 | ||
1176 | return event_; | |
1177 | } | |
1178 | ||
1179 | /// \overload | |
1180 | template<size_t N> | |
1181 | event enqueue_copy_image(const image_object& src_image, | |
1182 | image_object& dst_image, | |
1183 | const extents<N> src_origin, | |
1184 | const extents<N> dst_origin, | |
1185 | const extents<N> region, | |
1186 | const wait_list &events = wait_list()) | |
1187 | { | |
1188 | BOOST_ASSERT(src_image.get_context() == this->get_context()); | |
1189 | BOOST_ASSERT(dst_image.get_context() == this->get_context()); | |
1190 | BOOST_ASSERT_MSG(src_image.format() == dst_image.format(), | |
1191 | "Source and destination image formats must match."); | |
1192 | ||
1193 | size_t src_origin3[3] = { 0, 0, 0 }; | |
1194 | size_t dst_origin3[3] = { 0, 0, 0 }; | |
1195 | size_t region3[3] = { 1, 1, 1 }; | |
1196 | ||
1197 | std::copy(src_origin.data(), src_origin.data() + N, src_origin3); | |
1198 | std::copy(dst_origin.data(), dst_origin.data() + N, dst_origin3); | |
1199 | std::copy(region.data(), region.data() + N, region3); | |
1200 | ||
1201 | return enqueue_copy_image( | |
1202 | src_image, dst_image, src_origin3, dst_origin3, region3, events | |
1203 | ); | |
1204 | } | |
1205 | ||
    /// Enqueues a command to copy data from \p src_image to \p dst_buffer.
    ///
    /// \p src_origin and \p region are 3-component arrays describing the
    /// image sub-region; \p dst_offset is a byte offset into the buffer.
    ///
    /// \see_opencl_ref{clEnqueueCopyImageToBuffer}
    event enqueue_copy_image_to_buffer(const image_object& src_image,
                                       memory_object& dst_buffer,
                                       const size_t *src_origin,
                                       const size_t *region,
                                       size_t dst_offset,
                                       const wait_list &events = wait_list())
    {
        BOOST_ASSERT(m_queue != 0);

        event event_;

        cl_int ret = clEnqueueCopyImageToBuffer(
            m_queue,
            src_image.get(),
            dst_buffer.get(),
            src_origin,
            region,
            dst_offset,
            events.size(),
            events.get_event_ptr(),
            &event_.get()
        );

        if(ret != CL_SUCCESS){
            BOOST_THROW_EXCEPTION(opencl_error(ret));
        }

        return event_;
    }
1238 | ||
    /// Enqueues a command to copy data from \p src_buffer to \p dst_image.
    ///
    /// \p src_offset is a byte offset into the buffer; \p dst_origin and
    /// \p region are 3-component arrays describing the image sub-region.
    ///
    /// \see_opencl_ref{clEnqueueCopyBufferToImage}
    event enqueue_copy_buffer_to_image(const memory_object& src_buffer,
                                       image_object& dst_image,
                                       size_t src_offset,
                                       const size_t *dst_origin,
                                       const size_t *region,
                                       const wait_list &events = wait_list())
    {
        BOOST_ASSERT(m_queue != 0);

        event event_;

        cl_int ret = clEnqueueCopyBufferToImage(
            m_queue,
            src_buffer.get(),
            dst_image.get(),
            src_offset,
            dst_origin,
            region,
            events.size(),
            events.get_event_ptr(),
            &event_.get()
        );

        if(ret != CL_SUCCESS){
            BOOST_THROW_EXCEPTION(opencl_error(ret));
        }

        return event_;
    }
1271 | ||
1272 | #if defined(CL_VERSION_1_2) || defined(BOOST_COMPUTE_DOXYGEN_INVOKED) | |
    /// Enqueues a command to fill \p image with \p fill_color.
    ///
    /// \p fill_color points to a four-component color value whose
    /// interpretation depends on the image's channel type (see the
    /// clEnqueueFillImage documentation).
    ///
    /// \see_opencl_ref{clEnqueueFillImage}
    ///
    /// \opencl_version_warning{1,2}
    event enqueue_fill_image(image_object& image,
                             const void *fill_color,
                             const size_t *origin,
                             const size_t *region,
                             const wait_list &events = wait_list())
    {
        BOOST_ASSERT(m_queue != 0);

        event event_;

        cl_int ret = clEnqueueFillImage(
            m_queue,
            image.get(),
            fill_color,
            origin,
            region,
            events.size(),
            events.get_event_ptr(),
            &event_.get()
        );

        if(ret != CL_SUCCESS){
            BOOST_THROW_EXCEPTION(opencl_error(ret));
        }

        return event_;
    }
1305 | ||
1306 | /// \overload | |
1307 | template<size_t N> | |
1308 | event enqueue_fill_image(image_object& image, | |
1309 | const void *fill_color, | |
1310 | const extents<N> origin, | |
1311 | const extents<N> region, | |
1312 | const wait_list &events = wait_list()) | |
1313 | { | |
1314 | BOOST_ASSERT(image.get_context() == this->get_context()); | |
1315 | ||
1316 | size_t origin3[3] = { 0, 0, 0 }; | |
1317 | size_t region3[3] = { 1, 1, 1 }; | |
1318 | ||
1319 | std::copy(origin.data(), origin.data() + N, origin3); | |
1320 | std::copy(region.data(), region.data() + N, region3); | |
1321 | ||
1322 | return enqueue_fill_image( | |
1323 | image, fill_color, origin3, region3, events | |
1324 | ); | |
1325 | } | |
1326 | ||
    /// Enqueues a command to migrate \p mem_objects.
    ///
    /// \p flags is a combination of cl_mem_migration_flags values
    /// controlling where and how the objects are migrated.
    ///
    /// \see_opencl_ref{clEnqueueMigrateMemObjects}
    ///
    /// \opencl_version_warning{1,2}
    event enqueue_migrate_memory_objects(uint_ num_mem_objects,
                                         const cl_mem *mem_objects,
                                         cl_mem_migration_flags flags,
                                         const wait_list &events = wait_list())
    {
        BOOST_ASSERT(m_queue != 0);

        event event_;

        cl_int ret = clEnqueueMigrateMemObjects(
            m_queue,
            num_mem_objects,
            mem_objects,
            flags,
            events.size(),
            events.get_event_ptr(),
            &event_.get()
        );

        if(ret != CL_SUCCESS){
            BOOST_THROW_EXCEPTION(opencl_error(ret));
        }

        return event_;
    }
1357 | #endif // CL_VERSION_1_2 | |
1358 | ||
    /// Enqueues a kernel for execution.
    ///
    /// \p work_dim gives the number of dimensions (1-3) described by the
    /// \p global_work_offset, \p global_work_size and \p local_work_size
    /// arrays. \p local_work_size may be null to let the implementation
    /// pick the work-group size.
    ///
    /// \see_opencl_ref{clEnqueueNDRangeKernel}
    event enqueue_nd_range_kernel(const kernel &kernel,
                                  size_t work_dim,
                                  const size_t *global_work_offset,
                                  const size_t *global_work_size,
                                  const size_t *local_work_size,
                                  const wait_list &events = wait_list())
    {
        BOOST_ASSERT(m_queue != 0);
        BOOST_ASSERT(kernel.get_context() == this->get_context());

        event event_;

        cl_int ret = clEnqueueNDRangeKernel(
            m_queue,
            kernel,
            static_cast<cl_uint>(work_dim),
            global_work_offset,
            global_work_size,
            local_work_size,
            events.size(),
            events.get_event_ptr(),
            &event_.get()
        );

        if(ret != CL_SUCCESS){
            BOOST_THROW_EXCEPTION(opencl_error(ret));
        }

        return event_;
    }
1392 | ||
1393 | /// \overload | |
1394 | template<size_t N> | |
1395 | event enqueue_nd_range_kernel(const kernel &kernel, | |
1396 | const extents<N> &global_work_offset, | |
1397 | const extents<N> &global_work_size, | |
1398 | const extents<N> &local_work_size, | |
1399 | const wait_list &events = wait_list()) | |
1400 | { | |
1401 | return enqueue_nd_range_kernel( | |
1402 | kernel, | |
1403 | N, | |
1404 | global_work_offset.data(), | |
1405 | global_work_size.data(), | |
1406 | local_work_size.data(), | |
1407 | events | |
1408 | ); | |
1409 | } | |
1410 | ||
1411 | /// Convenience method which calls enqueue_nd_range_kernel() with a | |
1412 | /// one-dimensional range. | |
1413 | event enqueue_1d_range_kernel(const kernel &kernel, | |
1414 | size_t global_work_offset, | |
1415 | size_t global_work_size, | |
1416 | size_t local_work_size, | |
1417 | const wait_list &events = wait_list()) | |
1418 | { | |
1419 | return enqueue_nd_range_kernel( | |
1420 | kernel, | |
1421 | 1, | |
1422 | &global_work_offset, | |
1423 | &global_work_size, | |
1424 | local_work_size ? &local_work_size : 0, | |
1425 | events | |
1426 | ); | |
1427 | } | |
1428 | ||
    /// Enqueues a kernel to execute using a single work-item.
    ///
    /// \see_opencl_ref{clEnqueueTask}
    event enqueue_task(const kernel &kernel, const wait_list &events = wait_list())
    {
        BOOST_ASSERT(m_queue != 0);
        BOOST_ASSERT(kernel.get_context() == this->get_context());

        event event_;

        // clEnqueueTask() was deprecated in OpenCL 2.0. In that case we
        // just forward to the equivalent clEnqueueNDRangeKernel() call.
    #ifdef CL_VERSION_2_0
        // One-dimensional range with a single work-item and a
        // work-group size of one — identical semantics to clEnqueueTask().
        size_t one = 1;
        cl_int ret = clEnqueueNDRangeKernel(
            m_queue, kernel, 1, 0, &one, &one,
            events.size(), events.get_event_ptr(), &event_.get()
        );
    #else
        cl_int ret = clEnqueueTask(
            m_queue, kernel, events.size(), events.get_event_ptr(), &event_.get()
        );
    #endif

        if(ret != CL_SUCCESS){
            BOOST_THROW_EXCEPTION(opencl_error(ret));
        }

        return event_;
    }
1459 | ||
    /// Enqueues a function to execute on the host.
    ///
    /// \p args (of \p cb_args bytes) is copied by the OpenCL runtime and
    /// passed to \p user_func when it runs. \p mem_list and
    /// \p args_mem_loc describe cl_mem objects whose mapped pointers are
    /// patched into the copied argument block (see clEnqueueNativeKernel).
    event enqueue_native_kernel(void (BOOST_COMPUTE_CL_CALLBACK *user_func)(void *),
                                void *args,
                                size_t cb_args,
                                uint_ num_mem_objects,
                                const cl_mem *mem_list,
                                const void **args_mem_loc,
                                const wait_list &events = wait_list())
    {
        BOOST_ASSERT(m_queue != 0);

        event event_;
        cl_int ret = clEnqueueNativeKernel(
            m_queue,
            user_func,
            args,
            cb_args,
            num_mem_objects,
            mem_list,
            args_mem_loc,
            events.size(),
            events.get_event_ptr(),
            &event_.get()
        );
        if(ret != CL_SUCCESS){
            BOOST_THROW_EXCEPTION(opencl_error(ret));
        }

        return event_;
    }
1490 | ||
    /// Convenience overload for enqueue_native_kernel() which enqueues a
    /// native kernel on the host with a nullary function.
    ///
    /// The function pointer itself is passed as the argument block; the
    /// trampoline (detail::nullary_native_kernel_trampoline) memcpy's it
    /// back out of the runtime's copy and invokes it. This avoids the
    /// undefined behavior of casting a function pointer to void*.
    event enqueue_native_kernel(void (BOOST_COMPUTE_CL_CALLBACK *user_func)(void),
                                const wait_list &events = wait_list())
    {
        return enqueue_native_kernel(
            detail::nullary_native_kernel_trampoline,
            reinterpret_cast<void *>(&user_func),
            sizeof(user_func),
            0,
            0,
            0,
            events
        );
    }
1506 | ||
    /// Flushes the command queue.
    ///
    /// Issues all previously queued commands to the device; does not
    /// wait for them to complete (use finish() for that). The return
    /// value of clFlush() is intentionally ignored.
    ///
    /// \see_opencl_ref{clFlush}
    void flush()
    {
        BOOST_ASSERT(m_queue != 0);

        clFlush(m_queue);
    }
1516 | ||
    /// Blocks until all outstanding commands in the queue have finished.
    ///
    /// The return value of clFinish() is intentionally ignored.
    ///
    /// \see_opencl_ref{clFinish}
    void finish()
    {
        BOOST_ASSERT(m_queue != 0);

        clFinish(m_queue);
    }
1526 | ||
    /// Enqueues a barrier in the queue.
    ///
    /// On OpenCL 1.2+ devices this uses clEnqueueBarrierWithWaitList();
    /// on older devices it falls back to the deprecated
    /// clEnqueueBarrier().
    void enqueue_barrier()
    {
        BOOST_ASSERT(m_queue != 0);
        cl_int ret = CL_SUCCESS;

    #ifdef CL_VERSION_1_2
        if(get_device().check_version(1, 2)){
            ret = clEnqueueBarrierWithWaitList(m_queue, 0, 0, 0);
        } else
    #endif // CL_VERSION_1_2
        {
            // Suppress deprecated declarations warning
            BOOST_COMPUTE_DISABLE_DEPRECATED_DECLARATIONS();
            ret = clEnqueueBarrier(m_queue);
            BOOST_COMPUTE_ENABLE_DEPRECATED_DECLARATIONS();
        }

        if(ret != CL_SUCCESS){
            BOOST_THROW_EXCEPTION(opencl_error(ret));
        }
    }
1549 | ||
1550 | #if defined(CL_VERSION_1_2) || defined(BOOST_COMPUTE_DOXYGEN_INVOKED) | |
1551 | /// Enqueues a barrier in the queue after \p events. | |
1552 | /// | |
1553 | /// \opencl_version_warning{1,2} | |
1554 | event enqueue_barrier(const wait_list &events) | |
1555 | { | |
1556 | BOOST_ASSERT(m_queue != 0); | |
1557 | ||
1558 | event event_; | |
1559 | cl_int ret = CL_SUCCESS; | |
1560 | ||
1561 | ret = clEnqueueBarrierWithWaitList( | |
1562 | m_queue, events.size(), events.get_event_ptr(), &event_.get() | |
1563 | ); | |
1564 | ||
1565 | if(ret != CL_SUCCESS){ | |
1566 | BOOST_THROW_EXCEPTION(opencl_error(ret)); | |
1567 | } | |
1568 | ||
1569 | return event_; | |
1570 | } | |
1571 | #endif // CL_VERSION_1_2 | |
1572 | ||
1573 | /// Enqueues a marker in the queue and returns an event that can be | |
1574 | /// used to track its progress. | |
1575 | event enqueue_marker() | |
1576 | { | |
1577 | event event_; | |
1578 | cl_int ret = CL_SUCCESS; | |
1579 | ||
1580 | #ifdef CL_VERSION_1_2 | |
1581 | if(get_device().check_version(1, 2)){ | |
1582 | ret = clEnqueueMarkerWithWaitList(m_queue, 0, 0, &event_.get()); | |
1583 | } else | |
1584 | #endif | |
1585 | { | |
1586 | // Suppress deprecated declarations warning | |
1587 | BOOST_COMPUTE_DISABLE_DEPRECATED_DECLARATIONS(); | |
1588 | ret = clEnqueueMarker(m_queue, &event_.get()); | |
1589 | BOOST_COMPUTE_ENABLE_DEPRECATED_DECLARATIONS(); | |
1590 | } | |
1591 | ||
1592 | if(ret != CL_SUCCESS){ | |
1593 | BOOST_THROW_EXCEPTION(opencl_error(ret)); | |
1594 | } | |
1595 | ||
1596 | return event_; | |
1597 | } | |
1598 | ||
1599 | #if defined(CL_VERSION_1_2) || defined(BOOST_COMPUTE_DOXYGEN_INVOKED) | |
1600 | /// Enqueues a marker after \p events in the queue and returns an | |
1601 | /// event that can be used to track its progress. | |
1602 | /// | |
1603 | /// \opencl_version_warning{1,2} | |
1604 | event enqueue_marker(const wait_list &events) | |
1605 | { | |
1606 | event event_; | |
1607 | ||
1608 | cl_int ret = clEnqueueMarkerWithWaitList( | |
1609 | m_queue, events.size(), events.get_event_ptr(), &event_.get() | |
1610 | ); | |
1611 | ||
1612 | if(ret != CL_SUCCESS){ | |
1613 | BOOST_THROW_EXCEPTION(opencl_error(ret)); | |
1614 | } | |
1615 | ||
1616 | return event_; | |
1617 | } | |
1618 | #endif // CL_VERSION_1_2 | |
1619 | ||
1620 | #if defined(CL_VERSION_2_0) || defined(BOOST_COMPUTE_DOXYGEN_INVOKED) | |
    /// Enqueues a command to copy \p size bytes of data from \p src_ptr to
    /// \p dst_ptr.
    ///
    /// This is a *blocking* copy (CL_TRUE is passed to clEnqueueSVMMemcpy);
    /// the data is in place when the call returns.
    ///
    /// \opencl_version_warning{2,0}
    ///
    /// \see_opencl2_ref{clEnqueueSVMMemcpy}
    event enqueue_svm_memcpy(void *dst_ptr,
                             const void *src_ptr,
                             size_t size,
                             const wait_list &events = wait_list())
    {
        event event_;

        cl_int ret = clEnqueueSVMMemcpy(
            m_queue,
            CL_TRUE, // blocking copy
            dst_ptr,
            src_ptr,
            size,
            events.size(),
            events.get_event_ptr(),
            &event_.get()
        );

        if(ret != CL_SUCCESS){
            BOOST_THROW_EXCEPTION(opencl_error(ret));
        }

        return event_;
    }
1651 | ||
    /// Enqueues a command to copy \p size bytes of data from \p src_ptr to
    /// \p dst_ptr. The operation is performed asynchronously.
    ///
    /// Wait on the returned event before reading \p dst_ptr.
    ///
    /// \opencl_version_warning{2,0}
    ///
    /// \see_opencl2_ref{clEnqueueSVMMemcpy}
    event enqueue_svm_memcpy_async(void *dst_ptr,
                                   const void *src_ptr,
                                   size_t size,
                                   const wait_list &events = wait_list())
    {
        event event_;

        cl_int ret = clEnqueueSVMMemcpy(
            m_queue,
            CL_FALSE, // non-blocking copy
            dst_ptr,
            src_ptr,
            size,
            events.size(),
            events.get_event_ptr(),
            &event_.get()
        );

        if(ret != CL_SUCCESS){
            BOOST_THROW_EXCEPTION(opencl_error(ret));
        }

        return event_;
    }
1682 | ||
    /// Enqueues a command to fill \p size bytes of data at \p svm_ptr with
    /// \p pattern.
    ///
    /// \p size must be a multiple of \p pattern_size (an OpenCL
    /// requirement; violations surface as an opencl_error).
    ///
    /// \opencl_version_warning{2,0}
    ///
    /// \see_opencl2_ref{clEnqueueSVMMemFill}
    event enqueue_svm_fill(void *svm_ptr,
                           const void *pattern,
                           size_t pattern_size,
                           size_t size,
                           const wait_list &events = wait_list())

    {
        event event_;

        cl_int ret = clEnqueueSVMMemFill(
            m_queue,
            svm_ptr,
            pattern,
            pattern_size,
            size,
            events.size(),
            events.get_event_ptr(),
            &event_.get()
        );

        if(ret != CL_SUCCESS){
            BOOST_THROW_EXCEPTION(opencl_error(ret));
        }

        return event_;
    }
1715 | ||
    /// Enqueues a command to free \p svm_ptr.
    ///
    /// No completion callback is registered (the callback and user-data
    /// arguments are passed as null), so the runtime frees the pointer
    /// itself once preceding commands complete.
    ///
    /// \opencl_version_warning{2,0}
    ///
    /// \see_opencl2_ref{clEnqueueSVMFree}
    ///
    /// \see svm_free()
    event enqueue_svm_free(void *svm_ptr,
                           const wait_list &events = wait_list())
    {
        event event_;

        cl_int ret = clEnqueueSVMFree(
            m_queue,
            1,
            &svm_ptr,
            0,
            0,
            events.size(),
            events.get_event_ptr(),
            &event_.get()
        );

        if(ret != CL_SUCCESS){
            BOOST_THROW_EXCEPTION(opencl_error(ret));
        }

        return event_;
    }
1745 | ||
    /// Enqueues a command to map \p svm_ptr to the host memory space.
    ///
    /// This is a *blocking* map (CL_TRUE is passed to clEnqueueSVMMap);
    /// the region is accessible on the host when the call returns.
    ///
    /// \opencl_version_warning{2,0}
    ///
    /// \see_opencl2_ref{clEnqueueSVMMap}
    event enqueue_svm_map(void *svm_ptr,
                          size_t size,
                          cl_map_flags flags,
                          const wait_list &events = wait_list())
    {
        event event_;

        cl_int ret = clEnqueueSVMMap(
            m_queue,
            CL_TRUE, // blocking map
            flags,
            svm_ptr,
            size,
            events.size(),
            events.get_event_ptr(),
            &event_.get()
        );

        if(ret != CL_SUCCESS){
            BOOST_THROW_EXCEPTION(opencl_error(ret));
        }

        return event_;
    }
1775 | ||
    /// Enqueues a command to unmap \p svm_ptr from the host memory space.
    ///
    /// \opencl_version_warning{2,0}
    ///
    /// \see_opencl2_ref{clEnqueueSVMUnmap}
    event enqueue_svm_unmap(void *svm_ptr,
                            const wait_list &events = wait_list())
    {
        event event_;

        cl_int ret = clEnqueueSVMUnmap(
            m_queue,
            svm_ptr,
            events.size(),
            events.get_event_ptr(),
            &event_.get()
        );

        if(ret != CL_SUCCESS){
            BOOST_THROW_EXCEPTION(opencl_error(ret));
        }

        return event_;
    }
1800 | #endif // CL_VERSION_2_0 | |
1801 | ||
1802 | /// Returns \c true if the command queue is the same at \p other. | |
1803 | bool operator==(const command_queue &other) const | |
1804 | { | |
1805 | return m_queue == other.m_queue; | |
1806 | } | |
1807 | ||
1808 | /// Returns \c true if the command queue is different from \p other. | |
1809 | bool operator!=(const command_queue &other) const | |
1810 | { | |
1811 | return m_queue != other.m_queue; | |
1812 | } | |
1813 | ||
    /// \internal_
    /// Implicit conversion to the underlying OpenCL handle, allowing a
    /// command_queue to be passed directly to the OpenCL C API.
    operator cl_command_queue() const
    {
        return m_queue;
    }
1819 | ||
1820 | /// \internal_ | |
1821 | bool check_device_version(int major, int minor) const | |
1822 | { | |
1823 | return get_device().check_version(major, minor); | |
1824 | } | |
1825 | ||
1826 | private: | |
1827 | cl_command_queue m_queue; | |
1828 | }; | |
1829 | ||
// Creates a new buffer with the same context, size and memory flags as
// this one, then enqueues a device-side copy of its contents on \p queue.
// Note: the copy is enqueued, not awaited; it completes in queue order.
inline buffer buffer::clone(command_queue &queue) const
{
    buffer copy(get_context(), size(), get_memory_flags());
    queue.enqueue_copy_buffer(*this, copy, 0, 0, size());
    return copy;
}
1836 | ||
// Creates a new image1d with the same context, width, format and memory
// flags, then enqueues a device-side copy of the full image on \p queue.
inline image1d image1d::clone(command_queue &queue) const
{
    image1d copy(
        get_context(), width(), format(), get_memory_flags()
    );

    queue.enqueue_copy_image(*this, copy, origin(), copy.origin(), size());

    return copy;
}
1847 | ||
// Creates a new image2d with the same context, dimensions, format and
// memory flags, then enqueues a device-side copy of the full image on
// \p queue.
inline image2d image2d::clone(command_queue &queue) const
{
    image2d copy(
        get_context(), width(), height(), format(), get_memory_flags()
    );

    queue.enqueue_copy_image(*this, copy, origin(), copy.origin(), size());

    return copy;
}
1858 | ||
// Creates a new image3d with the same context, dimensions, format and
// memory flags, then enqueues a device-side copy of the full image on
// \p queue.
inline image3d image3d::clone(command_queue &queue) const
{
    image3d copy(
        get_context(), width(), height(), depth(), format(), get_memory_flags()
    );

    queue.enqueue_copy_image(*this, copy, origin(), copy.origin(), size());

    return copy;
}
1869 | ||
/// \internal_ define get_info() specializations for command_queue
/// (maps each cl_command_queue_info enum to its result type for the
/// templated get_info<>() accessor)
BOOST_COMPUTE_DETAIL_DEFINE_GET_INFO_SPECIALIZATIONS(command_queue,
    ((cl_context, CL_QUEUE_CONTEXT))
    ((cl_device_id, CL_QUEUE_DEVICE))
    ((uint_, CL_QUEUE_REFERENCE_COUNT))
    ((cl_command_queue_properties, CL_QUEUE_PROPERTIES))
)
1877 | ||
1878 | } // end compute namespace | |
1879 | } // end boost namespace | |
1880 | ||
1881 | #endif // BOOST_COMPUTE_COMMAND_QUEUE_HPP |