//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2012. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/interprocess for documentation.
//
//////////////////////////////////////////////////////////////////////////////

#ifndef BOOST_INTERPROCESS_MAPPED_REGION_HPP
#define BOOST_INTERPROCESS_MAPPED_REGION_HPP

#ifndef BOOST_CONFIG_HPP
#  include <boost/config.hpp>
#endif
#
#if defined(BOOST_HAS_PRAGMA_ONCE)
#  pragma once
#endif

#include <boost/interprocess/detail/config_begin.hpp>
#include <boost/interprocess/detail/workaround.hpp>

#include <boost/interprocess/interprocess_fwd.hpp>
#include <boost/interprocess/exceptions.hpp>
#include <boost/move/utility_core.hpp>
#include <boost/interprocess/detail/utilities.hpp>
#include <boost/interprocess/detail/os_file_functions.hpp>
#include <string>
#include <boost/cstdint.hpp>
#include <boost/assert.hpp>
#include <boost/move/adl_move_swap.hpp>

//Some Unixes use caddr_t instead of void * in madvise
//   SunOS Tru64 HP-UX AIX
#if defined(sun) || defined(__sun) || defined(__osf__) || defined(__osf) || defined(_hpux) || defined(hpux) || defined(_AIX)
#define BOOST_INTERPROCESS_MADVISE_USES_CADDR_T
#include <sys/types.h>
#endif

//A lot of UNIXes have destructive semantics for MADV_DONTNEED, so
//we need to be careful about where we allow it.
#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__) || defined(__APPLE__)
#define BOOST_INTERPROCESS_MADV_DONTNEED_HAS_NONDESTRUCTIVE_SEMANTICS
#endif

#if defined (BOOST_INTERPROCESS_WINDOWS)
#  include <boost/interprocess/detail/win32_api.hpp>
#  include <boost/interprocess/sync/windows/sync_utils.hpp>
#else
#  ifdef BOOST_HAS_UNISTD_H
#    include <fcntl.h>
#    include <sys/mman.h>     //mmap
#    include <unistd.h>
#    include <sys/stat.h>
#    include <sys/types.h>
#    if defined(BOOST_INTERPROCESS_XSI_SHARED_MEMORY_OBJECTS)
#      include <sys/shm.h>    //System V shared memory...
#    endif
#    include <boost/assert.hpp>
#  else
#    error Unknown platform
#  endif

#endif //#if defined (BOOST_INTERPROCESS_WINDOWS)

//!\file
//!Describes mapped region class

namespace boost {
namespace interprocess {

#if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)

//Solaris declares madvise only in some configurations but defines MADV_XXX, a bit confusing.
//Predeclare it here to avoid any compilation error
#if (defined(sun) || defined(__sun)) && defined(MADV_NORMAL)
extern "C" int madvise(caddr_t, size_t, int);
#endif

namespace ipcdetail{ class interprocess_tester; }
namespace ipcdetail{ class raw_mapped_region_creator; }

#endif //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
//!The mapped_region class represents a portion or region created from a
//!memory_mappable object.
//!
//!The OS can map a region bigger than the requested one, as the region must
//!be a multiple of the page size, but mapped_region will always refer to
//!the region specified by the user.
class mapped_region
{
   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
   //Non-copyable
   BOOST_MOVABLE_BUT_NOT_COPYABLE(mapped_region)
   #endif //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED

   public:

   //!Creates a mapping region of the mapped memory "mapping", starting in
   //!offset "offset", and the mapping's size will be "size". The mapping
   //!can be opened for read-only, read-write or copy-on-write.
   //!
   //!If an address is specified, both the offset and the address must be
   //!multiples of the page size.
   //!
   //!The map is created using "default_map_options". This flag is OS
   //!dependent and it should not be changed unless the user needs to
   //!specify special options.
   //!
   //!In Windows systems "map_options" is a DWORD value passed as
   //!"dwDesiredAccess" to "MapViewOfFileEx". If "default_map_options" is passed
   //!it's initialized to zero. "map_options" is ORed with FILE_MAP_[COPY|READ|WRITE].
   //!
   //!In UNIX systems and POSIX mappings "map_options" is an int value passed as "flags"
   //!to "mmap". If "default_map_options" is specified it's initialized to MAP_NOSYNC
   //!if that option exists and to zero otherwise. "map_options" is ORed with MAP_PRIVATE or MAP_SHARED.
   //!
   //!In UNIX systems and XSI mappings "map_options" is an int value passed as "shmflg"
   //!to "shmat". If "default_map_options" is specified it's initialized to zero.
   //!"map_options" is ORed with SHM_RDONLY if needed.
   //!
   //!The OS could allocate more pages than size/page_size(), but get_address()
   //!will always return the address passed in this function (if not null) and
   //!get_size() will return the specified size.
   template<class MemoryMappable>
   mapped_region(const MemoryMappable& mapping
                ,mode_t mode
                ,offset_t offset = 0
                ,std::size_t size = 0
                ,const void *address = 0
                ,map_options_t map_options = default_map_options);
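
   //Usage sketch for this constructor: mapping a shared_memory_object
   //read-write with the default options. The object name "MySharedMemory"
   //and the 1000-byte size are arbitrary example values.
   //
   //   #include <boost/interprocess/shared_memory_object.hpp>
   //   #include <boost/interprocess/mapped_region.hpp>
   //   #include <cstring>
   //
   //   int main()
   //   {
   //      using namespace boost::interprocess;
   //      shared_memory_object shm(open_or_create, "MySharedMemory", read_write);
   //      shm.truncate(1000);                      //size the object before mapping it
   //      mapped_region region(shm, read_write);   //offset 0, map the whole object
   //      std::memset(region.get_address(), 0, region.get_size());
   //      shared_memory_object::remove("MySharedMemory");
   //      return 0;
   //   }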

   //!Default constructor. Address will be 0 (nullptr).
   //!Size will be 0.
   //!Does not throw
   mapped_region();

   //!Move constructor. *this will be constructed taking ownership of "other"'s
   //!region and "other" will be left in default constructor state.
   mapped_region(BOOST_RV_REF(mapped_region) other)
   #if defined (BOOST_INTERPROCESS_WINDOWS)
      : m_base(0), m_size(0)
      , m_page_offset(0)
      , m_mode(read_only)
      , m_file_or_mapping_hnd(ipcdetail::invalid_file())
   #else
      : m_base(0), m_size(0), m_page_offset(0), m_mode(read_only), m_is_xsi(false)
   #endif
   {  this->swap(other);  }

   //!Destroys the mapped region.
   //!Does not throw
   ~mapped_region();

   //!Move assignment. If *this owns a memory mapped region, it will be
   //!destroyed and it will take ownership of "other"'s memory mapped region.
   mapped_region &operator=(BOOST_RV_REF(mapped_region) other)
   {
      mapped_region tmp(boost::move(other));
      this->swap(tmp);
      return *this;
   }

   //!Swaps the mapped_region with another
   //!mapped region
   void swap(mapped_region &other);

   //!Returns the size of the mapping. Never throws.
   std::size_t get_size() const;

   //!Returns the base address of the mapping.
   //!Never throws.
   void* get_address() const;

   //!Returns the mode of the mapping used to construct the mapped region.
   //!Never throws.
   mode_t get_mode() const;

   //!Flushes to the disk a byte range within the mapped memory.
   //!If 'async' is true, the function will return before the flushing operation is completed.
   //!If 'async' is false, the function will return once data has been written into the underlying
   //!device (i.e., in mapped files OS cached information is written to disk).
   //!Never throws. Returns false if the operation could not be performed.
   bool flush(std::size_t mapping_offset = 0, std::size_t numbytes = 0, bool async = true);
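
   //Usage sketch: forcing a synchronous flush of the first 256 bytes of a
   //region; "region" is assumed to be a mapped_region constructed elsewhere
   //from a file_mapping.
   //
   //   //...write data through the mapping, then make sure the first 256 bytes
   //   //reach the underlying device before continuing.
   //   if(!region.flush(0u, 256u, false)){   //async == false -> synchronous flush
   //      //handle the failure (e.g. the region was default-constructed)
   //   }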

   //!Shrinks the current mapped region. If after shrinking there is no longer need for a previously
   //!mapped memory page, accessing that page can trigger a segmentation fault.
   //!Depending on the OS, this operation might fail (XSI shared memory), it can decommit storage
   //!and free a portion of the virtual address space (e.g. POSIX) or this
   //!function can release some physical memory without freeing any virtual address space (Windows).
   //!Returns true on success. Never throws.
   bool shrink_by(std::size_t bytes, bool from_back = true);
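
   //Usage sketch: dropping the last page of an existing region "region". After
   //a successful call get_size() is reduced by the requested amount and the
   //discarded tail must not be accessed again.
   //
   //   const std::size_t old_size = region.get_size();
   //   if(region.shrink_by(mapped_region::get_page_size(), true)){   //shrink from the back
   //      BOOST_ASSERT(region.get_size() == old_size - mapped_region::get_page_size());
   //   }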

   //!This enum specifies region usage behaviors that an application can specify
   //!to the mapped region implementation.
   enum advice_types{
      //!Specifies that the application has no advice to give on its behavior with respect to
      //!the region. It is the default characteristic if no advice is given for a range of memory.
      advice_normal,
      //!Specifies that the application expects to access the region sequentially from
      //!lower addresses to higher addresses. The implementation can lower the priority of
      //!preceding pages within the region once a page has been accessed.
      advice_sequential,
      //!Specifies that the application expects to access the region in a random order,
      //!and prefetching is likely not advantageous.
      advice_random,
      //!Specifies that the application expects to access the region in the near future.
      //!The implementation can prefetch pages of the region.
      advice_willneed,
      //!Specifies that the application expects that it will not access the region in the near future.
      //!The implementation can unload pages within the range to save system resources.
      advice_dontneed
   };

   //!Advises the implementation on the expected behavior of the application with respect to the data
   //!in the region. The implementation may use this information to optimize handling of the region data.
   //!This function has no effect on the semantics of access to memory in the region, although it may affect
   //!the performance of access.
   //!If the advice type is not known to the implementation, the function returns false. True otherwise.
   bool advise(advice_types advise);
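
   //Usage sketch: hinting a sequential scan before reading an existing region
   //"region" from start to end; process_byte is a hypothetical user function.
   //
   //   if(region.advise(mapped_region::advice_sequential)){
   //      //the OS may now prefetch/read ahead more aggressively
   //   }
   //   const char *p = static_cast<const char*>(region.get_address());
   //   for(std::size_t i = 0; i != region.get_size(); ++i){
   //      process_byte(p[i]);
   //   }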

   //!Returns the size of the page. This size is the minimum memory that
   //!will be used by the system when mapping a memory mappable source and
   //!will restrict the address and the offset to map.
   static std::size_t get_page_size();
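
   //Usage sketch: rounding a file offset down to a page boundary before
   //requesting a mapping at a fixed address. "file" is assumed to be an open
   //file_mapping and "fixed_address" a page-aligned address; both must be
   //multiples of the page size when an address is specified.
   //
   //   const offset_t page = offset_t(mapped_region::get_page_size());
   //   const offset_t wanted_offset  = 123456;   //arbitrary example value
   //   const offset_t aligned_offset = (wanted_offset / page) * page;
   //   mapped_region region(file, read_only, aligned_offset, 4096u, fixed_address);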

   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
   private:
   //!Closes a previously opened memory mapping. Never throws
   void priv_close();

   void* priv_map_address() const;
   std::size_t priv_map_size() const;
   bool priv_flush_param_check(std::size_t mapping_offset, void *&addr, std::size_t &numbytes) const;
   bool priv_shrink_param_check(std::size_t bytes, bool from_back, void *&shrink_page_start, std::size_t &shrink_page_bytes);
   static void priv_size_from_mapping_size
      (offset_t mapping_size, offset_t offset, offset_t page_offset, std::size_t &size);
   static offset_t priv_page_offset_addr_fixup(offset_t page_offset, const void *&addr);

   template<int dummy>
   struct page_size_holder
   {
      static const std::size_t PageSize;
      static std::size_t get_page_size();
   };

   void* m_base;
   std::size_t m_size;
   std::size_t m_page_offset;
   mode_t m_mode;
   #if defined(BOOST_INTERPROCESS_WINDOWS)
   file_handle_t m_file_or_mapping_hnd;
   #else
   bool m_is_xsi;
   #endif

   friend class ipcdetail::interprocess_tester;
   friend class ipcdetail::raw_mapped_region_creator;
   void dont_close_on_destruction();
   #if defined(BOOST_INTERPROCESS_WINDOWS) && !defined(BOOST_INTERPROCESS_FORCE_GENERIC_EMULATION)
   template<int Dummy>
   static void destroy_syncs_in_range(const void *addr, std::size_t size);
   #endif
   #endif //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
};

#if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)

inline void swap(mapped_region &x, mapped_region &y)
{  x.swap(y);  }

inline mapped_region::~mapped_region()
{  this->priv_close();  }

inline std::size_t mapped_region::get_size() const
{  return m_size;  }

inline mode_t mapped_region::get_mode() const
{  return m_mode;  }

inline void* mapped_region::get_address() const
{  return m_base;  }

inline void* mapped_region::priv_map_address() const
{  return static_cast<char*>(m_base) - m_page_offset;  }

inline std::size_t mapped_region::priv_map_size() const
{  return m_size + m_page_offset;  }

inline bool mapped_region::priv_flush_param_check
   (std::size_t mapping_offset, void *&addr, std::size_t &numbytes) const
{
   //Check some errors
   if(m_base == 0)
      return false;

   if(mapping_offset >= m_size || (mapping_offset + numbytes) > m_size){
      return false;
   }

   //Update flush size if the user does not provide it
   if(numbytes == 0){
      numbytes = m_size - mapping_offset;
   }
   addr = (char*)this->priv_map_address() + mapping_offset;
   numbytes += m_page_offset;
   return true;
}

inline bool mapped_region::priv_shrink_param_check
   (std::size_t bytes, bool from_back, void *&shrink_page_start, std::size_t &shrink_page_bytes)
{
   //Check some errors
   if(m_base == 0 || bytes > m_size){
      return false;
   }
   else if(bytes == m_size){
      this->priv_close();
      return true;
   }
   else{
      const std::size_t page_size = mapped_region::get_page_size();
      if(from_back){
         const std::size_t new_pages = (m_size + m_page_offset - bytes - 1)/page_size + 1;
         shrink_page_start = static_cast<char*>(this->priv_map_address()) + new_pages*page_size;
         shrink_page_bytes = m_page_offset + m_size - new_pages*page_size;
         m_size -= bytes;
      }
      else{
         shrink_page_start = this->priv_map_address();
         m_page_offset += bytes;
         shrink_page_bytes = (m_page_offset/page_size)*page_size;
         m_page_offset = m_page_offset % page_size;
         m_size -= bytes;
         m_base = static_cast<char *>(m_base) + bytes;
         BOOST_ASSERT(shrink_page_bytes%page_size == 0);
      }
      return true;
   }
}
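
//Worked example for priv_shrink_param_check, assuming a 4096-byte page:
//for m_page_offset == 100 and m_size == 10000, shrinking 5000 bytes from the
//back keeps ceil((100 + 5000) / 4096.0) == 2 pages mapped, so
//shrink_page_start points 2*4096 == 8192 bytes past priv_map_address() and
//shrink_page_bytes == 100 + 10000 - 8192 == 1908 bytes can be released.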

inline void mapped_region::priv_size_from_mapping_size
   (offset_t mapping_size, offset_t offset, offset_t page_offset, std::size_t &size)
{
   //Check if the mapping size fits in the user address space,
   //as offset_t is the maximum file size and it's signed.
   if(mapping_size < offset ||
      boost::uintmax_t(mapping_size - (offset - page_offset)) >
         boost::uintmax_t(std::size_t(-1))){
      error_info err(size_error);
      throw interprocess_exception(err);
   }
   size = static_cast<std::size_t>(mapping_size - (offset - page_offset));
}

inline offset_t mapped_region::priv_page_offset_addr_fixup(offset_t offset, const void *&address)
{
   //We can't map from an arbitrary offset, so we have to obtain the system's
   //memory granularity
   const std::size_t page_size = mapped_region::get_page_size();

   //We calculate the difference between the demanded and the valid offset
   //(always less than a page, thus representable by std::size_t)
   const std::size_t page_offset =
      static_cast<std::size_t>(offset - (offset / page_size) * page_size);
   //Update the mapping address
   if(address){
      address = static_cast<const char*>(address) - page_offset;
   }
   return page_offset;
}
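
//Worked example for priv_page_offset_addr_fixup, assuming a 4096-byte page:
//for offset == 10000 the largest valid mapping offset is 2*4096 == 8192, so the
//function returns page_offset == 1808 and, if a fixed address was requested,
//moves it 1808 bytes down so that the address finally seen by the user still
//corresponds to offset 10000.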

#if defined (BOOST_INTERPROCESS_WINDOWS)

inline mapped_region::mapped_region()
   : m_base(0), m_size(0), m_page_offset(0), m_mode(read_only)
   , m_file_or_mapping_hnd(ipcdetail::invalid_file())
{}

template<int dummy>
inline std::size_t mapped_region::page_size_holder<dummy>::get_page_size()
{
   winapi::system_info info;
   winapi::get_system_info(&info);
   return std::size_t(info.dwAllocationGranularity);
}

template<class MemoryMappable>
inline mapped_region::mapped_region
   (const MemoryMappable &mapping
   ,mode_t mode
   ,offset_t offset
   ,std::size_t size
   ,const void *address
   ,map_options_t map_options)
   : m_base(0), m_size(0), m_page_offset(0), m_mode(mode)
   , m_file_or_mapping_hnd(ipcdetail::invalid_file())
{
   mapping_handle_t mhandle = mapping.get_mapping_handle();
   {
      file_handle_t native_mapping_handle = 0;

      //Set accesses
      //For "create_file_mapping"
      unsigned long protection = 0;
      //For "MapViewOfFileEx"
      unsigned long map_access = map_options == default_map_options ? 0 : map_options;

      switch(mode)
      {
         case read_only:
         case read_private:
            protection |= winapi::page_readonly;
            map_access |= winapi::file_map_read;
         break;
         case read_write:
            protection |= winapi::page_readwrite;
            map_access |= winapi::file_map_write;
         break;
         case copy_on_write:
            protection |= winapi::page_writecopy;
            map_access |= winapi::file_map_copy;
         break;
         default:
            {
               error_info err(mode_error);
               throw interprocess_exception(err);
            }
         break;
      }

      //For file mapping (including emulated shared memory through temporary files),
      //the device is a file handle, so we need to obtain the file's size and call
      //create_file_mapping to obtain the mapping handle.
      //For files we don't need the file mapping after mapping the memory, as the file
      //is there, so we'll schedule the mapping handle to be closed.
      void * handle_to_close = winapi::invalid_handle_value;
      if(!mhandle.is_shm){
         //Create mapping handle
         native_mapping_handle = winapi::create_file_mapping
            ( ipcdetail::file_handle_from_mapping_handle(mapping.get_mapping_handle())
            , protection, 0, 0, 0);

         //Check if all is correct
         if(!native_mapping_handle){
            error_info err = winapi::get_last_error();
            throw interprocess_exception(err);
         }
         handle_to_close = native_mapping_handle;
      }
      else{
         //For windows_shared_memory the device handle is already a mapping handle
         //and we need to maintain it
         native_mapping_handle = mhandle.handle;
      }
      //RAII handle close on scope exit
      const winapi::handle_closer close_handle(handle_to_close);
      (void)close_handle;

      const offset_t page_offset = priv_page_offset_addr_fixup(offset, address);

      //Obtain the mapping size if the user provides a 0 size
      if(size == 0){
         offset_t mapping_size;
         if(!winapi::get_file_mapping_size(native_mapping_handle, mapping_size)){
            error_info err = winapi::get_last_error();
            throw interprocess_exception(err);
         }
         //This can throw
         priv_size_from_mapping_size(mapping_size, offset, page_offset, size);
      }

      //Map with the new offsets and size
      void *base = winapi::map_view_of_file_ex
         (native_mapping_handle,
          map_access,
          offset - page_offset,
          static_cast<std::size_t>(page_offset + size),
          const_cast<void*>(address));
      //Check error
      if(!base){
         error_info err = winapi::get_last_error();
         throw interprocess_exception(err);
      }

      //Calculate the new base for the user
      m_base = static_cast<char*>(base) + page_offset;
      m_page_offset = page_offset;
      m_size = size;
   }
   //Windows shared memory needs the duplication of the handle if we want to
   //make mapped_region independent from the mappable device
   //
   //For mapped files, we duplicate the file handle to be able to FlushFileBuffers
   if(!winapi::duplicate_current_process_handle(mhandle.handle, &m_file_or_mapping_hnd)){
      error_info err = winapi::get_last_error();
      this->priv_close();
      throw interprocess_exception(err);
   }
}

inline bool mapped_region::flush(std::size_t mapping_offset, std::size_t numbytes, bool async)
{
   void *addr;
   if(!this->priv_flush_param_check(mapping_offset, addr, numbytes)){
      return false;
   }
   //Flush it all
   if(!winapi::flush_view_of_file(addr, numbytes)){
      return false;
   }
   //m_file_or_mapping_hnd can be a file handle or a mapping handle,
   //so flushing file buffers only makes sense for files...
   else if(!async && m_file_or_mapping_hnd != winapi::invalid_handle_value &&
           winapi::get_file_type(m_file_or_mapping_hnd) == winapi::file_type_disk){
      return winapi::flush_file_buffers(m_file_or_mapping_hnd);
   }
   return true;
}

inline bool mapped_region::shrink_by(std::size_t bytes, bool from_back)
{
   void *shrink_page_start = 0;
   std::size_t shrink_page_bytes = 0;
   if(!this->priv_shrink_param_check(bytes, from_back, shrink_page_start, shrink_page_bytes)){
      return false;
   }
   else if(shrink_page_bytes){
      //In Windows, we can't decommit the storage or release the virtual address space,
      //the best we can do is try to remove some memory from the process working set.
      //With a bit of luck we can free some physical memory.
      unsigned long old_protect_ignored;
      bool b_ret = winapi::virtual_unlock(shrink_page_start, shrink_page_bytes)
                   || (winapi::get_last_error() == winapi::error_not_locked);
      (void)old_protect_ignored;
      //Change page protection to forbid any further access
      b_ret = b_ret && winapi::virtual_protect
         (shrink_page_start, shrink_page_bytes, winapi::page_noaccess, old_protect_ignored);
      return b_ret;
   }
   else{
      return true;
   }
}

inline bool mapped_region::advise(advice_types)
{
   //Windows has no madvise/posix_madvise equivalent
   return false;
}

inline void mapped_region::priv_close()
{
   if(m_base){
      void *addr = this->priv_map_address();
      #if !defined(BOOST_INTERPROCESS_FORCE_GENERIC_EMULATION)
      mapped_region::destroy_syncs_in_range<0>(addr, m_size);
      #endif
      winapi::unmap_view_of_file(addr);
      m_base = 0;
   }
   if(m_file_or_mapping_hnd != ipcdetail::invalid_file()){
      winapi::close_handle(m_file_or_mapping_hnd);
      m_file_or_mapping_hnd = ipcdetail::invalid_file();
   }
}

inline void mapped_region::dont_close_on_destruction()
{}

#else //#if defined (BOOST_INTERPROCESS_WINDOWS)

inline mapped_region::mapped_region()
   : m_base(0), m_size(0), m_page_offset(0), m_mode(read_only), m_is_xsi(false)
{}

template<int dummy>
inline std::size_t mapped_region::page_size_holder<dummy>::get_page_size()
{  return std::size_t(sysconf(_SC_PAGESIZE));  }

template<class MemoryMappable>
inline mapped_region::mapped_region
   ( const MemoryMappable &mapping
   , mode_t mode
   , offset_t offset
   , std::size_t size
   , const void *address
   , map_options_t map_options)
   : m_base(0), m_size(0), m_page_offset(0), m_mode(mode), m_is_xsi(false)
{
   mapping_handle_t map_hnd = mapping.get_mapping_handle();

   //Some systems don't support XSI shared memory
   #ifdef BOOST_INTERPROCESS_XSI_SHARED_MEMORY_OBJECTS
   if(map_hnd.is_xsi){
      //Get the size
      ::shmid_ds xsi_ds;
      int ret = ::shmctl(map_hnd.handle, IPC_STAT, &xsi_ds);
      if(ret == -1){
         error_info err(system_error_code());
         throw interprocess_exception(err);
      }
      //Compare sizes
      if(size == 0){
         size = (std::size_t)xsi_ds.shm_segsz;
      }
      else if(size != (std::size_t)xsi_ds.shm_segsz){
         error_info err(size_error);
         throw interprocess_exception(err);
      }
      //Calculate flag
      int flag = map_options == default_map_options ? 0 : map_options;
      if(m_mode == read_only){
         flag |= SHM_RDONLY;
      }
      else if(m_mode != read_write){
         error_info err(mode_error);
         throw interprocess_exception(err);
      }
      //Attach memory
      void *base = ::shmat(map_hnd.handle, (void*)address, flag);
      if(base == (void*)-1){
         error_info err(system_error_code());
         throw interprocess_exception(err);
      }
      //Update members
      m_base = base;
      m_size = size;
      m_mode = mode;
      m_page_offset = 0;
      m_is_xsi = true;
      return;
   }
   #endif //ifdef BOOST_INTERPROCESS_XSI_SHARED_MEMORY_OBJECTS

   //We calculate the difference between the demanded and the valid offset
   const offset_t page_offset = priv_page_offset_addr_fixup(offset, address);

   if(size == 0){
      struct ::stat buf;
      if(0 != fstat(map_hnd.handle, &buf)){
         error_info err(system_error_code());
         throw interprocess_exception(err);
      }
      //This can throw
      priv_size_from_mapping_size(buf.st_size, offset, page_offset, size);
   }

   #ifdef MAP_NOSYNC
      #define BOOST_INTERPROCESS_MAP_NOSYNC MAP_NOSYNC
   #else
      #define BOOST_INTERPROCESS_MAP_NOSYNC 0
   #endif //MAP_NOSYNC

   //Create new mapping
   int prot = 0;
   int flags = map_options == default_map_options ? BOOST_INTERPROCESS_MAP_NOSYNC : map_options;

   #undef BOOST_INTERPROCESS_MAP_NOSYNC

   switch(mode)
   {
      case read_only:
         prot  |= PROT_READ;
         flags |= MAP_SHARED;
      break;

      case read_private:
         prot  |= (PROT_READ);
         flags |= MAP_PRIVATE;
      break;

      case read_write:
         prot  |= (PROT_WRITE | PROT_READ);
         flags |= MAP_SHARED;
      break;

      case copy_on_write:
         prot  |= (PROT_WRITE | PROT_READ);
         flags |= MAP_PRIVATE;
      break;

      default:
         {
            error_info err(mode_error);
            throw interprocess_exception(err);
         }
      break;
   }

   //Map it to the address space
   void* base = mmap ( const_cast<void*>(address)
                     , static_cast<std::size_t>(page_offset + size)
                     , prot
                     , flags
                     , mapping.get_mapping_handle().handle
                     , offset - page_offset);

   //Check if the mapping was successful
   if(base == MAP_FAILED){
      error_info err = system_error_code();
      throw interprocess_exception(err);
   }

   //Calculate the new base for the user
   m_base = static_cast<char*>(base) + page_offset;
   m_page_offset = page_offset;
   m_size = size;

   //Check for fixed mapping error
   if(address && (base != address)){
      error_info err(busy_error);
      this->priv_close();
      throw interprocess_exception(err);
   }
}

inline bool mapped_region::shrink_by(std::size_t bytes, bool from_back)
{
   void *shrink_page_start = 0;
   std::size_t shrink_page_bytes = 0;
   if(m_is_xsi || !this->priv_shrink_param_check(bytes, from_back, shrink_page_start, shrink_page_bytes)){
      return false;
   }
   else if(shrink_page_bytes){
      //In UNIX we can decommit and free the virtual address space.
      return 0 == munmap(shrink_page_start, shrink_page_bytes);
   }
   else{
      return true;
   }
}

inline bool mapped_region::flush(std::size_t mapping_offset, std::size_t numbytes, bool async)
{
   void *addr;
   if(m_is_xsi || !this->priv_flush_param_check(mapping_offset, addr, numbytes)){
      return false;
   }
   //Flush it all
   return msync(addr, numbytes, async ? MS_ASYNC : MS_SYNC) == 0;
}

inline bool mapped_region::advise(advice_types advice)
{
   int unix_advice = 0;
   //Modes: 0 = none, 1 = posix_madvise, 2 = madvise
   const unsigned int mode_none = 0;
   const unsigned int mode_padv = 1;
   const unsigned int mode_madv = 2;
   // Suppress "unused variable" warnings
   (void)mode_padv;
   (void)mode_madv;
   unsigned int mode = mode_none;
   //Choose the advice either from POSIX (preferred) or native Unix
   switch(advice){
      case advice_normal:
         #if defined(POSIX_MADV_NORMAL)
         unix_advice = POSIX_MADV_NORMAL;
         mode = mode_padv;
         #elif defined(MADV_NORMAL)
         unix_advice = MADV_NORMAL;
         mode = mode_madv;
         #endif
      break;
      case advice_sequential:
         #if defined(POSIX_MADV_SEQUENTIAL)
         unix_advice = POSIX_MADV_SEQUENTIAL;
         mode = mode_padv;
         #elif defined(MADV_SEQUENTIAL)
         unix_advice = MADV_SEQUENTIAL;
         mode = mode_madv;
         #endif
      break;
      case advice_random:
         #if defined(POSIX_MADV_RANDOM)
         unix_advice = POSIX_MADV_RANDOM;
         mode = mode_padv;
         #elif defined(MADV_RANDOM)
         unix_advice = MADV_RANDOM;
         mode = mode_madv;
         #endif
      break;
      case advice_willneed:
         #if defined(POSIX_MADV_WILLNEED)
         unix_advice = POSIX_MADV_WILLNEED;
         mode = mode_padv;
         #elif defined(MADV_WILLNEED)
         unix_advice = MADV_WILLNEED;
         mode = mode_madv;
         #endif
      break;
      case advice_dontneed:
         #if defined(POSIX_MADV_DONTNEED)
         unix_advice = POSIX_MADV_DONTNEED;
         mode = mode_padv;
         #elif defined(MADV_DONTNEED) && defined(BOOST_INTERPROCESS_MADV_DONTNEED_HAS_NONDESTRUCTIVE_SEMANTICS)
         unix_advice = MADV_DONTNEED;
         mode = mode_madv;
         #endif
      break;
      default:
         return false;
   }
   switch(mode){
      #if defined(POSIX_MADV_NORMAL)
      case mode_padv:
         return 0 == posix_madvise(this->priv_map_address(), this->priv_map_size(), unix_advice);
      #endif
      #if defined(MADV_NORMAL)
      case mode_madv:
         return 0 == madvise(
            #if defined(BOOST_INTERPROCESS_MADVISE_USES_CADDR_T)
            (caddr_t)
            #endif
            this->priv_map_address(), this->priv_map_size(), unix_advice);
      #endif
      default:
         return false;
   }
}

inline void mapped_region::priv_close()
{
   if(m_base != 0){
      #ifdef BOOST_INTERPROCESS_XSI_SHARED_MEMORY_OBJECTS
      if(m_is_xsi){
         int ret = ::shmdt(m_base);
         BOOST_ASSERT(ret == 0);
         (void)ret;
         return;
      }
      #endif //#ifdef BOOST_INTERPROCESS_XSI_SHARED_MEMORY_OBJECTS
      munmap(this->priv_map_address(), this->priv_map_size());
      m_base = 0;
   }
}

inline void mapped_region::dont_close_on_destruction()
{  m_base = 0;  }

#endif //#if defined (BOOST_INTERPROCESS_WINDOWS)

template<int dummy>
const std::size_t mapped_region::page_size_holder<dummy>::PageSize
   = mapped_region::page_size_holder<dummy>::get_page_size();

inline std::size_t mapped_region::get_page_size()
{
   if(!page_size_holder<0>::PageSize)
      return page_size_holder<0>::get_page_size();
   else
      return page_size_holder<0>::PageSize;
}

inline void mapped_region::swap(mapped_region &other)
{
   ::boost::adl_move_swap(this->m_base, other.m_base);
   ::boost::adl_move_swap(this->m_size, other.m_size);
   ::boost::adl_move_swap(this->m_page_offset, other.m_page_offset);
   ::boost::adl_move_swap(this->m_mode, other.m_mode);
   #if defined (BOOST_INTERPROCESS_WINDOWS)
   ::boost::adl_move_swap(this->m_file_or_mapping_hnd, other.m_file_or_mapping_hnd);
   #else
   ::boost::adl_move_swap(this->m_is_xsi, other.m_is_xsi);
   #endif
}

//!No-op functor
struct null_mapped_region_function
{
   bool operator()(void *, std::size_t , bool) const
   {  return true;  }

   static std::size_t get_min_size()
   {  return 0;  }
};

#endif //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED

}  //namespace interprocess {
}  //namespace boost {

#include <boost/interprocess/detail/config_end.hpp>

#endif //BOOST_INTERPROCESS_MAPPED_REGION_HPP

#if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)

#ifndef BOOST_INTERPROCESS_MAPPED_REGION_EXT_HPP
#define BOOST_INTERPROCESS_MAPPED_REGION_EXT_HPP

#if defined(BOOST_INTERPROCESS_WINDOWS) && !defined(BOOST_INTERPROCESS_FORCE_GENERIC_EMULATION)
#  include <boost/interprocess/sync/windows/sync_utils.hpp>
#  include <boost/interprocess/detail/windows_intermodule_singleton.hpp>

namespace boost {
namespace interprocess {

template<int Dummy>
inline void mapped_region::destroy_syncs_in_range(const void *addr, std::size_t size)
{
   ipcdetail::sync_handles &handles =
      ipcdetail::windows_intermodule_singleton<ipcdetail::sync_handles>::get();
   handles.destroy_syncs_in_range(addr, size);
}

}  //namespace interprocess {
}  //namespace boost {

#endif //defined(BOOST_INTERPROCESS_WINDOWS) && !defined(BOOST_INTERPROCESS_FORCE_GENERIC_EMULATION)

#endif //#ifdef BOOST_INTERPROCESS_MAPPED_REGION_EXT_HPP

#endif //#if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)