// mapped_region.hpp
  1. //////////////////////////////////////////////////////////////////////////////
  2. //
  3. // (C) Copyright Ion Gaztanaga 2005-2012. Distributed under the Boost
  4. // Software License, Version 1.0. (See accompanying file
  5. // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
  6. //
  7. // See http://www.boost.org/libs/interprocess for documentation.
  8. //
  9. //////////////////////////////////////////////////////////////////////////////
  10. #ifndef BOOST_INTERPROCESS_MAPPED_REGION_HPP
  11. #define BOOST_INTERPROCESS_MAPPED_REGION_HPP
#include <boost/interprocess/detail/config_begin.hpp>
#include <boost/interprocess/detail/workaround.hpp>
#include <boost/interprocess/interprocess_fwd.hpp>
#include <boost/interprocess/exceptions.hpp>
#include <boost/interprocess/detail/utilities.hpp>
#include <boost/interprocess/detail/os_file_functions.hpp>
//Other boost
#include <boost/assert.hpp>  //BOOST_ASSERT is used in platform-independent code
#include <boost/cstdint.hpp>
#include <boost/move/move.hpp>
//Std
#include <string>
  21. //Some Unixes use caddr_t instead of void * in madvise
  22. // SunOS Tru64 HP-UX AIX
  23. #if defined(sun) || defined(__sun) || defined(__osf__) || defined(__osf) || defined(_hpux) || defined(hpux) || defined(_AIX)
  24. #define BOOST_INTERPROCESS_MADVISE_USES_CADDR_T
  25. #include <sys/types.h>
  26. #endif
  27. //A lot of UNIXes have destructive semantics for MADV_DONTNEED, so
  28. //we need to be careful to allow it.
  29. #if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__) || defined(__APPLE__)
  30. #define BOOST_INTERPROCESS_MADV_DONTNEED_HAS_NONDESTRUCTIVE_SEMANTICS
  31. #endif
  32. #if defined (BOOST_INTERPROCESS_WINDOWS)
  33. # include <boost/interprocess/detail/win32_api.hpp>
  34. # include <boost/interprocess/sync/windows/sync_utils.hpp>
  35. #else
  36. # ifdef BOOST_HAS_UNISTD_H
  37. # include <fcntl.h>
  38. # include <sys/mman.h> //mmap
  39. # include <unistd.h>
  40. # include <sys/stat.h>
  41. # include <sys/types.h>
  42. # if defined(BOOST_INTERPROCESS_XSI_SHARED_MEMORY_OBJECTS)
  43. # include <sys/shm.h> //System V shared memory...
  44. # endif
  45. # include <boost/assert.hpp>
  46. # else
  47. # error Unknown platform
  48. # endif
  49. #endif //#if (defined BOOST_INTERPROCESS_WINDOWS)
  50. //!\file
  51. //!Describes mapped region class
  52. namespace boost {
  53. namespace interprocess {
  54. /// @cond
  55. //Solaris declares madvise only in some configurations but defines MADV_XXX, a bit confusing.
  56. //Predeclare it here to avoid any compilation error
  57. #if (defined(sun) || defined(__sun)) && defined(MADV_NORMAL)
  58. extern "C" int madvise(caddr_t, size_t, int);
  59. #endif
  60. namespace ipcdetail{ class interprocess_tester; }
  61. namespace ipcdetail{ class raw_mapped_region_creator; }
  62. /// @endcond
  63. //!The mapped_region class represents a portion or region created from a
  64. //!memory_mappable object.
  65. //!
  66. //!The OS can map a region bigger than the requested one, as region must
  67. //!be multiple of the page size, but mapped_region will always refer to
  68. //!the region specified by the user.
  69. class mapped_region
  70. {
  71. /// @cond
  72. //Non-copyable
  73. BOOST_MOVABLE_BUT_NOT_COPYABLE(mapped_region)
  74. /// @endcond
  75. public:
  76. //!Creates a mapping region of the mapped memory "mapping", starting in
  77. //!offset "offset", and the mapping's size will be "size". The mapping
  78. //!can be opened for read only, read-write or copy-on-write.
  79. //!
  80. //!If an address is specified, both the offset and the address must be
  81. //!multiples of the page size.
  82. //!
  83. //!The OS could allocate more pages than size/page_size(), but get_address()
  84. //!will always return the address passed in this function (if not null) and
  85. //!get_size() will return the specified size.
  86. template<class MemoryMappable>
  87. mapped_region(const MemoryMappable& mapping
  88. ,mode_t mode
  89. ,offset_t offset = 0
  90. ,std::size_t size = 0
  91. ,const void *address = 0);
  92. //!Default constructor. Address will be 0 (nullptr).
  93. //!Size will be 0.
  94. //!Does not throw
  95. mapped_region();
  96. //!Move constructor. *this will be constructed taking ownership of "other"'s
  97. //!region and "other" will be left in default constructor state.
  98. mapped_region(BOOST_RV_REF(mapped_region) other)
  99. #if defined (BOOST_INTERPROCESS_WINDOWS)
  100. : m_base(0), m_size(0)
  101. , m_page_offset(0)
  102. , m_mode(read_only)
  103. , m_file_or_mapping_hnd(ipcdetail::invalid_file())
  104. #else
  105. : m_base(0), m_size(0), m_page_offset(0), m_mode(read_only), m_is_xsi(false)
  106. #endif
  107. { this->swap(other); }
  108. //!Destroys the mapped region.
  109. //!Does not throw
  110. ~mapped_region();
  111. //!Move assignment. If *this owns a memory mapped region, it will be
  112. //!destroyed and it will take ownership of "other"'s memory mapped region.
  113. mapped_region &operator=(BOOST_RV_REF(mapped_region) other)
  114. {
  115. mapped_region tmp(boost::move(other));
  116. this->swap(tmp);
  117. return *this;
  118. }
  119. //!Swaps the mapped_region with another
  120. //!mapped region
  121. void swap(mapped_region &other);
  122. //!Returns the size of the mapping. Never throws.
  123. std::size_t get_size() const;
  124. //!Returns the base address of the mapping.
  125. //!Never throws.
  126. void* get_address() const;
  127. //!Returns the mode of the mapping used to construct the mapped region.
  128. //!Never throws.
  129. mode_t get_mode() const;
  130. //!Flushes to the disk a byte range within the mapped memory.
  131. //!If 'async' is true, the function will return before flushing operation is completed
  132. //!If 'async' is false, function will return once data has been written into the underlying
  133. //!device (i.e., in mapped files OS cached information is written to disk).
  134. //!Never throws. Returns false if operation could not be performed.
  135. bool flush(std::size_t mapping_offset = 0, std::size_t numbytes = 0, bool async = true);
  136. //!Shrinks current mapped region. If after shrinking there is no longer need for a previously
  137. //!mapped memory page, accessing that page can trigger a segmentation fault.
  138. //!Depending on the OS, this operation might fail (XSI shared memory), it can decommit storage
  139. //!and free a portion of the virtual address space (e.g.POSIX) or this
  140. //!function can release some physical memory wihout freeing any virtual address space(Windows).
  141. //!Returns true on success. Never throws.
  142. bool shrink_by(std::size_t bytes, bool from_back = true);
  143. //!This enum specifies region usage behaviors that an application can specify
  144. //!to the mapped region implementation.
  145. enum advice_types{
  146. //!Specifies that the application has no advice to give on its behavior with respect to
  147. //!the region. It is the default characteristic if no advice is given for a range of memory.
  148. advice_normal,
  149. //!Specifies that the application expects to access the region sequentially from
  150. //!lower addresses to higher addresses. The implementation can lower the priority of
  151. //!preceding pages within the region once a page have been accessed.
  152. advice_sequential,
  153. //!Specifies that the application expects to access the region in a random order,
  154. //!and prefetching is likely not advantageous.
  155. advice_random,
  156. //!Specifies that the application expects to access the region in the near future.
  157. //!The implementation can prefetch pages of the region.
  158. advice_willneed,
  159. //!Specifies that the application expects that it will not access the region in the near future.
  160. //!The implementation can unload pages within the range to save system resources.
  161. advice_dontneed
  162. };
  163. //!Advises the implementation on the expected behavior of the application with respect to the data
  164. //!in the region. The implementation may use this information to optimize handling of the region data.
  165. //!This function has no effect on the semantics of access to memory in the region, although it may affect
  166. //!the performance of access.
  167. //!If the advise type is not known to the implementation, the function returns false. True otherwise.
  168. bool advise(advice_types advise);
  169. //!Returns the size of the page. This size is the minimum memory that
  170. //!will be used by the system when mapping a memory mappable source and
  171. //!will restrict the address and the offset to map.
  172. static std::size_t get_page_size();
  173. /// @cond
  174. private:
  175. //!Closes a previously opened memory mapping. Never throws
  176. void priv_close();
  177. void* priv_map_address() const;
  178. std::size_t priv_map_size() const;
  179. bool priv_flush_param_check(std::size_t mapping_offset, void *&addr, std::size_t &numbytes) const;
  180. bool priv_shrink_param_check(std::size_t bytes, bool from_back, void *&shrink_page_start, std::size_t &shrink_page_bytes);
  181. static void priv_size_from_mapping_size
  182. (offset_t mapping_size, offset_t offset, offset_t page_offset, std::size_t &size);
  183. static offset_t priv_page_offset_addr_fixup(offset_t page_offset, const void *&addr);
  184. template<int dummy>
  185. struct page_size_holder
  186. {
  187. static const std::size_t PageSize;
  188. static std::size_t get_page_size();
  189. };
  190. void* m_base;
  191. std::size_t m_size;
  192. std::size_t m_page_offset;
  193. mode_t m_mode;
  194. #if defined(BOOST_INTERPROCESS_WINDOWS)
  195. file_handle_t m_file_or_mapping_hnd;
  196. #else
  197. bool m_is_xsi;
  198. #endif
  199. friend class ipcdetail::interprocess_tester;
  200. friend class ipcdetail::raw_mapped_region_creator;
  201. void dont_close_on_destruction();
  202. #if defined(BOOST_INTERPROCESS_WINDOWS) && !defined(BOOST_INTERPROCESS_FORCE_GENERIC_EMULATION)
  203. template<int Dummy>
  204. static void destroy_syncs_in_range(const void *addr, std::size_t size);
  205. #endif
  206. /// @endcond
  207. };
  208. ///@cond
  209. inline void swap(mapped_region &x, mapped_region &y)
  210. { x.swap(y); }
  211. inline mapped_region::~mapped_region()
  212. { this->priv_close(); }
  213. inline std::size_t mapped_region::get_size() const
  214. { return m_size; }
  215. inline mode_t mapped_region::get_mode() const
  216. { return m_mode; }
  217. inline void* mapped_region::get_address() const
  218. { return m_base; }
  219. inline void* mapped_region::priv_map_address() const
  220. { return static_cast<char*>(m_base) - m_page_offset; }
  221. inline std::size_t mapped_region::priv_map_size() const
  222. { return m_size + m_page_offset; }
  223. inline bool mapped_region::priv_flush_param_check
  224. (std::size_t mapping_offset, void *&addr, std::size_t &numbytes) const
  225. {
  226. //Check some errors
  227. if(m_base == 0)
  228. return false;
  229. if(mapping_offset >= m_size || (mapping_offset + numbytes) > m_size){
  230. return false;
  231. }
  232. //Update flush size if the user does not provide it
  233. if(numbytes == 0){
  234. numbytes = m_size - mapping_offset;
  235. }
  236. addr = (char*)this->priv_map_address() + mapping_offset;
  237. numbytes += m_page_offset;
  238. return true;
  239. }
  240. inline bool mapped_region::priv_shrink_param_check
  241. (std::size_t bytes, bool from_back, void *&shrink_page_start, std::size_t &shrink_page_bytes)
  242. {
  243. //Check some errors
  244. if(m_base == 0 || bytes > m_size){
  245. return false;
  246. }
  247. else if(bytes == m_size){
  248. this->priv_close();
  249. return true;
  250. }
  251. else{
  252. const std::size_t page_size = mapped_region::get_page_size();
  253. if(from_back){
  254. const std::size_t new_pages = (m_size + m_page_offset - bytes - 1)/page_size + 1;
  255. shrink_page_start = static_cast<char*>(this->priv_map_address()) + new_pages*page_size;
  256. shrink_page_bytes = m_page_offset + m_size - new_pages*page_size;
  257. m_size -= bytes;
  258. }
  259. else{
  260. shrink_page_start = this->priv_map_address();
  261. m_page_offset += bytes;
  262. shrink_page_bytes = (m_page_offset/page_size)*page_size;
  263. m_page_offset = m_page_offset % page_size;
  264. m_size -= bytes;
  265. m_base = static_cast<char *>(m_base) + bytes;
  266. assert(shrink_page_bytes%page_size == 0);
  267. }
  268. return true;
  269. }
  270. }
  271. inline void mapped_region::priv_size_from_mapping_size
  272. (offset_t mapping_size, offset_t offset, offset_t page_offset, std::size_t &size)
  273. {
  274. //Check if mapping size fits in the user address space
  275. //as offset_t is the maximum file size and its signed.
  276. if(mapping_size < offset ||
  277. boost::uintmax_t(mapping_size - (offset - page_offset)) >
  278. boost::uintmax_t(std::size_t(-1))){
  279. error_info err(size_error);
  280. throw interprocess_exception(err);
  281. }
  282. size = static_cast<std::size_t>(mapping_size - (offset - page_offset));
  283. }
  284. inline offset_t mapped_region::priv_page_offset_addr_fixup(offset_t offset, const void *&address)
  285. {
  286. //We can't map any offset so we have to obtain system's
  287. //memory granularity
  288. const std::size_t page_size = mapped_region::get_page_size();
  289. //We calculate the difference between demanded and valid offset
  290. //(always less than a page in std::size_t, thus, representable by std::size_t)
  291. const std::size_t page_offset =
  292. static_cast<std::size_t>(offset - (offset / page_size) * page_size);
  293. //Update the mapping address
  294. if(address){
  295. address = static_cast<const char*>(address) - page_offset;
  296. }
  297. return page_offset;
  298. }
  299. #if defined (BOOST_INTERPROCESS_WINDOWS)
  300. inline mapped_region::mapped_region()
  301. : m_base(0), m_size(0), m_page_offset(0), m_mode(read_only)
  302. , m_file_or_mapping_hnd(ipcdetail::invalid_file())
  303. {}
  304. template<int dummy>
  305. inline std::size_t mapped_region::page_size_holder<dummy>::get_page_size()
  306. {
  307. winapi::system_info info;
  308. get_system_info(&info);
  309. return std::size_t(info.dwAllocationGranularity);
  310. }
  311. template<class MemoryMappable>
  312. inline mapped_region::mapped_region
  313. (const MemoryMappable &mapping
  314. ,mode_t mode
  315. ,offset_t offset
  316. ,std::size_t size
  317. ,const void *address)
  318. : m_base(0), m_size(0), m_page_offset(0), m_mode(mode)
  319. , m_file_or_mapping_hnd(ipcdetail::invalid_file())
  320. {
  321. mapping_handle_t mhandle = mapping.get_mapping_handle();
  322. {
  323. file_handle_t native_mapping_handle = 0;
  324. //Set accesses
  325. //For "create_file_mapping"
  326. unsigned long protection = 0;
  327. //For "mapviewoffile"
  328. unsigned long map_access = 0;
  329. switch(mode)
  330. {
  331. case read_only:
  332. case read_private:
  333. protection |= winapi::page_readonly;
  334. map_access |= winapi::file_map_read;
  335. break;
  336. case read_write:
  337. protection |= winapi::page_readwrite;
  338. map_access |= winapi::file_map_write;
  339. break;
  340. case copy_on_write:
  341. protection |= winapi::page_writecopy;
  342. map_access |= winapi::file_map_copy;
  343. break;
  344. default:
  345. {
  346. error_info err(mode_error);
  347. throw interprocess_exception(err);
  348. }
  349. break;
  350. }
  351. //For file mapping (including emulated shared memory through temporary files),
  352. //the device is a file handle so we need to obtain file's size and call create_file_mapping
  353. //to obtain the mapping handle.
  354. //For files we don't need the file mapping after mapping the memory, as the file is there
  355. //so we'll program the handle close
  356. void * handle_to_close = winapi::invalid_handle_value;
  357. if(!mhandle.is_shm){
  358. //Create mapping handle
  359. native_mapping_handle = winapi::create_file_mapping
  360. ( ipcdetail::file_handle_from_mapping_handle(mapping.get_mapping_handle())
  361. , protection, 0, 0, 0);
  362. //Check if all is correct
  363. if(!native_mapping_handle){
  364. error_info err = winapi::get_last_error();
  365. throw interprocess_exception(err);
  366. }
  367. handle_to_close = native_mapping_handle;
  368. }
  369. else{
  370. //For windows_shared_memory the device handle is already a mapping handle
  371. //and we need to maintain it
  372. native_mapping_handle = mhandle.handle;
  373. }
  374. //RAII handle close on scope exit
  375. const winapi::handle_closer close_handle(handle_to_close);
  376. (void)close_handle;
  377. const offset_t page_offset = priv_page_offset_addr_fixup(offset, address);
  378. //Obtain mapping size if user provides 0 size
  379. if(size == 0){
  380. offset_t mapping_size;
  381. if(!winapi::get_file_mapping_size(native_mapping_handle, mapping_size)){
  382. error_info err = winapi::get_last_error();
  383. throw interprocess_exception(err);
  384. }
  385. //This can throw
  386. priv_size_from_mapping_size(mapping_size, offset, page_offset, size);
  387. }
  388. //Map with new offsets and size
  389. void *base = winapi::map_view_of_file_ex
  390. (native_mapping_handle,
  391. map_access,
  392. offset - page_offset,
  393. static_cast<std::size_t>(page_offset + size),
  394. const_cast<void*>(address));
  395. //Check error
  396. if(!base){
  397. error_info err = winapi::get_last_error();
  398. throw interprocess_exception(err);
  399. }
  400. //Calculate new base for the user
  401. m_base = static_cast<char*>(base) + page_offset;
  402. m_page_offset = page_offset;
  403. m_size = size;
  404. }
  405. //Windows shared memory needs the duplication of the handle if we want to
  406. //make mapped_region independent from the mappable device
  407. //
  408. //For mapped files, we duplicate the file handle to be able to FlushFileBuffers
  409. if(!winapi::duplicate_current_process_handle(mhandle.handle, &m_file_or_mapping_hnd)){
  410. error_info err = winapi::get_last_error();
  411. this->priv_close();
  412. throw interprocess_exception(err);
  413. }
  414. }
  415. inline bool mapped_region::flush(std::size_t mapping_offset, std::size_t numbytes, bool async)
  416. {
  417. void *addr;
  418. if(!this->priv_flush_param_check(mapping_offset, addr, numbytes)){
  419. return false;
  420. }
  421. //Flush it all
  422. if(!winapi::flush_view_of_file(addr, numbytes)){
  423. return false;
  424. }
  425. //m_file_or_mapping_hnd can be a file handle or a mapping handle.
  426. //so flushing file buffers has only sense for files...
  427. else if(async && m_file_or_mapping_hnd != winapi::invalid_handle_value &&
  428. winapi::get_file_type(m_file_or_mapping_hnd) == winapi::file_type_disk){
  429. return winapi::flush_file_buffers(m_file_or_mapping_hnd);
  430. }
  431. return true;
  432. }
  433. inline bool mapped_region::shrink_by(std::size_t bytes, bool from_back)
  434. {
  435. void *shrink_page_start;
  436. std::size_t shrink_page_bytes;
  437. if(!this->priv_shrink_param_check(bytes, from_back, shrink_page_start, shrink_page_bytes)){
  438. return false;
  439. }
  440. else if(shrink_page_bytes){
  441. //In Windows, we can't decommit the storage or release the virtual address space,
  442. //the best we can do is try to remove some memory from the process working set.
  443. //With a bit of luck we can free some physical memory.
  444. unsigned long old_protect_ignored;
  445. bool b_ret = winapi::virtual_unlock(shrink_page_start, shrink_page_bytes)
  446. || (winapi::get_last_error() == winapi::error_not_locked);
  447. (void)old_protect_ignored;
  448. //Change page protection to forbid any further access
  449. b_ret = b_ret && winapi::virtual_protect
  450. (shrink_page_start, shrink_page_bytes, winapi::page_noaccess, old_protect_ignored);
  451. return b_ret;
  452. }
  453. else{
  454. return true;
  455. }
  456. }
  457. inline bool mapped_region::advise(advice_types)
  458. {
  459. //Windows has no madvise/posix_madvise equivalent
  460. return false;
  461. }
  462. inline void mapped_region::priv_close()
  463. {
  464. if(m_base){
  465. void *addr = this->priv_map_address();
  466. #if !defined(BOOST_INTERPROCESS_FORCE_GENERIC_EMULATION)
  467. mapped_region::destroy_syncs_in_range<0>(addr, m_size);
  468. #endif
  469. winapi::unmap_view_of_file(addr);
  470. m_base = 0;
  471. }
  472. if(m_file_or_mapping_hnd != ipcdetail::invalid_file()){
  473. winapi::close_handle(m_file_or_mapping_hnd);
  474. m_file_or_mapping_hnd = ipcdetail::invalid_file();
  475. }
  476. }
  477. inline void mapped_region::dont_close_on_destruction()
  478. {}
  479. #else //#if (defined BOOST_INTERPROCESS_WINDOWS)
  480. inline mapped_region::mapped_region()
  481. : m_base(0), m_size(0), m_page_offset(0), m_mode(read_only), m_is_xsi(false)
  482. {}
  483. template<int dummy>
  484. inline std::size_t mapped_region::page_size_holder<dummy>::get_page_size()
  485. { return std::size_t(sysconf(_SC_PAGESIZE)); }
  486. template<class MemoryMappable>
  487. inline mapped_region::mapped_region
  488. ( const MemoryMappable &mapping
  489. , mode_t mode
  490. , offset_t offset
  491. , std::size_t size
  492. , const void *address)
  493. : m_base(0), m_size(0), m_page_offset(0), m_mode(mode), m_is_xsi(false)
  494. {
  495. mapping_handle_t map_hnd = mapping.get_mapping_handle();
  496. //Some systems dont' support XSI shared memory
  497. #ifdef BOOST_INTERPROCESS_XSI_SHARED_MEMORY_OBJECTS
  498. if(map_hnd.is_xsi){
  499. //Get the size
  500. ::shmid_ds xsi_ds;
  501. int ret = ::shmctl(map_hnd.handle, IPC_STAT, &xsi_ds);
  502. if(ret == -1){
  503. error_info err(system_error_code());
  504. throw interprocess_exception(err);
  505. }
  506. //Compare sizess
  507. if(size == 0){
  508. size = (std::size_t)xsi_ds.shm_segsz;
  509. }
  510. else if(size != (std::size_t)xsi_ds.shm_segsz){
  511. error_info err(size_error);
  512. throw interprocess_exception(err);
  513. }
  514. //Calculate flag
  515. int flag = 0;
  516. if(m_mode == read_only){
  517. flag |= SHM_RDONLY;
  518. }
  519. else if(m_mode != read_write){
  520. error_info err(mode_error);
  521. throw interprocess_exception(err);
  522. }
  523. //Attach memory
  524. void *base = ::shmat(map_hnd.handle, (void*)address, flag);
  525. if(base == (void*)-1){
  526. error_info err(system_error_code());
  527. throw interprocess_exception(err);
  528. }
  529. //Update members
  530. m_base = base;
  531. m_size = size;
  532. m_mode = mode;
  533. m_page_offset = 0;
  534. m_is_xsi = true;
  535. return;
  536. }
  537. #endif //ifdef BOOST_INTERPROCESS_XSI_SHARED_MEMORY_OBJECTS
  538. //We calculate the difference between demanded and valid offset
  539. const offset_t page_offset = priv_page_offset_addr_fixup(offset, address);
  540. if(size == 0){
  541. struct ::stat buf;
  542. if(0 != fstat(map_hnd.handle, &buf)){
  543. error_info err(system_error_code());
  544. throw interprocess_exception(err);
  545. }
  546. //This can throw
  547. priv_size_from_mapping_size(buf.st_size, offset, page_offset, size);
  548. }
  549. //Create new mapping
  550. int prot = 0;
  551. int flags =
  552. #ifdef MAP_NOSYNC
  553. //Avoid excessive syncing in BSD systems
  554. MAP_NOSYNC;
  555. #else
  556. 0;
  557. #endif
  558. switch(mode)
  559. {
  560. case read_only:
  561. prot |= PROT_READ;
  562. flags |= MAP_SHARED;
  563. break;
  564. case read_private:
  565. prot |= (PROT_READ);
  566. flags |= MAP_PRIVATE;
  567. break;
  568. case read_write:
  569. prot |= (PROT_WRITE | PROT_READ);
  570. flags |= MAP_SHARED;
  571. break;
  572. case copy_on_write:
  573. prot |= (PROT_WRITE | PROT_READ);
  574. flags |= MAP_PRIVATE;
  575. break;
  576. default:
  577. {
  578. error_info err(mode_error);
  579. throw interprocess_exception(err);
  580. }
  581. break;
  582. }
  583. //Map it to the address space
  584. void* base = mmap ( const_cast<void*>(address)
  585. , static_cast<std::size_t>(page_offset + size)
  586. , prot
  587. , flags
  588. , mapping.get_mapping_handle().handle
  589. , offset - page_offset);
  590. //Check if mapping was successful
  591. if(base == MAP_FAILED){
  592. error_info err = system_error_code();
  593. throw interprocess_exception(err);
  594. }
  595. //Calculate new base for the user
  596. m_base = static_cast<char*>(base) + page_offset;
  597. m_page_offset = page_offset;
  598. m_size = size;
  599. //Check for fixed mapping error
  600. if(address && (base != address)){
  601. error_info err(busy_error);
  602. this->priv_close();
  603. throw interprocess_exception(err);
  604. }
  605. }
  606. inline bool mapped_region::shrink_by(std::size_t bytes, bool from_back)
  607. {
  608. void *shrink_page_start = 0;
  609. std::size_t shrink_page_bytes = 0;
  610. if(m_is_xsi || !this->priv_shrink_param_check(bytes, from_back, shrink_page_start, shrink_page_bytes)){
  611. return false;
  612. }
  613. else if(shrink_page_bytes){
  614. //In UNIX we can decommit and free virtual address space.
  615. return 0 == munmap(shrink_page_start, shrink_page_bytes);
  616. }
  617. else{
  618. return true;
  619. }
  620. }
  621. inline bool mapped_region::flush(std::size_t mapping_offset, std::size_t numbytes, bool async)
  622. {
  623. void *addr;
  624. if(m_is_xsi || !this->priv_flush_param_check(mapping_offset, addr, numbytes)){
  625. return false;
  626. }
  627. //Flush it all
  628. return msync(addr, numbytes, async ? MS_ASYNC : MS_SYNC) == 0;
  629. }
  630. inline bool mapped_region::advise(advice_types advice)
  631. {
  632. int unix_advice = 0;
  633. //Modes; 0: none, 2: posix, 1: madvise
  634. const unsigned int mode_none = 0;
  635. const unsigned int mode_padv = 1;
  636. const unsigned int mode_madv = 2;
  637. unsigned int mode = mode_none;
  638. //Choose advice either from POSIX (preferred) or native Unix
  639. switch(advice){
  640. case advice_normal:
  641. #if defined(POSIX_MADV_NORMAL)
  642. unix_advice = POSIX_MADV_NORMAL;
  643. mode = mode_padv;
  644. #elif defined(MADV_NORMAL)
  645. unix_advice = MADV_NORMAL;
  646. mode = mode_madv;
  647. #endif
  648. break;
  649. case advice_sequential:
  650. #if defined(POSIX_MADV_SEQUENTIAL)
  651. unix_advice = POSIX_MADV_SEQUENTIAL;
  652. mode = mode_padv;
  653. #elif defined(MADV_SEQUENTIAL)
  654. unix_advice = MADV_SEQUENTIAL;
  655. mode = mode_madv;
  656. #endif
  657. break;
  658. case advice_random:
  659. #if defined(POSIX_MADV_RANDOM)
  660. unix_advice = POSIX_MADV_RANDOM;
  661. mode = mode_padv;
  662. #elif defined(MADV_RANDOM)
  663. unix_advice = MADV_RANDOM;
  664. mode = mode_madv;
  665. #endif
  666. break;
  667. case advice_willneed:
  668. #if defined(POSIX_MADV_WILLNEED)
  669. unix_advice = POSIX_MADV_WILLNEED;
  670. mode = mode_padv;
  671. #elif defined(MADV_WILLNEED)
  672. unix_advice = MADV_WILLNEED;
  673. mode = mode_madv;
  674. #endif
  675. break;
  676. case advice_dontneed:
  677. #if defined(POSIX_MADV_DONTNEED)
  678. unix_advice = POSIX_MADV_DONTNEED;
  679. mode = mode_padv;
  680. #elif defined(MADV_DONTNEED) && defined(BOOST_INTERPROCESS_MADV_DONTNEED_HAS_NONDESTRUCTIVE_SEMANTICS)
  681. unix_advice = MADV_DONTNEED;
  682. mode = mode_madv;
  683. #endif
  684. break;
  685. default:
  686. return false;
  687. }
  688. switch(mode){
  689. #if defined(POSIX_MADV_NORMAL)
  690. case mode_padv:
  691. return 0 == posix_madvise(this->priv_map_address(), this->priv_map_size(), unix_advice);
  692. #endif
  693. #if defined(MADV_NORMAL)
  694. case mode_madv:
  695. return 0 == madvise(
  696. #if defined(BOOST_INTERPROCESS_MADVISE_USES_CADDR_T)
  697. (caddr_t)
  698. #endif
  699. this->priv_map_address(), this->priv_map_size(), unix_advice);
  700. #endif
  701. default:
  702. return false;
  703. }
  704. }
  705. inline void mapped_region::priv_close()
  706. {
  707. if(m_base != 0){
  708. #ifdef BOOST_INTERPROCESS_XSI_SHARED_MEMORY_OBJECTS
  709. if(m_is_xsi){
  710. int ret = ::shmdt(m_base);
  711. BOOST_ASSERT(ret == 0);
  712. (void)ret;
  713. return;
  714. }
  715. #endif //#ifdef BOOST_INTERPROCESS_XSI_SHARED_MEMORY_OBJECTS
  716. munmap(this->priv_map_address(), this->priv_map_size());
  717. m_base = 0;
  718. }
  719. }
  720. inline void mapped_region::dont_close_on_destruction()
  721. { m_base = 0; }
  722. #endif //##if (defined BOOST_INTERPROCESS_WINDOWS)
  723. template<int dummy>
  724. const std::size_t mapped_region::page_size_holder<dummy>::PageSize
  725. = mapped_region::page_size_holder<dummy>::get_page_size();
  726. inline std::size_t mapped_region::get_page_size()
  727. {
  728. if(!page_size_holder<0>::PageSize)
  729. return page_size_holder<0>::get_page_size();
  730. else
  731. return page_size_holder<0>::PageSize;
  732. }
  733. inline void mapped_region::swap(mapped_region &other)
  734. {
  735. ipcdetail::do_swap(this->m_base, other.m_base);
  736. ipcdetail::do_swap(this->m_size, other.m_size);
  737. ipcdetail::do_swap(this->m_page_offset, other.m_page_offset);
  738. ipcdetail::do_swap(this->m_mode, other.m_mode);
  739. #if (defined BOOST_INTERPROCESS_WINDOWS)
  740. ipcdetail::do_swap(this->m_file_or_mapping_hnd, other.m_file_or_mapping_hnd);
  741. #else
  742. ipcdetail::do_swap(this->m_is_xsi, other.m_is_xsi);
  743. #endif
  744. }
  745. //!No-op functor
  746. struct null_mapped_region_function
  747. {
  748. bool operator()(void *, std::size_t , bool) const
  749. { return true; }
  750. };
  751. /// @endcond
  752. } //namespace interprocess {
  753. } //namespace boost {
  754. #include <boost/interprocess/detail/config_end.hpp>
  755. #endif //BOOST_INTERPROCESS_MAPPED_REGION_HPP
  756. #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
  757. #ifndef BOOST_INTERPROCESS_MAPPED_REGION_EXT_HPP
  758. #define BOOST_INTERPROCESS_MAPPED_REGION_EXT_HPP
  759. #if defined(BOOST_INTERPROCESS_WINDOWS) && !defined(BOOST_INTERPROCESS_FORCE_GENERIC_EMULATION)
  760. # include <boost/interprocess/sync/windows/sync_utils.hpp>
  761. # include <boost/interprocess/detail/windows_intermodule_singleton.hpp>
  762. namespace boost {
  763. namespace interprocess {
  764. template<int Dummy>
  765. inline void mapped_region::destroy_syncs_in_range(const void *addr, std::size_t size)
  766. {
  767. ipcdetail::sync_handles &handles =
  768. ipcdetail::windows_intermodule_singleton<ipcdetail::sync_handles>::get();
  769. handles.destroy_syncs_in_range(addr, size);
  770. }
  771. } //namespace interprocess {
  772. } //namespace boost {
  773. #endif //defined(BOOST_INTERPROCESS_WINDOWS) && !defined(BOOST_INTERPROCESS_FORCE_GENERIC_EMULATION)
  774. #endif //#ifdef BOOST_INTERPROCESS_MAPPED_REGION_EXT_HPP
  775. #endif //#if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)