+ range->mmap_addr = header;
+ range->mmap_len = header_len + range_len;
+
+ /*
+  * COW_INIT policy: one extra "init values" stride is placed
+  * immediately after the max_nr_cpus per-cpu strides.  The init
+  * area is a shared mapping of a memfd; each per-cpu stride is
+  * then remapped as a private copy-on-write view of that same
+  * memfd, so per-cpu pages are lazily populated from the init
+  * values on first write.
+  */
+ if (pool->attr.populate_policy == RSEQ_MEMPOOL_POPULATE_COW_INIT) {
+ range->init = base + (pool->attr.stride * pool->attr.max_nr_cpus);
+ /* Populate init values pages from memfd */
+ memfd = rseq_memfd_create_init(pool->name, pool->attr.stride);
+ if (memfd < 0)
+ goto error_alloc;
+ /*
+  * MAP_FIXED atomically replaces whatever mapping currently
+  * covers [range->init, range->init + stride) — presumably a
+  * reservation made earlier in this function (out of view).
+  * NOTE(review): assumes the error_alloc path closes memfd
+  * when it is >= 0 and tears down the mappings — label is not
+  * visible here, confirm in the full function.
+  */
+ if (mmap(range->init, pool->attr.stride, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_FIXED, memfd, 0) != (void *) range->init)
+ goto error_alloc;
+ /* COW_INIT is only meaningful for per-cpu pools. */
+ assert(pool->attr.type == MEMPOOL_TYPE_PERCPU);
+ /*
+  * Map per-cpu memory as private COW mappings of init values.
+  */
+ {
+ int cpu;
+
+ for (cpu = 0; cpu < pool->attr.max_nr_cpus; cpu++) {
+ void *p = base + (pool->attr.stride * cpu);
+ size_t len = pool->attr.stride;
+
+ /*
+  * MAP_PRIVATE at offset 0: every cpu stride starts as a
+  * copy-on-write view of the same memfd init content.
+  */
+ if (mmap(p, len, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED,
+ memfd, 0) != (void *) p)
+ goto error_alloc;
+ }
+ }
+ /*
+  * The init values shared mapping should not be shared
+  * with the children processes across fork. Prevent the
+  * whole mapping from being used across fork.
+  */
+ if (madvise(base, range_len, MADV_DONTFORK))
+ goto error_alloc;
+
+ /*
+  * Write 0x1 in first byte of header first page, which
+  * will be WIPEONFORK (and thus cleared) in children
+  * processes. Used to find out if pool destroy is called
+  * from a child process after fork.
+  */
+ *((char *) header) = 0x1;
+ if (madvise(header, page_size, MADV_WIPEONFORK))
+ goto error_alloc;
+
+ /*
+  * The second header page contains the struct
+  * rseq_mempool_range, which is needed by pool destroy.
+  * Leave this anonymous page populated (COW) in child
+  * processes.
+  */
+ /*
+  * The established mappings keep the memfd content alive;
+  * the fd itself is no longer needed.  Reset to -1 so later
+  * cleanup (out of view) does not double-close it.
+  */
+ rseq_memfd_close(memfd);
+ memfd = -1;
+ }
+