+
+ /*
+ * If the item is already zeroed, either because the
+ * init range update has propagated or because the
+ * content is already zeroed (e.g. zero page), don't
+ * write to the page. This eliminates useless COW over
+ * the zero page just for overwriting it with zeroes.
+ *
+ * This means zmalloc() in a COW_ZERO policy pool does
+ * not trigger COW for CPUs which are not actively
+ * writing to the pool. This is, however, not the case for
+ * malloc_init() in populate-all pools if it populates
+ * non-zero content. (A usage sketch follows this function.)
+ */
+ if (!rseq_cmp_item(p, pool->item_len, 0, NULL))
+ continue;
+ bzero(p, pool->item_len);
+ }
+}
+
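+ /*
+ * Usage sketch (illustrative only): exercising the zero-page
+ * fast-path above through an allocation. This assumes the public
+ * rseq_mempool_attr_set_percpu(), rseq_mempool_attr_set_populate_policy()
+ * and rseq_mempool_percpu_zmalloc() APIs, the
+ * RSEQ_MEMPOOL_POPULATE_COW_ZERO policy name, and a hypothetical
+ * "struct counter" item type:
+ *
+ *   struct rseq_mempool_attr *attr = rseq_mempool_attr_create();
+ *   struct rseq_mempool *pool;
+ *   struct counter __rseq_percpu *c;
+ *
+ *   rseq_mempool_attr_set_percpu(attr, RSEQ_MEMPOOL_STRIDE, 0);
+ *   rseq_mempool_attr_set_populate_policy(attr,
+ *       RSEQ_MEMPOOL_POPULATE_COW_ZERO);
+ *   pool = rseq_mempool_create("counter-pool", sizeof(struct counter), attr);
+ *   rseq_mempool_attr_destroy(attr);
+ *
+ *   c = (struct counter __rseq_percpu *) rseq_mempool_percpu_zmalloc(pool);
+ *
+ * Freshly mapped ranges are backed by the zero page, so the zmalloc()
+ * above finds each CPU's item already zeroed and skips the store: CPUs
+ * which never write to the pool take no COW fault.
+ */
+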
+static
+void rseq_percpu_init_item(struct rseq_mempool *pool,
+ struct rseq_mempool_range *range, uintptr_t item_offset,
+ void *init_ptr, size_t init_len)
+{
+ char *init_p = NULL;
+ int i;
+
+ init_p = __rseq_pool_range_init_ptr(range, item_offset);
+ if (init_p)
+ memcpy(init_p, init_ptr, init_len);
+ for (i = 0; i < pool->attr.max_nr_cpus; i++) {
+ char *p = __rseq_pool_range_percpu_ptr(range, i,
+ item_offset, pool->attr.stride);
+
+ /*
+ * If the update propagated through a shared mapping,
+ * or the item already has the correct content, skip
+ * writing to the CPU item, thereby eliminating useless
+ * COW of the page. (A usage sketch follows this
+ * function.)
+ */
+ if (!memcmp(init_ptr, p, init_len))
+ continue;
+ memcpy(p, init_ptr, init_len);
+ }
+}
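+
+ /*
+ * Usage sketch (illustrative only) for the init path above, assuming
+ * the public rseq_mempool_percpu_malloc_init() API and a hypothetical
+ * "struct counter" item type holding a template value:
+ *
+ *   struct counter init_value = { .refcount = 1 };
+ *   struct counter __rseq_percpu *c;
+ *
+ *   c = (struct counter __rseq_percpu *)
+ *       rseq_mempool_percpu_malloc_init(pool, &init_value,
+ *           sizeof(init_value));
+ *
+ * Each CPU's item is seeded with the init content; a CPU item whose
+ * page already holds that content (e.g. through the shared init
+ * mapping) is skipped, avoiding a needless COW fault.
+ */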
+
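+ /*
+ * Fill an item with the poison word, one uintptr_t at a time. This
+ * assumes item_len is a multiple of sizeof(uintptr_t), which the
+ * pool's minimum item size and alignment are expected to guarantee.
+ */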
+static
+void rseq_poison_item(void *p, size_t item_len, uintptr_t poison)
+{
+ size_t offset;
+
+ for (offset = 0; offset < item_len; offset += sizeof(uintptr_t))
+ *((uintptr_t *) (p + offset)) = poison;
+}
+
+static
+void rseq_percpu_poison_item(struct rseq_mempool *pool,
+ struct rseq_mempool_range *range, uintptr_t item_offset)
+{
+ uintptr_t poison = pool->attr.poison;
+ char *init_p = NULL;
+ int i;
+
+ init_p = __rseq_pool_range_init_ptr(range, item_offset);
+ if (init_p)
+ rseq_poison_item(init_p, pool->item_len, poison);
+ for (i = 0; i < pool->attr.max_nr_cpus; i++) {
+ char *p = __rseq_pool_range_percpu_ptr(range, i,
+ item_offset, pool->attr.stride);
+
+ /*
+ * If the update propagated through a shared mapping,
+ * or the item already has the correct content, skip
+ * writing to the CPU item, thereby eliminating useless
+ * COW of the page.
+ *
+ * It is recommended to use zero as the poison value for
+ * COW_ZERO pools to eliminate COW due to writing
+ * poison to CPU memory still backed by the zero page
+ * (see the attribute sketch following this function).
+ */
+ if (rseq_cmp_item(p, pool->item_len, poison, NULL) == 0)
+ continue;
+ rseq_poison_item(p, pool->item_len, poison);
+ }
+}
+
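+ /*
+ * Attribute sketch (illustrative only): choosing the poison value,
+ * assuming the public rseq_mempool_attr_set_robust() and
+ * rseq_mempool_attr_set_poison() APIs:
+ *
+ *   rseq_mempool_attr_set_robust(attr);
+ *   rseq_mempool_attr_set_poison(attr, 0);
+ *
+ * With a zero poison value, freeing an item in a COW_ZERO pool leaves
+ * it identical to the zero page, so poisoning itself triggers no COW.
+ * Note that the poison check below is only performed for robust pools.
+ */
+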
+/* Always inline for __builtin_return_address(0). */
+static inline __attribute__((always_inline))
+void rseq_check_poison_item(const struct rseq_mempool *pool, uintptr_t item_offset,
+ void *p, size_t item_len, uintptr_t poison)
+{
+ intptr_t unexpected_value;
+
+ if (rseq_cmp_item(p, item_len, poison, &unexpected_value) == 0)
+ return;
+
+ fprintf(stderr, "%s: Poison corruption detected (0x%lx) for pool: \"%s\" (%p), item offset: %zu, caller: %p.\n",
+ __func__, (unsigned long) unexpected_value, get_pool_name(pool), pool, item_offset, (void *) __builtin_return_address(0));
+ abort();
+}
+
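+ /*
+ * For reference, rseq_cmp_item() (defined earlier in this file, not
+ * shown in this hunk) is assumed to behave as in the following
+ * sketch: return 0 when every word of the item equals cmp_value,
+ * otherwise return nonzero after storing the first mismatching word
+ * through unexpected_value when it is non-NULL:
+ *
+ *   static int rseq_cmp_item(void *p, size_t item_len,
+ *       intptr_t cmp_value, intptr_t *unexpected_value)
+ *   {
+ *       size_t offset;
+ *
+ *       for (offset = 0; offset < item_len; offset += sizeof(intptr_t)) {
+ *           intptr_t v = *((intptr_t *) ((char *) p + offset));
+ *
+ *           if (v != cmp_value) {
+ *               if (unexpected_value)
+ *                   *unexpected_value = v;
+ *               return 1;
+ *           }
+ *       }
+ *       return 0;
+ *   }
+ */
+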
+/* Always inline for __builtin_return_address(0). */
+static inline __attribute__((always_inline))
+void rseq_percpu_check_poison_item(const struct rseq_mempool *pool,
+ const struct rseq_mempool_range *range, uintptr_t item_offset)
+{
+ uintptr_t poison = pool->attr.poison;
+ char *init_p;
+ int i;
+
+ if (!pool->attr.robust_set)
+ return;
+ init_p = __rseq_pool_range_init_ptr(range, item_offset);
+ if (init_p)
+ rseq_check_poison_item(pool, item_offset, init_p, pool->item_len, poison);
+ for (i = 0; i < pool->attr.max_nr_cpus; i++) {
+ char *p = __rseq_pool_range_percpu_ptr(range, i,
+ item_offset, pool->attr.stride);
+ rseq_check_poison_item(pool, item_offset, p, pool->item_len, poison);