+ /*
+ * If the update propagated through a shared mapping,
+ * or the item already has the correct content, skip
+ * writing it into the cpu item to eliminate useless
+ * COW of the page.
+ *
+ * It is recommended to use zero as poison value for
+ * COW_ZERO pools to eliminate COW due to writing
+ * poison to CPU memory still backed by the zero page.
+ */
+ if (rseq_cmp_item(p, pool->item_len, poison, NULL) == 0)
+ continue;
+ rseq_poison_item(p, pool->item_len, poison);
+ }
+}
+
+ /* Always inline for __builtin_return_address(0). */
+ static inline __attribute__((always_inline))
+ void rseq_check_poison_item(const struct rseq_mempool *pool, uintptr_t item_offset,
+ 	void *p, size_t item_len, uintptr_t poison)
+ {
+ 	intptr_t unexpected_value;
+ 
+ 	/* Item is still entirely filled with poison: nothing to report. */
+ 	if (rseq_cmp_item(p, item_len, poison, &unexpected_value) == 0)
+ 		return;
+ 
+ 	/*
+ 	 * Robustness check failed: a freed item was overwritten. Report
+ 	 * the first unexpected word, the pool, the item offset and the
+ 	 * caller, then abort.
+ 	 *
+ 	 * Cast item_offset to size_t to match the %zu conversion
+ 	 * specifier: uintptr_t and size_t are distinct types on some
+ 	 * ABIs, and a mismatched fprintf argument type is undefined
+ 	 * behavior.
+ 	 */
+ 	fprintf(stderr, "%s: Poison corruption detected (0x%lx) for pool: \"%s\" (%p), item offset: %zu, caller: %p.\n",
+ 		__func__, (unsigned long) unexpected_value, get_pool_name(pool), pool, (size_t) item_offset, (void *) __builtin_return_address(0));
+ 	abort();
+ }
+
+/* Always inline for __builtin_return_address(0). */
+static inline __attribute__((always_inline))
+void rseq_percpu_check_poison_item(const struct rseq_mempool *pool,
+ const struct rseq_mempool_range *range, uintptr_t item_offset)
+{
+ uintptr_t poison = pool->attr.poison;
+ char *init_p;
+ int i;
+
+ if (!pool->attr.robust_set)
+ return;
+ init_p = __rseq_pool_range_init_ptr(range, item_offset);
+ if (init_p)
+ rseq_check_poison_item(pool, item_offset, init_p, pool->item_len, poison);
+ for (i = 0; i < pool->attr.max_nr_cpus; i++) {
+ char *p = __rseq_pool_range_percpu_ptr(range, i,
+ item_offset, pool->attr.stride);
+ rseq_check_poison_item(pool, item_offset, p, pool->item_len, poison);