pmem: switch to devm_ allocations
[deliverable/linux.git] / include/linux/pmem.h
/*
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#ifndef __PMEM_H__
#define __PMEM_H__

#include <linux/io.h>

#ifdef CONFIG_ARCH_HAS_PMEM_API
#include <asm/cacheflush.h>
#else
static inline void arch_wmb_pmem(void)
{
	BUG();
}

static inline bool __arch_has_wmb_pmem(void)
{
	return false;
}

static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
		size_t n)
{
	BUG();
}
#endif

/*
 * Architectures that define ARCH_HAS_PMEM_API must provide
 * implementations for arch_memcpy_to_pmem(), arch_wmb_pmem(), and
 * __arch_has_wmb_pmem().
 */

static inline void memcpy_from_pmem(void *dst, void __pmem const *src, size_t size)
{
	memcpy(dst, (void __force const *) src, size);
}

static inline void memunmap_pmem(struct device *dev, void __pmem *addr)
{
	devm_memunmap(dev, (void __force *) addr);
}

/**
 * arch_has_wmb_pmem - true if wmb_pmem() ensures durability
 *
 * For a given cpu implementation within an architecture it is possible
 * that wmb_pmem() resolves to a nop. In the case this returns
 * false, pmem api users are unable to ensure durability and may want to
 * fall back to a different data consistency model, or otherwise notify
 * the user.
 */
static inline bool arch_has_wmb_pmem(void)
{
	if (IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
		return __arch_has_wmb_pmem();
	return false;
}

static inline bool arch_has_pmem_api(void)
{
	return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) && arch_has_wmb_pmem();
}
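
/*
 * Illustrative sketch (not part of this header): a caller that needs
 * durable writes would typically probe for wmb_pmem() support up front
 * and either warn or fall back to another consistency model, e.g.:
 *
 *	if (!arch_has_wmb_pmem())
 *		dev_warn(dev, "unable to guarantee persistence of writes\n");
 */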

/*
 * These defaults seek to offer decent performance and minimize the
 * window between i/o completion and writes being durable on media.
 * However, it is undefined / architecture specific whether
 * default_memremap_pmem + default_memcpy_to_pmem is sufficient for
 * making data durable relative to i/o completion.
 */
static inline void default_memcpy_to_pmem(void __pmem *dst, const void *src,
		size_t size)
{
	memcpy((void __force *) dst, src, size);
}

/**
 * memremap_pmem - map physical persistent memory for pmem api
 * @dev: device to which the lifetime of the mapping is tied (devm managed)
 * @offset: physical address of persistent memory
 * @size: size of the mapping
 *
 * Establish a mapping of the architecture specific memory type expected
 * by memcpy_to_pmem() and wmb_pmem(). For example, it may be
 * the case that an uncacheable or writethrough mapping is sufficient,
 * or a writeback mapping provided memcpy_to_pmem() and
 * wmb_pmem() arrange for the data to be written through the
 * cache to persistent media.
 */
static inline void __pmem *memremap_pmem(struct device *dev,
		resource_size_t offset, unsigned long size)
{
#ifdef ARCH_MEMREMAP_PMEM
	return (void __pmem *) devm_memremap(dev, offset, size,
			ARCH_MEMREMAP_PMEM);
#else
	return (void __pmem *) devm_memremap(dev, offset, size,
			MEMREMAP_WT);
#endif
}
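
/*
 * Illustrative sketch (not part of this header): with the devm_ based
 * mapping above, a hypothetical probe routine can map a persistent
 * memory resource and rely on automatic teardown at driver detach
 * (or call memunmap_pmem() explicitly), e.g.:
 *
 *	void __pmem *virt;
 *
 *	virt = memremap_pmem(dev, res->start, resource_size(res));
 *	if (!virt)
 *		return -ENXIO;
 */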

/**
 * memcpy_to_pmem - copy data to persistent memory
 * @dst: destination buffer for the copy
 * @src: source buffer for the copy
 * @n: length of the copy in bytes
 *
 * Perform a memory copy that results in the destination of the copy
 * being effectively evicted from, or never written to, the processor
 * cache hierarchy after the copy completes. After memcpy_to_pmem()
 * data may still reside in cpu or platform buffers, so this operation
 * must be followed by a wmb_pmem().
 */
static inline void memcpy_to_pmem(void __pmem *dst, const void *src, size_t n)
{
	if (arch_has_pmem_api())
		arch_memcpy_to_pmem(dst, src, n);
	else
		default_memcpy_to_pmem(dst, src, n);
}

/**
 * wmb_pmem - synchronize writes to persistent memory
 *
 * After a series of memcpy_to_pmem() operations this drains data from
 * cpu write buffers and any platform (memory controller) buffers to
 * ensure that written data is durable on persistent memory media.
 */
static inline void wmb_pmem(void)
{
	if (arch_has_pmem_api())
		arch_wmb_pmem();
}
#endif /* __PMEM_H__ */
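
As a usage sketch only (foo_write_block, virt, off, buf, and len are hypothetical names, not part of this header), a write path built on this api copies into an already-established pmem mapping and then drains buffers before reporting the i/o complete:

#include <linux/pmem.h>

/* copy a block into a pmem mapping and make it durable on media */
static void foo_write_block(void __pmem *virt, unsigned int off,
		const void *buf, size_t len)
{
	/* destination is evicted from, or never enters, the cpu cache */
	memcpy_to_pmem(virt + off, buf, len);
	/* drain cpu and platform write buffers */
	wmb_pmem();
}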