arch/s390/include/asm/idals.h
/*
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *                  Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 2000
 *
 * History of changes
 * 07/24/00 new file
 * 05/04/02 code restructuring.
 */

#ifndef _S390_IDALS_H
#define _S390_IDALS_H

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <asm/cio.h>
#include <asm/uaccess.h>

#define IDA_SIZE_LOG 12 /* 11 for 2k, 12 for 4k */
#define IDA_BLOCK_SIZE (1L<<IDA_SIZE_LOG)

/*
 * Test if an address/length pair needs an idal list.
 */
static inline int
idal_is_needed(void *vaddr, unsigned int length)
{
	return ((__pa(vaddr) + length - 1) >> 31) != 0;
}

/*
 * Return the number of idal words needed for an address/length pair.
 */
static inline unsigned int idal_nr_words(void *vaddr, unsigned int length)
{
	return ((__pa(vaddr) & (IDA_BLOCK_SIZE-1)) + length +
		(IDA_BLOCK_SIZE-1)) >> IDA_SIZE_LOG;
}

/*
 * Create the list of idal words for an address/length pair.
 */
static inline unsigned long *idal_create_words(unsigned long *idaws,
					       void *vaddr, unsigned int length)
{
	unsigned long paddr;
	unsigned int cidaw;

	paddr = __pa(vaddr);
	cidaw = ((paddr & (IDA_BLOCK_SIZE-1)) + length +
		 (IDA_BLOCK_SIZE-1)) >> IDA_SIZE_LOG;
	*idaws++ = paddr;
	paddr &= -IDA_BLOCK_SIZE;
	while (--cidaw > 0) {
		paddr += IDA_BLOCK_SIZE;
		*idaws++ = paddr;
	}
	return idaws;
}
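
/*
 * Worked example (illustrative numbers only): a 6000-byte buffer at
 * physical address 0x10ffc touches the 4K blocks 0x10000, 0x11000 and
 * 0x12000, so idal_nr_words() returns 3 and idal_create_words() stores
 * the three words 0x10ffc, 0x11000 and 0x12000.
 */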

/*
 * Sets the address of the data in CCW.
 * If necessary it allocates an IDAL and sets the appropriate flags.
 */
static inline int
set_normalized_cda(struct ccw1 * ccw, void *vaddr)
{
	unsigned int nridaws;
	unsigned long *idal;

	if (ccw->flags & CCW_FLAG_IDA)
		return -EINVAL;
	nridaws = idal_nr_words(vaddr, ccw->count);
	if (nridaws > 0) {
		idal = kmalloc(nridaws * sizeof(unsigned long),
			       GFP_ATOMIC | GFP_DMA);
		if (idal == NULL)
			return -ENOMEM;
		idal_create_words(idal, vaddr, ccw->count);
		ccw->flags |= CCW_FLAG_IDA;
		vaddr = idal;
	}
	ccw->cda = (__u32)(unsigned long) vaddr;
	return 0;
}

/*
 * Releases any allocated IDAL related to the CCW.
 */
static inline void
clear_normalized_cda(struct ccw1 * ccw)
{
	if (ccw->flags & CCW_FLAG_IDA) {
		kfree((void *)(unsigned long) ccw->cda);
		ccw->flags &= ~CCW_FLAG_IDA;
	}
	ccw->cda = 0;
}
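
/*
 * Illustrative sketch (hypothetical driver code, not taken from any real
 * driver): how a single CCW might be wrapped around a data buffer with
 * the helpers above.  The command code is a placeholder.
 */
static inline int example_setup_single_ccw(struct ccw1 *ccw, void *buf,
					   unsigned short len)
{
	int rc;

	ccw->cmd_code = 0x02;			/* placeholder command code */
	ccw->flags = 0;
	ccw->count = len;
	rc = set_normalized_cda(ccw, buf);	/* allocates an IDAL if needed */
	if (rc)
		return rc;
	/* ... start the channel program and wait for it to complete ... */
	clear_normalized_cda(ccw);		/* frees the IDAL, if any */
	return 0;
}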

/*
 * Idal buffer extension
 */
struct idal_buffer {
	size_t size;
	size_t page_order;
	void *data[0];
};

/*
 * Allocate an idal buffer
 */
static inline struct idal_buffer *
idal_buffer_alloc(size_t size, int page_order)
{
	struct idal_buffer *ib;
	int nr_chunks, nr_ptrs, i;

	nr_ptrs = (size + IDA_BLOCK_SIZE - 1) >> IDA_SIZE_LOG;
	nr_chunks = (4096 << page_order) >> IDA_SIZE_LOG;
	ib = kmalloc(sizeof(struct idal_buffer) + nr_ptrs*sizeof(void *),
		     GFP_DMA | GFP_KERNEL);
	if (ib == NULL)
		return ERR_PTR(-ENOMEM);
	ib->size = size;
	ib->page_order = page_order;
	for (i = 0; i < nr_ptrs; i++) {
		if ((i & (nr_chunks - 1)) != 0) {
			ib->data[i] = ib->data[i-1] + IDA_BLOCK_SIZE;
			continue;
		}
		ib->data[i] = (void *)
			__get_free_pages(GFP_KERNEL, page_order);
		if (ib->data[i] != NULL)
			continue;
		// Not enough memory
		while (i >= nr_chunks) {
			i -= nr_chunks;
			free_pages((unsigned long) ib->data[i],
				   ib->page_order);
		}
		kfree(ib);
		return ERR_PTR(-ENOMEM);
	}
	return ib;
}

/*
 * Free an idal buffer.
 */
static inline void
idal_buffer_free(struct idal_buffer *ib)
{
	int nr_chunks, nr_ptrs, i;

	nr_ptrs = (ib->size + IDA_BLOCK_SIZE - 1) >> IDA_SIZE_LOG;
	nr_chunks = (4096 << ib->page_order) >> IDA_SIZE_LOG;
	for (i = 0; i < nr_ptrs; i += nr_chunks)
		free_pages((unsigned long) ib->data[i], ib->page_order);
	kfree(ib);
}

/*
 * Test if an idal list is really needed.
 */
static inline int
__idal_buffer_is_needed(struct idal_buffer *ib)
{
	return ib->size > (4096ul << ib->page_order) ||
		idal_is_needed(ib->data[0], ib->size);
}

/*
 * Set channel data address to idal buffer.
 */
static inline void
idal_buffer_set_cda(struct idal_buffer *ib, struct ccw1 *ccw)
{
	if (__idal_buffer_is_needed(ib)) {
		// setup idals;
		ccw->cda = (u32)(addr_t) ib->data;
		ccw->flags |= CCW_FLAG_IDA;
	} else
		// we do not need idals - use direct addressing
		ccw->cda = (u32)(addr_t) ib->data[0];
	ccw->count = ib->size;
}
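
/*
 * Illustrative sketch (hypothetical helper): allocate an idal buffer
 * backed by order-1 (8K) chunks, point a CCW at it and release it again.
 */
static inline int example_ccw_with_idal_buffer(struct ccw1 *ccw, size_t size)
{
	struct idal_buffer *ib;

	ib = idal_buffer_alloc(size, 1);
	if (IS_ERR(ib))
		return PTR_ERR(ib);
	idal_buffer_set_cda(ib, ccw);	/* sets cda, count and, if needed, the IDA flag */
	/* ... start the channel program and wait for it to complete ... */
	idal_buffer_free(ib);
	return 0;
}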

/*
 * Copy count bytes from an idal buffer to user memory
 */
static inline size_t
idal_buffer_to_user(struct idal_buffer *ib, void __user *to, size_t count)
{
	size_t left;
	int i;

	BUG_ON(count > ib->size);
	for (i = 0; count > IDA_BLOCK_SIZE; i++) {
		left = copy_to_user(to, ib->data[i], IDA_BLOCK_SIZE);
		if (left)
			return left + count - IDA_BLOCK_SIZE;
		to = (void __user *) to + IDA_BLOCK_SIZE;
		count -= IDA_BLOCK_SIZE;
	}
	return copy_to_user(to, ib->data[i], count);
}

/*
 * Copy count bytes from user memory to an idal buffer
 */
static inline size_t
idal_buffer_from_user(struct idal_buffer *ib, const void __user *from, size_t count)
{
	size_t left;
	int i;

	BUG_ON(count > ib->size);
	for (i = 0; count > IDA_BLOCK_SIZE; i++) {
		left = copy_from_user(ib->data[i], from, IDA_BLOCK_SIZE);
		if (left)
			return left + count - IDA_BLOCK_SIZE;
		from = (void __user *) from + IDA_BLOCK_SIZE;
		count -= IDA_BLOCK_SIZE;
	}
	return copy_from_user(ib->data[i], from, count);
}
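
/*
 * Illustrative sketch (hypothetical helper): copy the result of a read
 * that completed into an idal buffer back to user space.  Like
 * copy_to_user(), the helpers above return the number of bytes that
 * could not be copied.
 */
static inline int example_result_to_user(struct idal_buffer *ib,
					 void __user *ubuf, size_t count)
{
	if (count > ib->size)
		return -EINVAL;
	if (idal_buffer_to_user(ib, ubuf, count))
		return -EFAULT;
	return 0;
}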

#endif