/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitmap.h>
#include <linux/sched.h>
#include <linux/pid.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <asm/cputable.h>
#include <asm/current.h>
#include <asm/copro.h>

#include "cxl.h"

/*
 * Allocates space for a CXL context.
 */
struct cxl_context *cxl_context_alloc(void)
{
        return kzalloc(sizeof(struct cxl_context), GFP_KERNEL);
}

/*
 * Initialises a CXL context.
 */
int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
                     struct address_space *mapping)
{
        int i;

        spin_lock_init(&ctx->sste_lock);
        ctx->afu = afu;
        ctx->master = master;
        ctx->pid = ctx->glpid = NULL; /* Set in start work ioctl */
        mutex_init(&ctx->mapping_lock);
        ctx->mapping = mapping;

        /*
         * Allocate the segment table before we put it in the IDR so that we
         * can always access it when dereferenced from the IDR. For the same
         * reason, the segment table is only destroyed after the context is
         * removed from the IDR. Access to this in the IOCTL is protected by
         * Linux filesystem semantics (can't IOCTL until open is complete).
         */
        i = cxl_alloc_sst(ctx);
        if (i)
                return i;

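        /* AFU translation faults are serviced later, in work-queue context, by cxl_handle_fault() */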
        INIT_WORK(&ctx->fault_work, cxl_handle_fault);

        init_waitqueue_head(&ctx->wq);
        spin_lock_init(&ctx->lock);

        ctx->irq_bitmap = NULL;
        ctx->pending_irq = false;
        ctx->pending_fault = false;
        ctx->pending_afu_err = false;

        /*
         * When we have to destroy all contexts in cxl_context_detach_all() we
         * end up with afu_release_irqs() called from inside an
         * idr_for_each_entry(). Hence we need to make sure that anything
         * dereferenced from this IDR is ok before we allocate the IDR here.
         * This clears out the IRQ ranges to ensure this.
         */
        for (i = 0; i < CXL_IRQ_RANGES; i++)
                ctx->irqs.range[i] = 0;

        mutex_init(&ctx->status_mutex);

        ctx->status = OPENED;

        /*
         * Allocating IDR! We better make sure everything's set up that
         * dereferences from it.
         */
        mutex_lock(&afu->contexts_lock);
        idr_preload(GFP_KERNEL);
        i = idr_alloc(&ctx->afu->contexts_idr, ctx, 0,
                      ctx->afu->num_procs, GFP_NOWAIT);
        idr_preload_end();
        mutex_unlock(&afu->contexts_lock);
        if (i < 0)
                return i;

        ctx->pe = i;
        if (cpu_has_feature(CPU_FTR_HVMODE)) {
                ctx->elem = &ctx->afu->native->spa[i];
                ctx->external_pe = ctx->pe;
        } else {
                ctx->external_pe = -1; /* assigned when attaching */
        }
        ctx->pe_inserted = false;

        /*
         * Take a ref on the afu so that it stays alive at least until
         * this context is reclaimed inside reclaim_ctx.
         */
        cxl_afu_get(afu);
        return 0;
}

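/*
 * Fault handler for mmaps of the per-context problem state area: translates
 * the faulting offset into a physical MMIO page and maps it into userspace.
 */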
static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct cxl_context *ctx = vma->vm_file->private_data;
        unsigned long address = (unsigned long)vmf->virtual_address;
        u64 area, offset;

        offset = vmf->pgoff << PAGE_SHIFT;

        pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n",
                 __func__, ctx->pe, address, offset);

        if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
                area = ctx->afu->psn_phys;
                if (offset >= ctx->afu->adapter->ps_size)
                        return VM_FAULT_SIGBUS;
        } else {
                area = ctx->psn_phys;
                if (offset >= ctx->psn_size)
                        return VM_FAULT_SIGBUS;
        }

        mutex_lock(&ctx->status_mutex);

        if (ctx->status != STARTED) {
                mutex_unlock(&ctx->status_mutex);
                pr_devel("%s: Context not started, failing problem state access\n", __func__);
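                /*
                 * With mmio_err_ff set, back the mapping with a cached page
                 * of 0xff bytes instead of raising SIGBUS, so userspace
                 * reads return all-ones (as a failed MMIO read would) once
                 * the context is no longer started.
                 */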
                if (ctx->mmio_err_ff) {
                        if (!ctx->ff_page) {
                                ctx->ff_page = alloc_page(GFP_USER);
                                if (!ctx->ff_page)
                                        return VM_FAULT_OOM;
                                memset(page_address(ctx->ff_page), 0xff, PAGE_SIZE);
                        }
                        get_page(ctx->ff_page);
                        vmf->page = ctx->ff_page;
                        vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
                        return 0;
                }
                return VM_FAULT_SIGBUS;
        }

        vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);

        mutex_unlock(&ctx->status_mutex);

        return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct cxl_mmap_vmops = {
        .fault = cxl_mmap_fault,
};

/*
 * Map a per-context mmio space into the given vma.
 */
int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
{
        u64 start = vma->vm_pgoff << PAGE_SHIFT;
        u64 len = vma->vm_end - vma->vm_start;

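        /* Refuse mappings that would run past the end of the problem state area */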
        if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
                if (start + len > ctx->afu->adapter->ps_size)
                        return -EINVAL;
        } else {
                if (start + len > ctx->psn_size)
                        return -EINVAL;
        }

        if (ctx->afu->current_mode != CXL_MODE_DEDICATED) {
                /* make sure there is a valid per process space for this AFU */
                if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
                        pr_devel("AFU doesn't support mmio space\n");
                        return -EINVAL;
                }

                /* Can't mmap until the AFU is enabled */
                if (!ctx->afu->enabled)
                        return -EBUSY;
        }

        pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__,
                 ctx->psn_phys, ctx->pe, ctx->master);

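        /*
         * The problem state area is raw MMIO: map it non-cached as PFNs,
         * with pages faulted in on demand by cxl_mmap_fault() above.
         */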
        vma->vm_flags |= VM_IO | VM_PFNMAP;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        vma->vm_ops = &cxl_mmap_vmops;
        return 0;
}

/*
 * Detach a context from the hardware. This disables interrupts and doesn't
 * return until all outstanding interrupts for this context have completed. The
 * hardware should no longer access *ctx after this has returned.
 */
int __detach_context(struct cxl_context *ctx)
{
        enum cxl_context_status status;

        mutex_lock(&ctx->status_mutex);
        status = ctx->status;
        ctx->status = CLOSED;
        mutex_unlock(&ctx->status_mutex);
        if (status != STARTED)
                return -EBUSY;

        /* Only warn if we detached while the link was OK.
         * If detach fails when hw is down, we don't care.
         */
        WARN_ON(cxl_ops->detach_process(ctx) &&
                cxl_ops->link_ok(ctx->afu->adapter, ctx->afu));
        flush_work(&ctx->fault_work); /* Only needed for dedicated process */

        /* release the reference to the group leader and mm handling pid */
        put_pid(ctx->pid);
        put_pid(ctx->glpid);

        cxl_ctx_put();
        return 0;
}

/*
 * Detach the given context from the AFU. This doesn't actually
 * free the context but it should stop the context running in hardware
 * (i.e. prevent this context from generating any further interrupts
 * so that it can be freed).
 */
void cxl_context_detach(struct cxl_context *ctx)
{
        int rc;

        rc = __detach_context(ctx);
        if (rc)
                return;

        afu_release_irqs(ctx, ctx);
        wake_up_all(&ctx->wq);
}

/*
 * Detach all contexts on the given AFU.
 */
void cxl_context_detach_all(struct cxl_afu *afu)
{
        struct cxl_context *ctx;
        int tmp;

        mutex_lock(&afu->contexts_lock);
        idr_for_each_entry(&afu->contexts_idr, ctx, tmp) {
                /*
                 * Anything done in here needs to be set up before the IDR is
                 * created and torn down after the IDR is removed.
                 */
                cxl_context_detach(ctx);

                /*
                 * We are force detaching - remove any active PSA mappings so
                 * userspace cannot interfere with the card if it comes back.
                 * Easiest way to exercise this is to unbind and rebind the
                 * driver via sysfs while it is in use.
                 */
                mutex_lock(&ctx->mapping_lock);
                if (ctx->mapping)
                        unmap_mapping_range(ctx->mapping, 0, 0, 1);
                mutex_unlock(&ctx->mapping_lock);
        }
        mutex_unlock(&afu->contexts_lock);
}

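/*
 * RCU callback that does the actual freeing: runs once all
 * idr_for_each_entry() walkers that might still hold a pointer to this
 * context have finished.
 */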
static void reclaim_ctx(struct rcu_head *rcu)
{
        struct cxl_context *ctx = container_of(rcu, struct cxl_context, rcu);

        free_page((u64)ctx->sstp);
        if (ctx->ff_page)
                __free_page(ctx->ff_page);
        ctx->sstp = NULL;
        if (ctx->kernelapi)
                kfree(ctx->mapping);

        kfree(ctx->irq_bitmap);

        /* Drop ref to the afu device taken during cxl_context_init */
        cxl_afu_put(ctx->afu);

        kfree(ctx);
}

void cxl_context_free(struct cxl_context *ctx)
{
        mutex_lock(&ctx->afu->contexts_lock);
        idr_remove(&ctx->afu->contexts_idr, ctx->pe);
        mutex_unlock(&ctx->afu->contexts_lock);
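        /* Defer the free past an RCU grace period so lockless IDR walkers stay safe */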
        call_rcu(&ctx->rcu, reclaim_ctx);
}