/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitmap.h>
#include <linux/sched.h>
#include <linux/pid.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <asm/cputable.h>
#include <asm/current.h>
#include <asm/copro.h>

#include "cxl.h"

/*
 * Allocates space for a CXL context.
 */
struct cxl_context *cxl_context_alloc(void)
{
	return kzalloc(sizeof(struct cxl_context), GFP_KERNEL);
}

/*
 * Initialises a CXL context.
 */
int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
		     struct address_space *mapping)
{
	int i;

	spin_lock_init(&ctx->sste_lock);
	ctx->afu = afu;
	ctx->master = master;
	ctx->pid = ctx->glpid = NULL; /* Set in start work ioctl */
	mutex_init(&ctx->mapping_lock);
	ctx->mapping = mapping;

	/*
	 * Allocate the segment table before we put it in the IDR so that we
	 * can always access it when dereferenced from the IDR. For the same
	 * reason, the segment table is only destroyed after the context is
	 * removed from the IDR. Access to this in the IOCTL is protected by
	 * Linux filesystem semantics (can't IOCTL until open is complete).
	 */
	i = cxl_alloc_sst(ctx);
	if (i)
		return i;

	INIT_WORK(&ctx->fault_work, cxl_handle_fault);

	init_waitqueue_head(&ctx->wq);
	spin_lock_init(&ctx->lock);

	ctx->irq_bitmap = NULL;
	ctx->pending_irq = false;
	ctx->pending_fault = false;
	ctx->pending_afu_err = false;

	INIT_LIST_HEAD(&ctx->irq_names);
	INIT_LIST_HEAD(&ctx->extra_irq_contexts);

	/*
	 * When we have to destroy all contexts in cxl_context_detach_all() we
	 * end up with afu_release_irqs() called from inside an
	 * idr_for_each_entry(). Hence we need to make sure that anything
	 * dereferenced from this IDR is ok before we allocate the IDR here.
	 * This clears out the IRQ ranges to ensure this.
	 */
	for (i = 0; i < CXL_IRQ_RANGES; i++)
		ctx->irqs.range[i] = 0;

	mutex_init(&ctx->status_mutex);

	ctx->status = OPENED;

	/*
	 * Allocating the IDR entry! We had better make sure everything it
	 * dereferences is set up first.
	 */
	mutex_lock(&afu->contexts_lock);
	idr_preload(GFP_KERNEL);
	i = idr_alloc(&ctx->afu->contexts_idr, ctx,
		      ctx->afu->adapter->native->sl_ops->min_pe,
		      ctx->afu->num_procs, GFP_NOWAIT);
	idr_preload_end();
	mutex_unlock(&afu->contexts_lock);
	if (i < 0)
		return i;

	ctx->pe = i;
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		ctx->elem = &ctx->afu->native->spa[i];
		ctx->external_pe = ctx->pe;
	} else {
		ctx->external_pe = -1; /* assigned when attaching */
	}
	ctx->pe_inserted = false;

	/*
	 * Take a ref on the afu so that it stays alive at least until
	 * this context is reclaimed inside reclaim_ctx.
	 */
	cxl_afu_get(afu);
	return 0;
}
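
/*
 * Illustrative sketch (not part of this file): a caller, such as the cxl
 * character-device open path, is expected to pair the two functions above
 * roughly as follows. example_context_create() and its arguments are
 * assumptions standing in for whatever state the caller already holds.
 */
static inline struct cxl_context *example_context_create(struct cxl_afu *afu,
				bool master, struct address_space *mapping)
{
	struct cxl_context *ctx;
	int rc;

	ctx = cxl_context_alloc();
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	/* On failure the context never made it into the IDR; kfree is safe */
	rc = cxl_context_init(ctx, afu, master, mapping);
	if (rc) {
		kfree(ctx);
		return ERR_PTR(rc);
	}

	return ctx;
}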

/*
 * Fault handler for the problem state area mappings set up by
 * cxl_context_iomap() below. It resolves the faulting offset against either
 * the AFU-wide (dedicated mode) or per-context problem state area.
 */
static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct cxl_context *ctx = vma->vm_file->private_data;
	unsigned long address = (unsigned long)vmf->virtual_address;
	u64 area, offset;

	offset = vmf->pgoff << PAGE_SHIFT;

	pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n",
			__func__, ctx->pe, address, offset);

	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
		area = ctx->afu->psn_phys;
		if (offset >= ctx->afu->adapter->ps_size)
			return VM_FAULT_SIGBUS;
	} else {
		area = ctx->psn_phys;
		if (offset >= ctx->psn_size)
			return VM_FAULT_SIGBUS;
	}

	mutex_lock(&ctx->status_mutex);

	if (ctx->status != STARTED) {
		mutex_unlock(&ctx->status_mutex);
		pr_devel("%s: Context not started, failing problem state access\n", __func__);
		if (ctx->mmio_err_ff) {
			/*
			 * Back the mapping with a page of 0xff instead of
			 * failing the fault, so reads behave like MMIO reads
			 * from dead hardware (all ones).
			 */
			if (!ctx->ff_page) {
				ctx->ff_page = alloc_page(GFP_USER);
				if (!ctx->ff_page)
					return VM_FAULT_OOM;
				memset(page_address(ctx->ff_page), 0xff, PAGE_SIZE);
			}
			get_page(ctx->ff_page);
			vmf->page = ctx->ff_page;
			vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
			return 0;
		}
		return VM_FAULT_SIGBUS;
	}

	vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);

	mutex_unlock(&ctx->status_mutex);

	return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct cxl_mmap_vmops = {
	.fault = cxl_mmap_fault,
};

/*
 * Map a per-context mmio space into the given vma.
 */
int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
{
	u64 start = vma->vm_pgoff << PAGE_SHIFT;
	u64 len = vma->vm_end - vma->vm_start;

	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
		if (start + len > ctx->afu->adapter->ps_size)
			return -EINVAL;
	} else {
		if (start + len > ctx->psn_size)
			return -EINVAL;
	}

	if (ctx->afu->current_mode != CXL_MODE_DEDICATED) {
		/* make sure there is a valid per process space for this AFU */
		if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
			pr_devel("AFU doesn't support mmio space\n");
			return -EINVAL;
		}

		/* Can't mmap until the AFU is enabled */
		if (!ctx->afu->enabled)
			return -EBUSY;
	}

	pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__,
		 ctx->psn_phys, ctx->pe, ctx->master);

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &cxl_mmap_vmops;
	return 0;
}
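
/*
 * Illustrative sketch (an assumption, not the driver's actual file
 * operation): an mmap handler on the context's file descriptor would
 * delegate to cxl_context_iomap() roughly like this, relying on the bounds
 * and mode checks above to do the real work.
 */
static inline int example_afu_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct cxl_context *ctx = file->private_data;

	return cxl_context_iomap(ctx, vma);
}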

/*
 * Detach a context from the hardware. This disables interrupts and doesn't
 * return until all outstanding interrupts for this context have completed. The
 * hardware should no longer access *ctx after this has returned.
 */
int __detach_context(struct cxl_context *ctx)
{
	enum cxl_context_status status;

	mutex_lock(&ctx->status_mutex);
	status = ctx->status;
	ctx->status = CLOSED;
	mutex_unlock(&ctx->status_mutex);
	if (status != STARTED)
		return -EBUSY;

	/*
	 * Only warn if we detached while the link was OK.
	 * If detach fails when hw is down, we don't care.
	 */
	WARN_ON(cxl_ops->detach_process(ctx) &&
		cxl_ops->link_ok(ctx->afu->adapter, ctx->afu));
	flush_work(&ctx->fault_work); /* Only needed for dedicated process */

	/*
	 * Wait until no further interrupts are presented by the PSL
	 * for this context.
	 */
	if (cxl_ops->irq_wait)
		cxl_ops->irq_wait(ctx);

	/* Release the reference to the group leader and mm handling pid */
	put_pid(ctx->pid);
	put_pid(ctx->glpid);

	cxl_ctx_put();
	return 0;
}

/*
 * Detach the given context from the AFU. This doesn't actually free the
 * context but it should stop the context running in hardware (i.e. prevent
 * this context from generating any further interrupts so that it can be
 * freed).
 */
void cxl_context_detach(struct cxl_context *ctx)
{
	int rc;

	rc = __detach_context(ctx);
	if (rc)
		return;

	afu_release_irqs(ctx, ctx);
	wake_up_all(&ctx->wq);
}
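
/*
 * Illustrative sketch (an assumption about the callers): teardown is
 * expected to detach the context from the hardware before freeing it with
 * cxl_context_free(), which is declared in cxl.h and defined below.
 */
static inline void example_context_destroy(struct cxl_context *ctx)
{
	cxl_context_detach(ctx);
	cxl_context_free(ctx);
}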

/*
 * Detach all contexts on the given AFU.
 */
void cxl_context_detach_all(struct cxl_afu *afu)
{
	struct cxl_context *ctx;
	int tmp;

	mutex_lock(&afu->contexts_lock);
	idr_for_each_entry(&afu->contexts_idr, ctx, tmp) {
		/*
		 * Anything done in here needs to be set up before the IDR is
		 * created and torn down after the IDR is removed.
		 */
		cxl_context_detach(ctx);

		/*
		 * We are force detaching - remove any active PSA mappings so
		 * userspace cannot interfere with the card if it comes back.
		 * Easiest way to exercise this is to unbind and rebind the
		 * driver via sysfs while it is in use.
		 */
		mutex_lock(&ctx->mapping_lock);
		if (ctx->mapping)
			unmap_mapping_range(ctx->mapping, 0, 0, 1);
		mutex_unlock(&ctx->mapping_lock);
	}
	mutex_unlock(&afu->contexts_lock);
}

/*
 * RCU callback that actually frees the context's memory, once no RCU
 * reader can still hold a reference obtained from the contexts IDR.
 */
static void reclaim_ctx(struct rcu_head *rcu)
{
	struct cxl_context *ctx = container_of(rcu, struct cxl_context, rcu);

	free_page((u64)ctx->sstp);
	if (ctx->ff_page)
		__free_page(ctx->ff_page);
	ctx->sstp = NULL;
	if (ctx->kernelapi)
		kfree(ctx->mapping);

	kfree(ctx->irq_bitmap);

	/* Drop ref to the afu device taken during cxl_context_init */
	cxl_afu_put(ctx->afu);

	kfree(ctx);
}

void cxl_context_free(struct cxl_context *ctx)
{
	mutex_lock(&ctx->afu->contexts_lock);
	idr_remove(&ctx->afu->contexts_idr, ctx->pe);
	mutex_unlock(&ctx->afu->contexts_lock);
	call_rcu(&ctx->rcu, reclaim_ctx);
}
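
/*
 * Illustrative sketch (an assumption): because freeing is deferred through
 * call_rcu() above, a lookup running under rcu_read_lock() can safely
 * dereference a context found in the IDR even if it races with
 * cxl_context_free(). example_context_status() is hypothetical.
 */
static inline enum cxl_context_status example_context_status(struct cxl_afu *afu,
							     int pe)
{
	struct cxl_context *ctx;
	enum cxl_context_status status = CLOSED;

	rcu_read_lock();
	ctx = idr_find(&afu->contexts_idr, pe);
	if (ctx)
		status = ctx->status; /* memory-safe: freeing waits for the grace period */
	rcu_read_unlock();

	return status;
}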