/*
 * SN Platform GRU Driver
 *
 * FAULT HANDLER FOR GRU DETECTED TLB MISSES
 *
 * This file contains code that handles TLB misses within the GRU.
 * These misses are reported either via interrupts or user polling of
 * the user CB.
 *
 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/security.h>
#include <asm/pgtable.h>
#include "gru.h"
#include "grutables.h"
#include "grulib.h"
#include "gru_instructions.h"
#include <asm/uv/uv_hub.h>

/*
 * Test if a physical address is a valid GRU GSEG address
 */
static inline int is_gru_paddr(unsigned long paddr)
{
	return paddr >= gru_start_paddr && paddr < gru_end_paddr;
}

/*
 * Find the vma of a GRU segment. Caller must hold mmap_sem.
 */
struct vm_area_struct *gru_find_vma(unsigned long vaddr)
{
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, vaddr);
	if (vma && vma->vm_start <= vaddr && vma->vm_ops == &gru_vm_ops)
		return vma;
	return NULL;
}

/*
 * Find and lock the gts that contains the specified user vaddr.
 *
 * Returns:
 *	- *gts with the mmap_sem locked for read and the GTS locked.
 *	- NULL if vaddr is invalid or is not a valid GSEG vaddr.
 */
static struct gru_thread_state *gru_find_lock_gts(unsigned long vaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct gru_thread_state *gts = NULL;

	down_read(&mm->mmap_sem);
	vma = gru_find_vma(vaddr);
	if (vma)
		gts = gru_find_thread_state(vma, TSID(vaddr, vma));
	if (gts)
		mutex_lock(&gts->ts_ctxlock);
	else
		up_read(&mm->mmap_sem);
	return gts;
}

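/*
 * Same as gru_find_lock_gts(), but allocate the gts if it does not yet
 * exist. Note that mmap_sem is initially taken for write (the allocation
 * may update per-vma state) and is downgraded to read before returning,
 * so callers can release it with gru_unlock_gts() either way.
 */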
static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct gru_thread_state *gts = NULL;

	down_write(&mm->mmap_sem);
	vma = gru_find_vma(vaddr);
	if (vma)
		gts = gru_alloc_thread_state(vma, TSID(vaddr, vma));
	if (gts) {
		mutex_lock(&gts->ts_ctxlock);
		downgrade_write(&mm->mmap_sem);
	} else {
		up_write(&mm->mmap_sem);
	}

	return gts;
}

/*
 * Unlock a GTS that was previously locked with gru_find_lock_gts() or
 * gru_alloc_locked_gts().
 */
static void gru_unlock_gts(struct gru_thread_state *gts)
{
	mutex_unlock(&gts->ts_ctxlock);
	up_read(&current->mm->mmap_sem);
}

/*
 * Set a CB.istatus to active using a user virtual address. This must be done
 * just prior to a TFH RESTART. The new cb.istatus is an in-cache status ONLY.
 * If the line is evicted, the status may be lost. The in-cache update
 * is necessary to prevent the user from seeing a stale cb.istatus that will
 * change as soon as the TFH restart is complete. Races may cause an
 * occasional failure to clear the cb.istatus, but that is ok.
 *
 * If the cb address is not valid (should not happen, but...), nothing
 * bad will happen. The get_user()/put_user() will fail but there
 * are no bad side-effects.
 */
static void gru_cb_set_istatus_active(unsigned long __user *cb)
{
	union {
		struct gru_instruction_bits bits;
		unsigned long dw;
	} u;

	if (cb) {
		get_user(u.dw, cb);
		u.bits.istatus = CBS_ACTIVE;
		put_user(u.dw, cb);
	}
}

/*
 * Convert an interrupt IRQ to a pointer to the GRU GTS that caused the
 * interrupt. Interrupts are always sent to a cpu on the blade that contains
 * the GRU (except for headless blades, which are not currently supported).
 * A blade has N grus; a block of N consecutive IRQs is assigned to the GRUs.
 * The IRQ number uniquely identifies the GRU chiplet on the local blade that
 * caused the interrupt. Always called in interrupt context.
 */
static inline struct gru_state *irq_to_gru(int irq)
{
	return &gru_base[uv_numa_blade_id()]->bs_grus[irq - IRQ_GRU];
}

/*
 * Read & clear a TFM
 *
 * The GRU has an array of fault maps. A map is private to a cpu.
 * Only one cpu will be accessing a cpu's fault map.
 *
 * This function scans the cpu-private fault map & clears all bits that
 * are set. The function returns a bitmap that indicates the bits that
 * were cleared. Note that since the maps may be updated asynchronously by
 * the GRU, atomic operations must be used to clear bits.
 */
static void get_clear_fault_map(struct gru_state *gru,
				struct gru_tlb_fault_map *map)
{
	unsigned long i, k;
	struct gru_tlb_fault_map *tfm;

	tfm = get_tfm_for_cpu(gru, gru_cpu_fault_map_id());
	prefetchw(tfm);		/* Helps on hardware, required for emulator */
	for (i = 0; i < BITS_TO_LONGS(GRU_NUM_CBE); i++) {
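		/*
		 * Read the word first and only use xchg() when bits are set:
		 * the map is usually empty, and the plain read avoids an
		 * unnecessary atomic operation. The xchg() itself is needed
		 * because the GRU may set additional bits asynchronously
		 * between the read and the clear.
		 */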
		k = tfm->fault_bits[i];
		if (k)
			k = xchg(&tfm->fault_bits[i], 0UL);
		map->fault_bits[i] = k;
	}

	/*
	 * Not functionally required but helps performance. (Required
	 * on emulator)
	 */
	gru_flush_cache(tfm);
}

/*
 * Atomic (interrupt context) & non-atomic (user context) functions to
 * convert a vaddr into a physical address. The size of the page
 * is returned in pageshift.
 *	returns:
 *		  0 - successful
 *		< 0 - error code
 *		  1 - (atomic only) try again in non-atomic context
 */
static int non_atomic_pte_lookup(struct vm_area_struct *vma,
				 unsigned long vaddr, int write,
				 unsigned long *paddr, int *pageshift)
{
	struct page *page;

	/* ZZZ Need to handle HUGE pages */
	if (is_vm_hugetlb_page(vma))
		return -EFAULT;
	*pageshift = PAGE_SHIFT;
	if (get_user_pages
	    (current, current->mm, vaddr, 1, write, 0, &page, NULL) <= 0)
		return -EFAULT;
	*paddr = page_to_phys(page);
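	/*
	 * The page reference taken by get_user_pages() can be dropped right
	 * away: dropins are revalidated against in-progress range
	 * invalidates and failed if one is active (see the ms_range_active
	 * checks in gru_try_dropin()).
	 */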
	put_page(page);
	return 0;
}

/*
 * atomic_pte_lookup
 *
 * Convert a user virtual address to a physical address
 * Only supports Intel large pages (2MB only) on x86_64.
 * ZZZ - hugepage support is incomplete
 *
 * NOTE: mmap_sem is already held on entry to this function. This
 * guarantees existence of the page tables.
 */
static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
	int write, unsigned long *paddr, int *pageshift)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pud_t *pudp;
	pte_t pte;

	pgdp = pgd_offset(vma->vm_mm, vaddr);
	if (unlikely(pgd_none(*pgdp)))
		goto err;

	pudp = pud_offset(pgdp, vaddr);
	if (unlikely(pud_none(*pudp)))
		goto err;

	pmdp = pmd_offset(pudp, vaddr);
	if (unlikely(pmd_none(*pmdp)))
		goto err;
#ifdef CONFIG_X86_64
	if (unlikely(pmd_large(*pmdp)))
		pte = *(pte_t *) pmdp;
	else
#endif
		pte = *pte_offset_kernel(pmdp, vaddr);

	if (unlikely(!pte_present(pte) ||
		     (write && (!pte_write(pte) || !pte_dirty(pte)))))
		return 1;

	*paddr = pte_pfn(pte) << PAGE_SHIFT;
#ifdef CONFIG_HUGETLB_PAGE
	*pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
#else
	*pageshift = PAGE_SHIFT;
#endif
	return 0;

err:
	local_irq_enable();
	return 1;
}
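/*
 * Convert a user vaddr into a GRU global physical address (gpa) suitable
 * for a TLB dropin.
 * Returns:
 *	  0 - success; *gpa and *pageshift are valid
 *	 -1 - vaddr is not a valid mapping; caller raises an exception
 *	 -2 - lookup would sleep; retry in non-atomic context (UPM)
 */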
static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
		    int write, int atomic, unsigned long *gpa, int *pageshift)
{
	struct mm_struct *mm = gts->ts_mm;
	struct vm_area_struct *vma;
	unsigned long paddr;
	int ret, ps;

	vma = find_vma(mm, vaddr);
	if (!vma)
		goto inval;

	/*
	 * Atomic lookup is faster & usually works even if called in non-atomic
	 * context.
	 */
	rmb();	/* Must check ms_range_active before loading PTEs */
	ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps);
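	/*
	 * A nonzero return means the fast lookup could not complete. In
	 * interrupt (atomic) context the fault must be bounced to user
	 * polling mode; in user context the sleeping lookup is tried before
	 * the address is declared invalid.
	 */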
	if (ret) {
		if (atomic)
			goto upm;
		if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps))
			goto inval;
	}
	if (is_gru_paddr(paddr))
		goto inval;
	paddr = paddr & ~((1UL << ps) - 1);
	*gpa = uv_soc_phys_ram_to_gpa(paddr);
	*pageshift = ps;
	return 0;

inval:
	return -1;
upm:
	return -2;
}

/*
 * Drop a TLB entry into the GRU. The fault is described by info in a TFH.
 * Input:
 *	cb	Address of user CBR. Null if not running in user context
 * Return:
 *	  0 = dropin, exception, or switch to UPM successful
 *	  1 = range invalidate active
 *	< 0 = error code
 */
static int gru_try_dropin(struct gru_thread_state *gts,
			  struct gru_tlb_fault_handle *tfh,
			  unsigned long __user *cb)
{
	int pageshift = 0, asid, write, ret, atomic = !cb;
	unsigned long gpa = 0, vaddr = 0;

	/*
	 * NOTE: The GRU contains magic hardware that eliminates races between
	 * TLB invalidates and TLB dropins. If an invalidate occurs
	 * in the window between reading the TFH and the subsequent TLB dropin,
	 * the dropin is ignored. This eliminates the need for additional locks.
	 */

	/*
	 * Error if TFH state is IDLE or FMM mode & the user is issuing a UPM
	 * call. Might be a hardware race OR a stupid user. Ignore FMM because
	 * FMM is a transient state.
	 */
	if (tfh->state == TFHSTATE_IDLE)
		goto failidle;
	if (tfh->state == TFHSTATE_MISS_FMM && cb)
		goto failfmm;

	write = (tfh->cause & TFHCAUSE_TLB_MOD) != 0;
	vaddr = tfh->missvaddr;
	asid = tfh->missasid;
	if (asid == 0)
		goto failnoasid;

	rmb();	/* TFH must be cache resident before reading ms_range_active */

	/*
	 * TFH is cache resident - at least briefly. Fail the dropin
	 * if a range invalidate is active.
	 */
	if (atomic_read(&gts->ts_gms->ms_range_active))
		goto failactive;

	ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
	if (ret == -1)
		goto failinval;
	if (ret == -2)
		goto failupm;

	if (!(gts->ts_sizeavail & GRU_SIZEAVAIL(pageshift))) {
		gts->ts_sizeavail |= GRU_SIZEAVAIL(pageshift);
		if (atomic || !gru_update_cch(gts, 0)) {
			gts->ts_force_cch_reload = 1;
			goto failupm;
		}
	}
	gru_cb_set_istatus_active(cb);
	tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write,
			  GRU_PAGESIZE(pageshift));
	STAT(tlb_dropin);
	gru_dbg(grudev,
		"%s: tfh 0x%p, vaddr 0x%lx, asid 0x%x, ps %d, gpa 0x%lx\n",
		ret ? "non-atomic" : "atomic", tfh, vaddr, asid,
		pageshift, gpa);
	return 0;

failnoasid:
	/* No asid (delayed unload). */
	STAT(tlb_dropin_fail_no_asid);
	gru_dbg(grudev, "FAILED no_asid tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
	if (!cb)
		tfh_user_polling_mode(tfh);
	else
		gru_flush_cache(tfh);
	return -EAGAIN;

failupm:
	/* Atomic failure switch CBR to UPM */
	tfh_user_polling_mode(tfh);
	STAT(tlb_dropin_fail_upm);
	gru_dbg(grudev, "FAILED upm tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
	return 1;

failfmm:
	/* FMM state on UPM call */
	gru_flush_cache(tfh);
	STAT(tlb_dropin_fail_fmm);
	gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state);
	return 0;

failidle:
	/* TFH was idle - no miss pending */
	gru_flush_cache(tfh);
	if (cb)
		gru_flush_cache(cb);
	STAT(tlb_dropin_fail_idle);
	gru_dbg(grudev, "FAILED idle tfh: 0x%p, state %d\n", tfh, tfh->state);
	return 0;

failinval:
	/* All errors (atomic & non-atomic) switch CBR to EXCEPTION state */
	tfh_exception(tfh);
	STAT(tlb_dropin_fail_invalid);
	gru_dbg(grudev, "FAILED inval tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
	return -EFAULT;

failactive:
	/* Range invalidate active. Switch to UPM iff atomic */
	if (!cb)
		tfh_user_polling_mode(tfh);
	else
		gru_flush_cache(tfh);
	STAT(tlb_dropin_fail_range_active);
	gru_dbg(grudev, "FAILED range active: tfh 0x%p, vaddr 0x%lx\n",
		tfh, vaddr);
	return 1;
}

/*
 * Process an external interrupt from the GRU. This interrupt is
 * caused by a TLB miss.
 * Note that this is the interrupt handler that is registered with the
 * Linux interrupt subsystem.
 */
irqreturn_t gru_intr(int irq, void *dev_id)
{
	struct gru_state *gru;
	struct gru_tlb_fault_map map;
	struct gru_thread_state *gts;
	struct gru_tlb_fault_handle *tfh = NULL;
	int cbrnum, ctxnum;

	STAT(intr);

	gru = irq_to_gru(irq);
	if (!gru) {
		dev_err(grudev, "GRU: invalid interrupt: cpu %d, irq %d\n",
			raw_smp_processor_id(), irq);
		return IRQ_NONE;
	}
	get_clear_fault_map(gru, &map);
	gru_dbg(grudev, "irq %d, gru %x, map 0x%lx\n", irq, gru->gs_gid,
		map.fault_bits[0]);

	for_each_cbr_in_tfm(cbrnum, map.fault_bits) {
		tfh = get_tfh_by_index(gru, cbrnum);
		prefetchw(tfh);	/* Helps on hdw, required for emulator */

		/*
		 * When hardware sets a bit in the faultmap, it implicitly
		 * locks the GRU context so that it cannot be unloaded.
		 * The gts cannot change until a TFH start/writestart command
		 * is issued.
		 */
		ctxnum = tfh->ctxnum;
		gts = gru->gs_gts[ctxnum];

		/*
		 * This is running in interrupt context. Trylock the mmap_sem.
		 * If it fails, retry the fault in user context.
		 */
		if (down_read_trylock(&gts->ts_mm->mmap_sem)) {
			gru_try_dropin(gts, tfh, NULL);
			up_read(&gts->ts_mm->mmap_sem);
		} else {
			tfh_user_polling_mode(tfh);
			STAT(intr_mm_lock_failed);
		}
	}
	return IRQ_HANDLED;
}

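/*
 * Retry a dropin for a user-detected TLB miss (called from
 * gru_handle_user_call_os()). Sleeps until no range invalidate is
 * active, then retries gru_try_dropin() for as long as it asks to be
 * retried (return value > 0).
 */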
static int gru_user_dropin(struct gru_thread_state *gts,
			   struct gru_tlb_fault_handle *tfh,
			   unsigned long __user *cb)
{
	struct gru_mm_struct *gms = gts->ts_gms;
	int ret;

	while (1) {
		wait_event(gms->ms_wait_queue,
			   atomic_read(&gms->ms_range_active) == 0);
		prefetchw(tfh);	/* Helps on hdw, required for emulator */
		ret = gru_try_dropin(gts, tfh, cb);
		if (ret <= 0)
			return ret;
		STAT(call_os_wait_queue);
	}
}

/*
 * This interface is called as a result of a user detecting a "call OS" bit
 * in a user CB. Normally this means that a TLB fault has occurred.
 *	cb - user virtual address of the CB
 */
int gru_handle_user_call_os(unsigned long cb)
{
	struct gru_tlb_fault_handle *tfh;
	struct gru_thread_state *gts;
	unsigned long __user *cbp;
	int ucbnum, cbrnum, ret = -EINVAL;

	STAT(call_os);
	gru_dbg(grudev, "address 0x%lx\n", cb);

	/* sanity check the cb pointer */
	ucbnum = get_cb_number((void *)cb);
	if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB)
		return -EINVAL;
	cbp = (unsigned long *)cb;

	gts = gru_find_lock_gts(cb);
	if (!gts)
		return -EINVAL;

	if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE)
		goto exit;

	/*
	 * If force_unload is set, the UPM TLB fault is phony. The task
	 * has migrated to another node and the GSEG must be moved. Just
	 * unload the context. The task will page fault and assign a new
	 * context.
	 */
	if (gts->ts_tgid_owner == current->tgid && gts->ts_blade >= 0 &&
	    gts->ts_blade != uv_numa_blade_id()) {
		STAT(call_os_offnode_reference);
		gts->ts_force_unload = 1;
	}

	/*
	 * CCH may contain stale data if ts_force_cch_reload is set.
	 */
	if (gts->ts_gru && gts->ts_force_cch_reload) {
		gru_update_cch(gts, 0);
		gts->ts_force_cch_reload = 0;
	}

	ret = -EAGAIN;
	cbrnum = thread_cbr_number(gts, ucbnum);
	if (gts->ts_force_unload) {
		gru_unload_context(gts, 1);
	} else if (gts->ts_gru) {
		tfh = get_tfh_by_index(gts->ts_gru, cbrnum);
		ret = gru_user_dropin(gts, tfh, cbp);
	}
exit:
	gru_unlock_gts(gts);
	return ret;
}

/*
 * Fetch the exception detail information for a CB that terminated with
 * an exception.
 */
int gru_get_exception_detail(unsigned long arg)
{
	struct control_block_extended_exc_detail excdet;
	struct gru_control_block_extended *cbe;
	struct gru_thread_state *gts;
	int ucbnum, cbrnum, ret;

	STAT(user_exception);
	if (copy_from_user(&excdet, (void __user *)arg, sizeof(excdet)))
		return -EFAULT;

	gru_dbg(grudev, "address 0x%lx\n", excdet.cb);
	gts = gru_find_lock_gts(excdet.cb);
	if (!gts)
		return -EINVAL;

	ucbnum = get_cb_number((void *)excdet.cb);
	if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) {
		ret = -EINVAL;
	} else if (gts->ts_gru) {
		cbrnum = thread_cbr_number(gts, ucbnum);
		cbe = get_cbe_by_index(gts->ts_gru, cbrnum);
		prefetchw(cbe);	/* Harmless on hardware, required for emulator */
		excdet.opc = cbe->opccpy;
		excdet.exopc = cbe->exopccpy;
		excdet.ecause = cbe->ecause;
		excdet.exceptdet0 = cbe->idef1upd;
		excdet.exceptdet1 = cbe->idef3upd;
		ret = 0;
	} else {
		ret = -EAGAIN;
	}
	gru_unlock_gts(gts);

	gru_dbg(grudev, "address 0x%lx, ecause 0x%x\n", excdet.cb,
		excdet.ecause);
	if (!ret && copy_to_user((void __user *)arg, &excdet, sizeof(excdet)))
		ret = -EFAULT;
	return ret;
}
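/*
 * Unload every GRU context on every chiplet (root only). Reached via
 * gru_user_unload_context() when a zero gseg address is passed.
 */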
static int gru_unload_all_contexts(void)
{
	struct gru_thread_state *gts;
	struct gru_state *gru;
	int gid, ctxnum;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	foreach_gid(gid) {
		gru = GID_TO_GRU(gid);
		spin_lock(&gru->gs_lock);
		for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
			gts = gru->gs_gts[ctxnum];
			if (gts && mutex_trylock(&gts->ts_ctxlock)) {
				spin_unlock(&gru->gs_lock);
				gru_unload_context(gts, 1);
				/*
				 * Only the ctxlock is held here; mmap_sem is
				 * not, so gru_unlock_gts() must not be used.
				 */
				mutex_unlock(&gts->ts_ctxlock);
				spin_lock(&gru->gs_lock);
			}
		}
		spin_unlock(&gru->gs_lock);
	}
	return 0;
}

/*
 * User request to unload a context. Content is saved for possible reload.
 */
int gru_user_unload_context(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_unload_context_req req;

	STAT(user_unload_context);
	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	gru_dbg(grudev, "gseg 0x%lx\n", req.gseg);

	if (!req.gseg)
		return gru_unload_all_contexts();

	gts = gru_find_lock_gts(req.gseg);
	if (!gts)
		return -EINVAL;

	if (gts->ts_gru)
		gru_unload_context(gts, 1);
	gru_unlock_gts(gts);

	return 0;
}

/*
 * User request to flush a range of virtual addresses from the GRU TLB
 * (Mainly for testing).
 */
int gru_user_flush_tlb(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_flush_tlb_req req;

	STAT(user_flush_tlb);
	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	gru_dbg(grudev, "gseg 0x%lx, vaddr 0x%lx, len 0x%lx\n", req.gseg,
		req.vaddr, req.len);

	gts = gru_find_lock_gts(req.gseg);
	if (!gts)
		return -EINVAL;

	gru_flush_tlb_range(gts->ts_gms, req.vaddr, req.len);
	gru_unlock_gts(gts);

	return 0;
}

/*
 * Register the current task as the user of the GSEG slice.
 * Needed for TLB fault interrupt targeting.
 */
int gru_set_task_slice(long address)
{
	struct gru_thread_state *gts;

	STAT(set_task_slice);
	gru_dbg(grudev, "address 0x%lx\n", address);
	gts = gru_alloc_locked_gts(address);
	if (!gts)
		return -EINVAL;

	gts->ts_tgid_owner = current->tgid;
	gru_unlock_gts(gts);

	return 0;
}