drivers/staging/rdma/hfi1/file_ops.c
/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * - Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include <rdma/ib.h>

#include "hfi.h"
#include "pio.h"
#include "device.h"
#include "common.h"
#include "trace.h"
#include "user_sdma.h"
#include "user_exp_rcv.h"
#include "eprom.h"
#include "aspm.h"
#include "mmu_rb.h"

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

#define SEND_CTXT_HALT_TIMEOUT 1000 /* msecs */

/*
 * File operation functions
 */
static int hfi1_file_open(struct inode *, struct file *);
static int hfi1_file_close(struct inode *, struct file *);
static ssize_t hfi1_file_write(struct file *, const char __user *,
			       size_t, loff_t *);
static ssize_t hfi1_write_iter(struct kiocb *, struct iov_iter *);
static unsigned int hfi1_poll(struct file *, struct poll_table_struct *);
static int hfi1_file_mmap(struct file *, struct vm_area_struct *);

static u64 kvirt_to_phys(void *);
static int assign_ctxt(struct file *, struct hfi1_user_info *);
static int init_subctxts(struct hfi1_ctxtdata *, const struct hfi1_user_info *);
static int user_init(struct file *);
static int get_ctxt_info(struct file *, void __user *, __u32);
static int get_base_info(struct file *, void __user *, __u32);
static int setup_ctxt(struct file *);
static int setup_subctxt(struct hfi1_ctxtdata *);
static int get_user_context(struct file *, struct hfi1_user_info *,
			    int, unsigned);
static int find_shared_ctxt(struct file *, const struct hfi1_user_info *);
static int allocate_ctxt(struct file *, struct hfi1_devdata *,
			 struct hfi1_user_info *);
static unsigned int poll_urgent(struct file *, struct poll_table_struct *);
static unsigned int poll_next(struct file *, struct poll_table_struct *);
static int user_event_ack(struct hfi1_ctxtdata *, int, unsigned long);
static int set_ctxt_pkey(struct hfi1_ctxtdata *, unsigned, u16);
static int manage_rcvq(struct hfi1_ctxtdata *, unsigned, int);
static int vma_fault(struct vm_area_struct *, struct vm_fault *);

static const struct file_operations hfi1_file_ops = {
	.owner = THIS_MODULE,
	.write = hfi1_file_write,
	.write_iter = hfi1_write_iter,
	.open = hfi1_file_open,
	.release = hfi1_file_close,
	.poll = hfi1_poll,
	.mmap = hfi1_file_mmap,
	.llseek = noop_llseek,
};

static struct vm_operations_struct vm_ops = {
	.fault = vma_fault,
};

/*
 * Types of memories mapped into user processes' space
 */
enum mmap_types {
	PIO_BUFS = 1,
	PIO_BUFS_SOP,
	PIO_CRED,
	RCV_HDRQ,
	RCV_EGRBUF,
	UREGS,
	EVENTS,
	STATUS,
	RTAIL,
	SUBCTXT_UREGS,
	SUBCTXT_RCV_HDRQ,
	SUBCTXT_EGRBUF,
	SDMA_COMP
};

/*
 * Masks and offsets defining the mmap tokens
 */
#define HFI1_MMAP_OFFSET_MASK   0xfffULL
#define HFI1_MMAP_OFFSET_SHIFT  0
#define HFI1_MMAP_SUBCTXT_MASK  0xfULL
#define HFI1_MMAP_SUBCTXT_SHIFT 12
#define HFI1_MMAP_CTXT_MASK     0xffULL
#define HFI1_MMAP_CTXT_SHIFT    16
#define HFI1_MMAP_TYPE_MASK     0xfULL
#define HFI1_MMAP_TYPE_SHIFT    24
#define HFI1_MMAP_MAGIC_MASK    0xffffffffULL
#define HFI1_MMAP_MAGIC_SHIFT   32

#define HFI1_MMAP_MAGIC         0xdabbad00

#define HFI1_MMAP_TOKEN_SET(field, val)	\
	(((val) & HFI1_MMAP_##field##_MASK) << HFI1_MMAP_##field##_SHIFT)
#define HFI1_MMAP_TOKEN_GET(field, token) \
	(((token) >> HFI1_MMAP_##field##_SHIFT) & HFI1_MMAP_##field##_MASK)
#define HFI1_MMAP_TOKEN(type, ctxt, subctxt, addr)   \
	(HFI1_MMAP_TOKEN_SET(MAGIC, HFI1_MMAP_MAGIC) | \
	HFI1_MMAP_TOKEN_SET(TYPE, type) | \
	HFI1_MMAP_TOKEN_SET(CTXT, ctxt) | \
	HFI1_MMAP_TOKEN_SET(SUBCTXT, subctxt) | \
	HFI1_MMAP_TOKEN_SET(OFFSET, (offset_in_page(addr))))

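/*
 * Layout of a 64-bit mmap token, as implied by the SET/GET macros above
 * (bits 31:28 are unused):
 *
 *	bits 63:32  MAGIC    (0xdabbad00)
 *	bits 27:24  TYPE     (enum mmap_types)
 *	bits 23:16  CTXT
 *	bits 15:12  SUBCTXT
 *	bits 11:0   OFFSET   (offset within the page)
 *
 * For example, HFI1_MMAP_TOKEN(UREGS, 3, 0, 0) yields
 * 0xdabbad0006030000: TYPE = 6 (UREGS), CTXT = 3, SUBCTXT = 0,
 * OFFSET = 0. User code passes such a token back as the mmap() offset,
 * and hfi1_file_mmap() unpacks it with HFI1_MMAP_TOKEN_GET().
 */
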
#define dbg(fmt, ...)				\
	pr_info(fmt, ##__VA_ARGS__)

static inline int is_valid_mmap(u64 token)
{
	return (HFI1_MMAP_TOKEN_GET(MAGIC, token) == HFI1_MMAP_MAGIC);
}

static int hfi1_file_open(struct inode *inode, struct file *fp)
{
	/* The real work is performed later in assign_ctxt() */
	fp->private_data = kzalloc(sizeof(struct hfi1_filedata), GFP_KERNEL);
	if (fp->private_data) /* no cpu affinity by default */
		((struct hfi1_filedata *)fp->private_data)->rec_cpu_num = -1;
	return fp->private_data ? 0 : -ENOMEM;
}

static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
			       size_t count, loff_t *offset)
{
	const struct hfi1_cmd __user *ucmd;
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_cmd cmd;
	struct hfi1_user_info uinfo;
	struct hfi1_tid_info tinfo;
	unsigned long addr;
	ssize_t consumed = 0, copy = 0, ret = 0;
	void *dest = NULL;
	__u64 user_val = 0;
	int uctxt_required = 1;
	int must_be_root = 0;

	/* FIXME: This interface cannot continue out of staging */
	if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
		return -EACCES;

	if (count < sizeof(cmd)) {
		ret = -EINVAL;
		goto bail;
	}

	ucmd = (const struct hfi1_cmd __user *)data;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd))) {
		ret = -EFAULT;
		goto bail;
	}

	consumed = sizeof(cmd);

	switch (cmd.type) {
	case HFI1_CMD_ASSIGN_CTXT:
		uctxt_required = 0;	/* assigned user context not required */
		copy = sizeof(uinfo);
		dest = &uinfo;
		break;
	case HFI1_CMD_SDMA_STATUS_UPD:
	case HFI1_CMD_CREDIT_UPD:
		copy = 0;
		break;
	case HFI1_CMD_TID_UPDATE:
	case HFI1_CMD_TID_FREE:
	case HFI1_CMD_TID_INVAL_READ:
		copy = sizeof(tinfo);
		dest = &tinfo;
		break;
	case HFI1_CMD_USER_INFO:
	case HFI1_CMD_RECV_CTRL:
	case HFI1_CMD_POLL_TYPE:
	case HFI1_CMD_ACK_EVENT:
	case HFI1_CMD_CTXT_INFO:
	case HFI1_CMD_SET_PKEY:
	case HFI1_CMD_CTXT_RESET:
		copy = 0;
		user_val = cmd.addr;
		break;
	case HFI1_CMD_EP_INFO:
	case HFI1_CMD_EP_ERASE_CHIP:
	case HFI1_CMD_EP_ERASE_RANGE:
	case HFI1_CMD_EP_READ_RANGE:
	case HFI1_CMD_EP_WRITE_RANGE:
		uctxt_required = 0;	/* assigned user context not required */
		must_be_root = 1;	/* validate user */
		copy = 0;
		break;
	default:
		ret = -EINVAL;
		goto bail;
	}

	/* If the command comes with user data, copy it. */
	if (copy) {
		if (copy_from_user(dest, (void __user *)cmd.addr, copy)) {
			ret = -EFAULT;
			goto bail;
		}
		consumed += copy;
	}

	/*
	 * Make sure there is a uctxt when needed.
	 */
	if (uctxt_required && !uctxt) {
		ret = -EINVAL;
		goto bail;
	}

	/* only root can do these operations */
	if (must_be_root && !capable(CAP_SYS_ADMIN)) {
		ret = -EPERM;
		goto bail;
	}

	switch (cmd.type) {
	case HFI1_CMD_ASSIGN_CTXT:
		ret = assign_ctxt(fp, &uinfo);
		if (ret < 0)
			goto bail;
		ret = setup_ctxt(fp);
		if (ret)
			goto bail;
		ret = user_init(fp);
		break;
	case HFI1_CMD_CTXT_INFO:
		ret = get_ctxt_info(fp, (void __user *)(unsigned long)
				    user_val, cmd.len);
		break;
	case HFI1_CMD_USER_INFO:
		ret = get_base_info(fp, (void __user *)(unsigned long)
				    user_val, cmd.len);
		break;
	case HFI1_CMD_SDMA_STATUS_UPD:
		break;
	case HFI1_CMD_CREDIT_UPD:
		if (uctxt && uctxt->sc)
			sc_return_credits(uctxt->sc);
		break;
	case HFI1_CMD_TID_UPDATE:
		ret = hfi1_user_exp_rcv_setup(fp, &tinfo);
		if (!ret) {
			/*
			 * Copy the number of tidlist entries we used
			 * and the length of the buffer we registered.
			 * These fields are adjacent in the structure so
			 * we can copy them at the same time.
			 */
			addr = (unsigned long)cmd.addr +
				offsetof(struct hfi1_tid_info, tidcnt);
			if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
					 sizeof(tinfo.tidcnt) +
					 sizeof(tinfo.length)))
				ret = -EFAULT;
		}
		break;
	case HFI1_CMD_TID_INVAL_READ:
		ret = hfi1_user_exp_rcv_invalid(fp, &tinfo);
		if (ret)
			break;
		addr = (unsigned long)cmd.addr +
			offsetof(struct hfi1_tid_info, tidcnt);
		if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
				 sizeof(tinfo.tidcnt)))
			ret = -EFAULT;
		break;
	case HFI1_CMD_TID_FREE:
		ret = hfi1_user_exp_rcv_clear(fp, &tinfo);
		if (ret)
			break;
		addr = (unsigned long)cmd.addr +
			offsetof(struct hfi1_tid_info, tidcnt);
		if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
				 sizeof(tinfo.tidcnt)))
			ret = -EFAULT;
		break;
	case HFI1_CMD_RECV_CTRL:
		ret = manage_rcvq(uctxt, fd->subctxt, (int)user_val);
		break;
	case HFI1_CMD_POLL_TYPE:
		uctxt->poll_type = (typeof(uctxt->poll_type))user_val;
		break;
	case HFI1_CMD_ACK_EVENT:
		ret = user_event_ack(uctxt, fd->subctxt, user_val);
		break;
	case HFI1_CMD_SET_PKEY:
		if (HFI1_CAP_IS_USET(PKEY_CHECK))
			ret = set_ctxt_pkey(uctxt, fd->subctxt, user_val);
		else
			ret = -EPERM;
		break;
	case HFI1_CMD_CTXT_RESET: {
		struct send_context *sc;
		struct hfi1_devdata *dd;

		if (!uctxt || !uctxt->dd || !uctxt->sc) {
			ret = -EINVAL;
			break;
		}
		/*
		 * There is no protection here. User level has to
		 * guarantee that no one will be writing to the send
		 * context while it is being re-initialized.
		 * If user level breaks that guarantee, it will break
		 * its own context and no one else's.
		 */
		dd = uctxt->dd;
		sc = uctxt->sc;
		/*
		 * Wait until the interrupt handler has marked the
		 * context as halted or frozen. Report error if we time
		 * out.
		 */
		wait_event_interruptible_timeout(
			sc->halt_wait, (sc->flags & SCF_HALTED),
			msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
		if (!(sc->flags & SCF_HALTED)) {
			ret = -ENOLCK;
			break;
		}
		/*
		 * If the send context was halted due to a Freeze,
		 * wait until the device has been "unfrozen" before
		 * resetting the context.
		 */
		if (sc->flags & SCF_FROZEN) {
			wait_event_interruptible_timeout(
				dd->event_queue,
				!(ACCESS_ONCE(dd->flags) & HFI1_FROZEN),
				msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
			if (dd->flags & HFI1_FROZEN) {
				ret = -ENOLCK;
				break;
			}
			if (dd->flags & HFI1_FORCED_FREEZE) {
				/*
				 * Don't allow context reset if we are into
				 * forced freeze
				 */
				ret = -ENODEV;
				break;
			}
			sc_disable(sc);
			ret = sc_enable(sc);
			hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB,
				     uctxt->ctxt);
		} else {
			ret = sc_restart(sc);
		}
		if (!ret)
			sc_return_credits(sc);
		break;
	}
	case HFI1_CMD_EP_INFO:
	case HFI1_CMD_EP_ERASE_CHIP:
	case HFI1_CMD_EP_ERASE_RANGE:
	case HFI1_CMD_EP_READ_RANGE:
	case HFI1_CMD_EP_WRITE_RANGE:
		ret = handle_eprom_command(fp, &cmd);
		break;
	}

	if (ret >= 0)
		ret = consumed;
bail:
	return ret;
}

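/*
 * A hedged sketch of the userspace side of this write() protocol (not
 * part of this file; struct hfi1_cmd members as dispatched above). The
 * caller writes a struct hfi1_cmd whose addr member, for commands that
 * carry a payload, points at the payload buffer:
 *
 *	struct hfi1_user_info uinfo = { ... };
 *	struct hfi1_cmd cmd = {
 *		.type = HFI1_CMD_ASSIGN_CTXT,
 *		.len  = sizeof(uinfo),
 *		.addr = (__u64)(uintptr_t)&uinfo,
 *	};
 *	ssize_t n = write(fd, &cmd, sizeof(cmd));
 *
 * On success, write() returns "consumed": the command header plus any
 * payload copied in, not a count of data produced.
 */
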
static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
{
	struct hfi1_filedata *fd = kiocb->ki_filp->private_data;
	struct hfi1_user_sdma_pkt_q *pq = fd->pq;
	struct hfi1_user_sdma_comp_q *cq = fd->cq;
	int ret = 0, done = 0, reqs = 0;
	unsigned long dim = from->nr_segs;

	if (!cq || !pq) {
		ret = -EIO;
		goto done;
	}

	if (!iter_is_iovec(from) || !dim) {
		ret = -EINVAL;
		goto done;
	}

	hfi1_cdbg(SDMA, "SDMA request from %u:%u (%lu)",
		  fd->uctxt->ctxt, fd->subctxt, dim);

	if (atomic_read(&pq->n_reqs) == pq->n_max_reqs) {
		ret = -ENOSPC;
		goto done;
	}

	while (dim) {
		unsigned long count = 0;

		ret = hfi1_user_sdma_process_request(
			kiocb->ki_filp, (struct iovec *)(from->iov + done),
			dim, &count);
		if (ret)
			goto done;
		dim -= count;
		done += count;
		reqs++;
	}
done:
	return ret ? ret : reqs;
}

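/*
 * A hedged usage sketch (not part of this file): userspace submits SDMA
 * requests with writev() on the context file descriptor, and the return
 * value is the number of requests queued, not a byte count (see
 * "return ret ? ret : reqs" above). The exact per-request iovec layout
 * is defined by user_sdma.c and the hfi1 user header; something like:
 *
 *	struct iovec iov[2] = {
 *		{ .iov_base = &req_hdr, .iov_len = sizeof(req_hdr) },
 *		{ .iov_base = payload,  .iov_len = payload_len },
 *	};
 *	ssize_t nreqs = writev(fd, iov, 2);
 */
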
static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd;
	unsigned long flags, pfn;
	u64 token = vma->vm_pgoff << PAGE_SHIFT,
		memaddr = 0;
	u8 subctxt, mapio = 0, vmf = 0, type;
	ssize_t memlen = 0;
	int ret = 0;
	u16 ctxt;

	if (!is_valid_mmap(token) || !uctxt ||
	    !(vma->vm_flags & VM_SHARED)) {
		ret = -EINVAL;
		goto done;
	}
	dd = uctxt->dd;
	ctxt = HFI1_MMAP_TOKEN_GET(CTXT, token);
	subctxt = HFI1_MMAP_TOKEN_GET(SUBCTXT, token);
	type = HFI1_MMAP_TOKEN_GET(TYPE, token);
	if (ctxt != uctxt->ctxt || subctxt != fd->subctxt) {
		ret = -EINVAL;
		goto done;
	}

	flags = vma->vm_flags;

	switch (type) {
	case PIO_BUFS:
	case PIO_BUFS_SOP:
		memaddr = ((dd->physaddr + TXE_PIO_SEND) +
				/* chip pio base */
			   (uctxt->sc->hw_context * BIT(16))) +
				/* 64K PIO space / ctxt */
			(type == PIO_BUFS_SOP ?
				(TXE_PIO_SIZE / 2) : 0); /* sop? */
		/*
		 * Map only the amount allocated to the context, not the
		 * entire available context's PIO space.
		 */
		memlen = PAGE_ALIGN(uctxt->sc->credits * PIO_BLOCK_SIZE);
		flags &= ~VM_MAYREAD;
		flags |= VM_DONTCOPY | VM_DONTEXPAND;
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		mapio = 1;
		break;
	case PIO_CRED:
		if (flags & VM_WRITE) {
			ret = -EPERM;
			goto done;
		}
		/*
		 * The credit return location for this context could be on the
		 * second or third page allocated for credit returns (if number
		 * of enabled contexts > 64 and 128 respectively).
		 */
		memaddr = dd->cr_base[uctxt->numa_id].pa +
			(((u64)uctxt->sc->hw_free -
			  (u64)dd->cr_base[uctxt->numa_id].va) & PAGE_MASK);
		memlen = PAGE_SIZE;
		flags &= ~VM_MAYWRITE;
		flags |= VM_DONTCOPY | VM_DONTEXPAND;
		/*
		 * The driver has already allocated memory for credit
		 * returns and programmed it into the chip. Has that
		 * memory been flagged as non-cached?
		 */
		/* vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); */
		mapio = 1;
		break;
	case RCV_HDRQ:
		memaddr = uctxt->rcvhdrq_phys;
		memlen = uctxt->rcvhdrq_size;
		break;
	case RCV_EGRBUF: {
		unsigned long addr;
		int i;
		/*
		 * The RcvEgr buffer needs to be handled differently
		 * as multiple non-contiguous pages need to be mapped
		 * into the user process.
		 */
		memlen = uctxt->egrbufs.size;
		if ((vma->vm_end - vma->vm_start) != memlen) {
			dd_dev_err(dd, "Eager buffer map size invalid (%lu != %lu)\n",
				   (vma->vm_end - vma->vm_start), memlen);
			ret = -EINVAL;
			goto done;
		}
		if (vma->vm_flags & VM_WRITE) {
			ret = -EPERM;
			goto done;
		}
		vma->vm_flags &= ~VM_MAYWRITE;
		addr = vma->vm_start;
		for (i = 0 ; i < uctxt->egrbufs.numbufs; i++) {
			ret = remap_pfn_range(
				vma, addr,
				uctxt->egrbufs.buffers[i].phys >> PAGE_SHIFT,
				uctxt->egrbufs.buffers[i].len,
				vma->vm_page_prot);
			if (ret < 0)
				goto done;
			addr += uctxt->egrbufs.buffers[i].len;
		}
		ret = 0;
		goto done;
	}
	case UREGS:
		/*
		 * Map only the page that contains this context's user
		 * registers.
		 */
		memaddr = (unsigned long)
			(dd->physaddr + RXE_PER_CONTEXT_USER)
			+ (uctxt->ctxt * RXE_PER_CONTEXT_SIZE);
		/*
		 * TidFlow table is on the same page as the rest of the
		 * user registers.
		 */
		memlen = PAGE_SIZE;
		flags |= VM_DONTCOPY | VM_DONTEXPAND;
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		mapio = 1;
		break;
	case EVENTS:
		/*
		 * Use the page where this context's flags are. User level
		 * knows where its own bitmap is within the page.
		 */
		memaddr = (unsigned long)(dd->events +
					  ((uctxt->ctxt - dd->first_user_ctxt) *
					   HFI1_MAX_SHARED_CTXTS)) & PAGE_MASK;
		memlen = PAGE_SIZE;
		/*
		 * v3.7 removes VM_RESERVED but the effect is kept by
		 * using VM_IO.
		 */
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	case STATUS:
		memaddr = kvirt_to_phys((void *)dd->status);
		memlen = PAGE_SIZE;
		flags |= VM_IO | VM_DONTEXPAND;
		break;
	case RTAIL:
		if (!HFI1_CAP_IS_USET(DMA_RTAIL)) {
			/*
			 * If the memory allocation failed, the context alloc
			 * also would have failed, so we would never get here
			 */
			ret = -EINVAL;
			goto done;
		}
		if (flags & VM_WRITE) {
			ret = -EPERM;
			goto done;
		}
		memaddr = uctxt->rcvhdrqtailaddr_phys;
		memlen = PAGE_SIZE;
		flags &= ~VM_MAYWRITE;
		break;
	case SUBCTXT_UREGS:
		memaddr = (u64)uctxt->subctxt_uregbase;
		memlen = PAGE_SIZE;
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	case SUBCTXT_RCV_HDRQ:
		memaddr = (u64)uctxt->subctxt_rcvhdr_base;
		memlen = uctxt->rcvhdrq_size * uctxt->subctxt_cnt;
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	case SUBCTXT_EGRBUF:
		memaddr = (u64)uctxt->subctxt_rcvegrbuf;
		memlen = uctxt->egrbufs.size * uctxt->subctxt_cnt;
		flags |= VM_IO | VM_DONTEXPAND;
		flags &= ~VM_MAYWRITE;
		vmf = 1;
		break;
	case SDMA_COMP: {
		struct hfi1_user_sdma_comp_q *cq = fd->cq;

		if (!cq) {
			ret = -EFAULT;
			goto done;
		}
		memaddr = (u64)cq->comps;
		memlen = PAGE_ALIGN(sizeof(*cq->comps) * cq->nentries);
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	}
	default:
		ret = -EINVAL;
		break;
	}

	if ((vma->vm_end - vma->vm_start) != memlen) {
		hfi1_cdbg(PROC, "%u:%u Memory size mismatch %lu:%lu",
			  uctxt->ctxt, fd->subctxt,
			  (vma->vm_end - vma->vm_start), memlen);
		ret = -EINVAL;
		goto done;
	}

	vma->vm_flags = flags;
	hfi1_cdbg(PROC,
		  "%u:%u type:%u io/vf:%d/%d, addr:0x%llx, len:%lu(%lu), flags:0x%lx\n",
		  ctxt, subctxt, type, mapio, vmf, memaddr, memlen,
		  vma->vm_end - vma->vm_start, vma->vm_flags);
	pfn = (unsigned long)(memaddr >> PAGE_SHIFT);
	if (vmf) {
		vma->vm_pgoff = pfn;
		vma->vm_ops = &vm_ops;
		ret = 0;
	} else if (mapio) {
		ret = io_remap_pfn_range(vma, vma->vm_start, pfn, memlen,
					 vma->vm_page_prot);
	} else {
		ret = remap_pfn_range(vma, vma->vm_start, pfn, memlen,
				      vma->vm_page_prot);
	}
done:
	return ret;
}

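/*
 * A hedged sketch of the userspace side (not part of this file): the
 * tokens handed out in struct hfi1_base_info by get_base_info() below
 * are passed back verbatim as the mmap() offset, e.g.
 *
 *	comps = mmap(NULL, len, PROT_READ, MAP_SHARED, fd,
 *		     binfo.sdma_comp_bufbase);
 *
 * MAP_SHARED is mandatory; the VM_SHARED check above rejects private
 * mappings, and a token whose magic, context, or subcontext does not
 * match the caller's is rejected with -EINVAL.
 */
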
/*
 * Local (non-chip) user memory is not mapped right away; it is faulted
 * in as the user-level code accesses it.
 */
static int vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page;

	page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);
	vmf->page = page;

	return 0;
}

static unsigned int hfi1_poll(struct file *fp, struct poll_table_struct *pt)
{
	struct hfi1_ctxtdata *uctxt;
	unsigned pollflag;

	uctxt = ((struct hfi1_filedata *)fp->private_data)->uctxt;
	if (!uctxt)
		pollflag = POLLERR;
	else if (uctxt->poll_type == HFI1_POLL_TYPE_URGENT)
		pollflag = poll_urgent(fp, pt);
	else if (uctxt->poll_type == HFI1_POLL_TYPE_ANYRCV)
		pollflag = poll_next(fp, pt);
	else /* invalid */
		pollflag = POLLERR;

	return pollflag;
}

static int hfi1_file_close(struct inode *inode, struct file *fp)
{
	struct hfi1_filedata *fdata = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fdata->uctxt;
	struct hfi1_devdata *dd;
	unsigned long flags, *ev;

	fp->private_data = NULL;

	if (!uctxt)
		goto done;

	hfi1_cdbg(PROC, "freeing ctxt %u:%u", uctxt->ctxt, fdata->subctxt);
	dd = uctxt->dd;
	mutex_lock(&hfi1_mutex);

	flush_wc();
	/* drain user sdma queue */
	hfi1_user_sdma_free_queues(fdata);

	/* release the cpu */
	hfi1_put_proc_affinity(dd, fdata->rec_cpu_num);

	/*
	 * Clear any left over, unhandled events so the next process that
	 * gets this context doesn't get confused.
	 */
	ev = dd->events + ((uctxt->ctxt - dd->first_user_ctxt) *
			   HFI1_MAX_SHARED_CTXTS) + fdata->subctxt;
	*ev = 0;

	if (--uctxt->cnt) {
		uctxt->active_slaves &= ~(1 << fdata->subctxt);
		uctxt->subpid[fdata->subctxt] = 0;
		mutex_unlock(&hfi1_mutex);
		goto done;
	}

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	/*
	 * Disable receive context and interrupt available, reset all
	 * RcvCtxtCtrl bits to default values.
	 */
	hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
		     HFI1_RCVCTRL_TIDFLOW_DIS |
		     HFI1_RCVCTRL_INTRAVAIL_DIS |
		     HFI1_RCVCTRL_TAILUPD_DIS |
		     HFI1_RCVCTRL_ONE_PKT_EGR_DIS |
		     HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
		     HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt->ctxt);
	/* Clear the context's J_KEY */
	hfi1_clear_ctxt_jkey(dd, uctxt->ctxt);
	/*
	 * Reset context integrity checks to default.
	 * (writes to CSRs probably belong in chip.c)
	 */
	write_kctxt_csr(dd, uctxt->sc->hw_context, SEND_CTXT_CHECK_ENABLE,
			hfi1_pkt_default_send_ctxt_mask(dd, uctxt->sc->type));
	sc_disable(uctxt->sc);
	uctxt->pid = 0;
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	dd->rcd[uctxt->ctxt] = NULL;

	hfi1_user_exp_rcv_free(fdata);
	hfi1_clear_ctxt_pkey(dd, uctxt->ctxt);

	uctxt->rcvwait_to = 0;
	uctxt->piowait_to = 0;
	uctxt->rcvnowait = 0;
	uctxt->pionowait = 0;
	uctxt->event_flags = 0;

	hfi1_stats.sps_ctxts--;
	if (++dd->freectxts == dd->num_user_contexts)
		aspm_enable_all(dd);
	mutex_unlock(&hfi1_mutex);
	hfi1_free_ctxtdata(dd, uctxt);
done:
	kfree(fdata);
	return 0;
}

/*
 * Convert kernel *virtual* addresses to physical addresses.
 * This is used for vmalloc'ed addresses.
 */
static u64 kvirt_to_phys(void *addr)
{
	struct page *page;
	u64 paddr = 0;

	page = vmalloc_to_page(addr);
	if (page)
		paddr = page_to_pfn(page) << PAGE_SHIFT;

	return paddr;
}

static int assign_ctxt(struct file *fp, struct hfi1_user_info *uinfo)
{
	int i_minor, ret = 0;
	unsigned swmajor, swminor, alg = HFI1_ALG_ACROSS;

	swmajor = uinfo->userversion >> 16;
	if (swmajor != HFI1_USER_SWMAJOR) {
		ret = -ENODEV;
		goto done;
	}

	swminor = uinfo->userversion & 0xffff;

	if (uinfo->hfi1_alg < HFI1_ALG_COUNT)
		alg = uinfo->hfi1_alg;

	mutex_lock(&hfi1_mutex);
	/* First, let's check if we need to set up a shared context. */
	if (uinfo->subctxt_cnt) {
		struct hfi1_filedata *fd = fp->private_data;

		ret = find_shared_ctxt(fp, uinfo);
		if (ret < 0)
			goto done_unlock;
		if (ret)
			fd->rec_cpu_num = hfi1_get_proc_affinity(
				fd->uctxt->dd, fd->uctxt->numa_id);
	}

	/*
	 * We execute the following block if we couldn't find a
	 * shared context or if context sharing is not required.
	 */
	if (!ret) {
		i_minor = iminor(file_inode(fp)) - HFI1_USER_MINOR_BASE;
		ret = get_user_context(fp, uinfo, i_minor - 1, alg);
	}
done_unlock:
	mutex_unlock(&hfi1_mutex);
done:
	return ret;
}

/* return true if the device is available for general use */
static int usable_device(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd = dd->pport;

	return driver_lstate(ppd) == IB_PORT_ACTIVE;
}

static int get_user_context(struct file *fp, struct hfi1_user_info *uinfo,
			    int devno, unsigned alg)
{
	struct hfi1_devdata *dd = NULL;
	int ret = 0, devmax, npresent, nup, dev;

	devmax = hfi1_count_units(&npresent, &nup);
	if (!npresent) {
		ret = -ENXIO;
		goto done;
	}
	if (!nup) {
		ret = -ENETDOWN;
		goto done;
	}
	if (devno >= 0) {
		dd = hfi1_lookup(devno);
		if (!dd)
			ret = -ENODEV;
		else if (!dd->freectxts)
			ret = -EBUSY;
	} else {
		struct hfi1_devdata *pdd;

		if (alg == HFI1_ALG_ACROSS) {
			unsigned free = 0U;

			for (dev = 0; dev < devmax; dev++) {
				pdd = hfi1_lookup(dev);
				if (!pdd)
					continue;
				if (!usable_device(pdd))
					continue;
				if (pdd->freectxts &&
				    pdd->freectxts > free) {
					dd = pdd;
					free = pdd->freectxts;
				}
			}
		} else {
			for (dev = 0; dev < devmax; dev++) {
				pdd = hfi1_lookup(dev);
				if (!pdd)
					continue;
				if (!usable_device(pdd))
					continue;
				if (pdd->freectxts) {
					dd = pdd;
					break;
				}
			}
		}
		if (!dd)
			ret = -EBUSY;
	}
done:
	return ret ? ret : allocate_ctxt(fp, dd, uinfo);
}

static int find_shared_ctxt(struct file *fp,
			    const struct hfi1_user_info *uinfo)
{
	int devmax, ndev, i;
	int ret = 0;
	struct hfi1_filedata *fd = fp->private_data;

	devmax = hfi1_count_units(NULL, NULL);

	for (ndev = 0; ndev < devmax; ndev++) {
		struct hfi1_devdata *dd = hfi1_lookup(ndev);

		if (!(dd && (dd->flags & HFI1_PRESENT) && dd->kregbase))
			continue;
		for (i = dd->first_user_ctxt; i < dd->num_rcv_contexts; i++) {
			struct hfi1_ctxtdata *uctxt = dd->rcd[i];

			/* Skip ctxts which are not yet open */
			if (!uctxt || !uctxt->cnt)
				continue;
			/* Skip ctxt if it doesn't match the requested one */
			if (memcmp(uctxt->uuid, uinfo->uuid,
				   sizeof(uctxt->uuid)) ||
			    uctxt->jkey != generate_jkey(current_uid()) ||
			    uctxt->subctxt_id != uinfo->subctxt_id ||
			    uctxt->subctxt_cnt != uinfo->subctxt_cnt)
				continue;

			/* Verify the sharing process matches the master */
			if (uctxt->userversion != uinfo->userversion ||
			    uctxt->cnt >= uctxt->subctxt_cnt) {
				ret = -EINVAL;
				goto done;
			}
			fd->uctxt = uctxt;
			fd->subctxt = uctxt->cnt++;
			uctxt->subpid[fd->subctxt] = current->pid;
			uctxt->active_slaves |= 1 << fd->subctxt;
			ret = 1;
			goto done;
		}
	}

done:
	return ret;
}

static int allocate_ctxt(struct file *fp, struct hfi1_devdata *dd,
			 struct hfi1_user_info *uinfo)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt;
	unsigned ctxt;
	int ret, numa;

	if (dd->flags & HFI1_FROZEN) {
		/*
		 * Pick an error that is unique from all other errors
		 * that are returned so the user process knows that
		 * it tried to allocate while the SPC was frozen. It
		 * should be able to retry with success in a short
		 * while.
		 */
		return -EIO;
	}

	for (ctxt = dd->first_user_ctxt; ctxt < dd->num_rcv_contexts; ctxt++)
		if (!dd->rcd[ctxt])
			break;

	if (ctxt == dd->num_rcv_contexts)
		return -EBUSY;

	fd->rec_cpu_num = hfi1_get_proc_affinity(dd, -1);
	if (fd->rec_cpu_num != -1)
		numa = cpu_to_node(fd->rec_cpu_num);
	else
		numa = numa_node_id();
	uctxt = hfi1_create_ctxtdata(dd->pport, ctxt, numa);
	if (!uctxt) {
		dd_dev_err(dd,
			   "Unable to allocate ctxtdata memory, failing open\n");
		return -ENOMEM;
	}
	hfi1_cdbg(PROC, "[%u:%u] pid %u assigned to CPU %d (NUMA %u)",
		  uctxt->ctxt, fd->subctxt, current->pid, fd->rec_cpu_num,
		  uctxt->numa_id);

	/*
	 * Allocate and enable a PIO send context.
	 */
	uctxt->sc = sc_alloc(dd, SC_USER, uctxt->rcvhdrqentsize,
			     uctxt->dd->node);
	if (!uctxt->sc)
		return -ENOMEM;

	hfi1_cdbg(PROC, "allocated send context %u(%u)\n", uctxt->sc->sw_index,
		  uctxt->sc->hw_context);
	ret = sc_enable(uctxt->sc);
	if (ret)
		return ret;
	/*
	 * Setup shared context resources if the user-level has requested
	 * shared contexts and this is the 'master' process.
	 * This has to be done here so the rest of the sub-contexts find the
	 * proper master.
	 */
	if (uinfo->subctxt_cnt && !fd->subctxt) {
		ret = init_subctxts(uctxt, uinfo);
		/*
		 * On error, we don't need to disable and de-allocate the
		 * send context because it will be done during file close
		 */
		if (ret)
			return ret;
	}
	uctxt->userversion = uinfo->userversion;
	uctxt->pid = current->pid;
	uctxt->flags = HFI1_CAP_UGET(MASK);
	init_waitqueue_head(&uctxt->wait);
	strlcpy(uctxt->comm, current->comm, sizeof(uctxt->comm));
	memcpy(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid));
	uctxt->jkey = generate_jkey(current_uid());
	INIT_LIST_HEAD(&uctxt->sdma_queues);
	spin_lock_init(&uctxt->sdma_qlock);
	hfi1_stats.sps_ctxts++;
	/*
	 * Disable ASPM when there are open user/PSM contexts to avoid
	 * issues with ASPM L1 exit latency
	 */
	if (dd->freectxts-- == dd->num_user_contexts)
		aspm_disable_all(dd);
	fd->uctxt = uctxt;

	return 0;
}

static int init_subctxts(struct hfi1_ctxtdata *uctxt,
			 const struct hfi1_user_info *uinfo)
{
	unsigned num_subctxts;

	num_subctxts = uinfo->subctxt_cnt;
	if (num_subctxts > HFI1_MAX_SHARED_CTXTS)
		return -EINVAL;

	uctxt->subctxt_cnt = uinfo->subctxt_cnt;
	uctxt->subctxt_id = uinfo->subctxt_id;
	uctxt->active_slaves = 1;
	uctxt->redirect_seq_cnt = 1;
	set_bit(HFI1_CTXT_MASTER_UNINIT, &uctxt->event_flags);

	return 0;
}

static int setup_subctxt(struct hfi1_ctxtdata *uctxt)
{
	int ret = 0;
	unsigned num_subctxts = uctxt->subctxt_cnt;

	uctxt->subctxt_uregbase = vmalloc_user(PAGE_SIZE);
	if (!uctxt->subctxt_uregbase) {
		ret = -ENOMEM;
		goto bail;
	}
	/* We can take the size of the RcvHdr Queue from the master */
	uctxt->subctxt_rcvhdr_base = vmalloc_user(uctxt->rcvhdrq_size *
						  num_subctxts);
	if (!uctxt->subctxt_rcvhdr_base) {
		ret = -ENOMEM;
		goto bail_ureg;
	}

	uctxt->subctxt_rcvegrbuf = vmalloc_user(uctxt->egrbufs.size *
						num_subctxts);
	if (!uctxt->subctxt_rcvegrbuf) {
		ret = -ENOMEM;
		goto bail_rhdr;
	}
	goto bail;
bail_rhdr:
	vfree(uctxt->subctxt_rcvhdr_base);
bail_ureg:
	vfree(uctxt->subctxt_uregbase);
	uctxt->subctxt_uregbase = NULL;
bail:
	return ret;
}

static int user_init(struct file *fp)
{
	unsigned int rcvctrl_ops = 0;
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;

	/* make sure that the context has already been setup */
	if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags))
		return -EFAULT;

	/* initialize poll variables... */
	uctxt->urgent = 0;
	uctxt->urgent_poll = 0;

	/*
	 * Now enable the ctxt for receive.
	 * For chips that DMA the tail register to memory when it changes
	 * (i.e. when the update bit transitions from 0 to 1), we turn it
	 * off and then back on. This will (very briefly) affect any other
	 * open ctxts, but the duration is very short, and therefore isn't
	 * an issue. We explicitly set the in-memory tail copy to 0
	 * beforehand, so we don't have to wait to be sure the DMA update
	 * has happened (chip resets head/tail to 0 on transition to
	 * enable).
	 */
	if (uctxt->rcvhdrtail_kvaddr)
		clear_rcvhdrtail(uctxt);

	/* Setup J_KEY before enabling the context */
	hfi1_set_ctxt_jkey(uctxt->dd, uctxt->ctxt, uctxt->jkey);

	rcvctrl_ops = HFI1_RCVCTRL_CTXT_ENB;
	if (HFI1_CAP_KGET_MASK(uctxt->flags, HDRSUPP))
		rcvctrl_ops |= HFI1_RCVCTRL_TIDFLOW_ENB;
	/*
	 * Ignore the bit in the flags for now until proper
	 * support for multiple packet per rcv array entry is
	 * added.
	 */
	if (!HFI1_CAP_KGET_MASK(uctxt->flags, MULTI_PKT_EGR))
		rcvctrl_ops |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
	if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_EGR_FULL))
		rcvctrl_ops |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
	if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
		rcvctrl_ops |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
	/*
	 * The RcvCtxtCtrl.TailUpd bit has to be explicitly written.
	 * We can't rely on the correct value to be set from prior
	 * uses of the chip or ctxt. Therefore, add the rcvctrl op
	 * for both cases.
	 */
	if (HFI1_CAP_KGET_MASK(uctxt->flags, DMA_RTAIL))
		rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;
	else
		rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_DIS;
	hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt->ctxt);

	/* Notify any waiting slaves */
	if (uctxt->subctxt_cnt) {
		clear_bit(HFI1_CTXT_MASTER_UNINIT, &uctxt->event_flags);
		wake_up(&uctxt->wait);
	}

	return 0;
}

static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len)
{
	struct hfi1_ctxt_info cinfo;
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	int ret = 0;

	memset(&cinfo, 0, sizeof(cinfo));
	ret = hfi1_get_base_kinfo(uctxt, &cinfo);
	if (ret < 0)
		goto done;
	cinfo.num_active = hfi1_count_active_units();
	cinfo.unit = uctxt->dd->unit;
	cinfo.ctxt = uctxt->ctxt;
	cinfo.subctxt = fd->subctxt;
	cinfo.rcvtids = roundup(uctxt->egrbufs.alloced,
				uctxt->dd->rcv_entries.group_size) +
		uctxt->expected_count;
	cinfo.credits = uctxt->sc->credits;
	cinfo.numa_node = uctxt->numa_id;
	cinfo.rec_cpu = fd->rec_cpu_num;
	cinfo.send_ctxt = uctxt->sc->hw_context;

	cinfo.egrtids = uctxt->egrbufs.alloced;
	cinfo.rcvhdrq_cnt = uctxt->rcvhdrq_cnt;
	cinfo.rcvhdrq_entsize = uctxt->rcvhdrqentsize << 2;
	cinfo.sdma_ring_size = fd->cq->nentries;
	cinfo.rcvegr_size = uctxt->egrbufs.rcvtid_size;

	trace_hfi1_ctxt_info(uctxt->dd, uctxt->ctxt, fd->subctxt, cinfo);
	if (copy_to_user(ubase, &cinfo, sizeof(cinfo)))
		ret = -EFAULT;
done:
	return ret;
}

static int setup_ctxt(struct file *fp)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	int ret = 0;

	/*
	 * Context should be set up only once, including allocation and
	 * programming of eager buffers. This is done if context sharing
	 * is not requested or by the master process.
	 */
	if (!uctxt->subctxt_cnt || !fd->subctxt) {
		ret = hfi1_init_ctxt(uctxt->sc);
		if (ret)
			goto done;

		/* Now allocate the RcvHdr queue and eager buffers. */
		ret = hfi1_create_rcvhdrq(dd, uctxt);
		if (ret)
			goto done;
		ret = hfi1_setup_eagerbufs(uctxt);
		if (ret)
			goto done;
		if (uctxt->subctxt_cnt && !fd->subctxt) {
			ret = setup_subctxt(uctxt);
			if (ret)
				goto done;
		}
	} else {
		ret = wait_event_interruptible(uctxt->wait, !test_bit(
					       HFI1_CTXT_MASTER_UNINIT,
					       &uctxt->event_flags));
		if (ret)
			goto done;
	}

	ret = hfi1_user_sdma_alloc_queues(uctxt, fp);
	if (ret)
		goto done;
	/*
	 * Expected receive has to be setup for all processes (including
	 * shared contexts). However, it has to be done after the master
	 * context has been fully configured as it depends on the
	 * eager/expected split of the RcvArray entries.
	 * Setting it up here ensures that the subcontexts will be waiting
	 * (due to the above wait_event_interruptible()) until the master
	 * is set up.
	 */
	ret = hfi1_user_exp_rcv_init(fp);
	if (ret)
		goto done;

	set_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags);
done:
	return ret;
}

static int get_base_info(struct file *fp, void __user *ubase, __u32 len)
{
	struct hfi1_base_info binfo;
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	ssize_t sz;
	unsigned offset;
	int ret = 0;

	trace_hfi1_uctxtdata(uctxt->dd, uctxt);

	memset(&binfo, 0, sizeof(binfo));
	binfo.hw_version = dd->revision;
	binfo.sw_version = HFI1_KERN_SWVERSION;
	binfo.bthqp = kdeth_qp;
	binfo.jkey = uctxt->jkey;
	/*
	 * If more than 64 contexts are enabled the allocated credit
	 * return will span two or three contiguous pages. Since we only
	 * map the page containing the context's credit return address,
	 * we need to calculate the offset in the proper page.
	 */
	offset = ((u64)uctxt->sc->hw_free -
		  (u64)dd->cr_base[uctxt->numa_id].va) % PAGE_SIZE;
	binfo.sc_credits_addr = HFI1_MMAP_TOKEN(PIO_CRED, uctxt->ctxt,
						fd->subctxt, offset);
	binfo.pio_bufbase = HFI1_MMAP_TOKEN(PIO_BUFS, uctxt->ctxt,
					    fd->subctxt,
					    uctxt->sc->base_addr);
	binfo.pio_bufbase_sop = HFI1_MMAP_TOKEN(PIO_BUFS_SOP,
						uctxt->ctxt,
						fd->subctxt,
						uctxt->sc->base_addr);
	binfo.rcvhdr_bufbase = HFI1_MMAP_TOKEN(RCV_HDRQ, uctxt->ctxt,
					       fd->subctxt,
					       uctxt->rcvhdrq);
	binfo.rcvegr_bufbase = HFI1_MMAP_TOKEN(RCV_EGRBUF, uctxt->ctxt,
					       fd->subctxt,
					       uctxt->egrbufs.rcvtids[0].phys);
	binfo.sdma_comp_bufbase = HFI1_MMAP_TOKEN(SDMA_COMP, uctxt->ctxt,
						  fd->subctxt, 0);
	/*
	 * user regs are at
	 * (RXE_PER_CONTEXT_USER + (ctxt * RXE_PER_CONTEXT_SIZE))
	 */
	binfo.user_regbase = HFI1_MMAP_TOKEN(UREGS, uctxt->ctxt,
					     fd->subctxt, 0);
	offset = offset_in_page((((uctxt->ctxt - dd->first_user_ctxt) *
				  HFI1_MAX_SHARED_CTXTS) + fd->subctxt) *
				sizeof(*dd->events));
	binfo.events_bufbase = HFI1_MMAP_TOKEN(EVENTS, uctxt->ctxt,
					       fd->subctxt,
					       offset);
	binfo.status_bufbase = HFI1_MMAP_TOKEN(STATUS, uctxt->ctxt,
					       fd->subctxt,
					       dd->status);
	if (HFI1_CAP_IS_USET(DMA_RTAIL))
		binfo.rcvhdrtail_base = HFI1_MMAP_TOKEN(RTAIL, uctxt->ctxt,
							fd->subctxt, 0);
	if (uctxt->subctxt_cnt) {
		binfo.subctxt_uregbase = HFI1_MMAP_TOKEN(SUBCTXT_UREGS,
							 uctxt->ctxt,
							 fd->subctxt, 0);
		binfo.subctxt_rcvhdrbuf = HFI1_MMAP_TOKEN(SUBCTXT_RCV_HDRQ,
							  uctxt->ctxt,
							  fd->subctxt, 0);
		binfo.subctxt_rcvegrbuf = HFI1_MMAP_TOKEN(SUBCTXT_EGRBUF,
							  uctxt->ctxt,
							  fd->subctxt, 0);
	}
	sz = (len < sizeof(binfo)) ? len : sizeof(binfo);
	if (copy_to_user(ubase, &binfo, sz))
		ret = -EFAULT;
	return ret;
}

static unsigned int poll_urgent(struct file *fp,
				struct poll_table_struct *pt)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned pollflag;

	poll_wait(fp, &uctxt->wait, pt);

	spin_lock_irq(&dd->uctxt_lock);
	if (uctxt->urgent != uctxt->urgent_poll) {
		pollflag = POLLIN | POLLRDNORM;
		uctxt->urgent_poll = uctxt->urgent;
	} else {
		pollflag = 0;
		set_bit(HFI1_CTXT_WAITING_URG, &uctxt->event_flags);
	}
	spin_unlock_irq(&dd->uctxt_lock);

	return pollflag;
}

static unsigned int poll_next(struct file *fp,
			      struct poll_table_struct *pt)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned pollflag;

	poll_wait(fp, &uctxt->wait, pt);

	spin_lock_irq(&dd->uctxt_lock);
	if (hdrqempty(uctxt)) {
		set_bit(HFI1_CTXT_WAITING_RCV, &uctxt->event_flags);
		hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_ENB, uctxt->ctxt);
		pollflag = 0;
	} else {
		pollflag = POLLIN | POLLRDNORM;
	}
	spin_unlock_irq(&dd->uctxt_lock);

	return pollflag;
}

/*
 * Find all user contexts in use, and set the specified bit in their
 * event mask.
 * See also find_ctxt() for a similar use, that is specific to send buffers.
 */
int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit)
{
	struct hfi1_ctxtdata *uctxt;
	struct hfi1_devdata *dd = ppd->dd;
	unsigned ctxt;
	int ret = 0;
	unsigned long flags;

	if (!dd->events) {
		ret = -EINVAL;
		goto done;
	}

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	for (ctxt = dd->first_user_ctxt; ctxt < dd->num_rcv_contexts;
	     ctxt++) {
		uctxt = dd->rcd[ctxt];
		if (uctxt) {
			unsigned long *evs = dd->events +
				(uctxt->ctxt - dd->first_user_ctxt) *
				HFI1_MAX_SHARED_CTXTS;
			int i;
			/*
			 * subctxt_cnt is 0 if not shared, so do base
			 * separately, first, then remaining subctxt, if any
			 */
			set_bit(evtbit, evs);
			for (i = 1; i < uctxt->subctxt_cnt; i++)
				set_bit(evtbit, evs + i);
		}
	}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);
done:
	return ret;
}

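/*
 * Layout note (a reading of the indexing used above, in user_event_ack()
 * and in hfi1_file_close(); not an authoritative map): the per-device
 * event-flag array holds one unsigned long of event bits per
 * (context, subcontext) slot,
 *
 *	evs = dd->events +
 *	      (ctxt - dd->first_user_ctxt) * HFI1_MAX_SHARED_CTXTS +
 *	      subctxt;
 *
 * so a master context and its subcontexts occupy HFI1_MAX_SHARED_CTXTS
 * consecutive slots, all within the page a process maps via EVENTS.
 */
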
/**
 * manage_rcvq - manage a context's receive queue
 * @uctxt: the context
 * @subctxt: the sub-context
 * @start_stop: action to carry out
 *
 * start_stop == 0 disables receive on the context, for use in queue
 * overflow conditions. start_stop == 1 re-enables, to be used to
 * re-init the software copy of the head register
 */
static int manage_rcvq(struct hfi1_ctxtdata *uctxt, unsigned subctxt,
		       int start_stop)
{
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned int rcvctrl_op;

	if (subctxt)
		goto bail;
	/* atomically clear receive enable ctxt. */
	if (start_stop) {
		/*
		 * On enable, force in-memory copy of the tail register to
		 * 0, so that protocol code doesn't have to worry about
		 * whether or not the chip has yet updated the in-memory
		 * copy or not on return from the system call. The chip
		 * always resets its tail register back to 0 on a
		 * transition from disabled to enabled.
		 */
		if (uctxt->rcvhdrtail_kvaddr)
			clear_rcvhdrtail(uctxt);
		rcvctrl_op = HFI1_RCVCTRL_CTXT_ENB;
	} else {
		rcvctrl_op = HFI1_RCVCTRL_CTXT_DIS;
	}
	hfi1_rcvctrl(dd, rcvctrl_op, uctxt->ctxt);
	/* always; new head should be equal to new tail; see above */
bail:
	return 0;
}

/*
 * Clear the event notifier events for this context.
 * User process then performs actions appropriate to bit having been
 * set, if desired, and checks again in future.
 */
static int user_event_ack(struct hfi1_ctxtdata *uctxt, int subctxt,
			  unsigned long events)
{
	int i;
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned long *evs;

	if (!dd->events)
		return 0;

	evs = dd->events + ((uctxt->ctxt - dd->first_user_ctxt) *
			    HFI1_MAX_SHARED_CTXTS) + subctxt;

	for (i = 0; i <= _HFI1_MAX_EVENT_BIT; i++) {
		if (!test_bit(i, &events))
			continue;
		clear_bit(i, evs);
	}
	return 0;
}

static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned subctxt,
			 u16 pkey)
{
	int ret = -ENOENT, i, intable = 0;
	struct hfi1_pportdata *ppd = uctxt->ppd;
	struct hfi1_devdata *dd = uctxt->dd;

	if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY) {
		ret = -EINVAL;
		goto done;
	}

	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++)
		if (pkey == ppd->pkeys[i]) {
			intable = 1;
			break;
		}

	if (intable)
		ret = hfi1_set_ctxt_pkey(dd, uctxt->ctxt, pkey);
done:
	return ret;
}

static int ui_open(struct inode *inode, struct file *filp)
{
	struct hfi1_devdata *dd;

	dd = container_of(inode->i_cdev, struct hfi1_devdata, ui_cdev);
	filp->private_data = dd; /* for other methods */
	return 0;
}

static int ui_release(struct inode *inode, struct file *filp)
{
	/* nothing to do */
	return 0;
}

static loff_t ui_lseek(struct file *filp, loff_t offset, int whence)
{
	struct hfi1_devdata *dd = filp->private_data;

	return fixed_size_llseek(filp, offset, whence,
				 (dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE);
}

/* NOTE: assumes unsigned long is 8 bytes */
static ssize_t ui_read(struct file *filp, char __user *buf, size_t count,
		       loff_t *f_pos)
{
	struct hfi1_devdata *dd = filp->private_data;
	void __iomem *base = dd->kregbase;
	unsigned long total, csr_off,
		barlen = (dd->kregend - dd->kregbase);
	u64 data;

	/* only read 8 byte quantities */
	if ((count % 8) != 0)
		return -EINVAL;
	/* offset must be 8-byte aligned */
	if ((*f_pos % 8) != 0)
		return -EINVAL;
	/* destination buffer must be 8-byte aligned */
	if ((unsigned long)buf % 8 != 0)
		return -EINVAL;
	/* must be in range */
	if (*f_pos + count > (barlen + DC8051_DATA_MEM_SIZE))
		return -EINVAL;
	/* only set the base if we are not starting past the BAR */
	if (*f_pos < barlen)
		base += *f_pos;
	csr_off = *f_pos;
	for (total = 0; total < count; total += 8, csr_off += 8) {
		/* accessing LCB CSRs requires more checks */
		if (is_lcb_offset(csr_off)) {
			if (read_lcb_csr(dd, csr_off, (u64 *)&data))
				break; /* failed */
		}
		/*
		 * Cannot read ASIC GPIO/QSFP* clear and force CSRs without a
		 * false parity error. Avoid the whole issue by not reading
		 * them. These registers are defined as having a read value
		 * of 0.
		 */
		else if (csr_off == ASIC_GPIO_CLEAR ||
			 csr_off == ASIC_GPIO_FORCE ||
			 csr_off == ASIC_QSFP1_CLEAR ||
			 csr_off == ASIC_QSFP1_FORCE ||
			 csr_off == ASIC_QSFP2_CLEAR ||
			 csr_off == ASIC_QSFP2_FORCE)
			data = 0;
		else if (csr_off >= barlen) {
			/*
			 * read_8051_data can read more than just 8 bytes at
			 * a time. However, folding this into the loop and
			 * handling the reads in 8 byte increments allows us
			 * to smoothly transition from chip memory to 8051
			 * memory.
			 */
			if (read_8051_data(dd,
					   (u32)(csr_off - barlen),
					   sizeof(data), &data))
				break; /* failed */
		} else
			data = readq(base + total);
		if (put_user(data, (unsigned long __user *)(buf + total)))
			break;
	}
	*f_pos += total;
	return total;
}

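/*
 * A note on the ui device's address space (a reading of ui_lseek() and
 * ui_read() above, not an authoritative map): file offsets in
 * [0, kregend - kregbase) read chip CSRs through the BAR, and offsets
 * in the following DC8051_DATA_MEM_SIZE bytes fall through to
 * read_8051_data(), so the 8051 data memory appears immediately after
 * the CSR space. ui_write() below accepts only the CSR range.
 */
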
/* NOTE: assumes unsigned long is 8 bytes */
static ssize_t ui_write(struct file *filp, const char __user *buf,
			size_t count, loff_t *f_pos)
{
	struct hfi1_devdata *dd = filp->private_data;
	void __iomem *base;
	unsigned long total, data, csr_off;
	int in_lcb;

	/* only write 8 byte quantities */
	if ((count % 8) != 0)
		return -EINVAL;
	/* offset must be 8-byte aligned */
	if ((*f_pos % 8) != 0)
		return -EINVAL;
	/* source buffer must be 8-byte aligned */
	if ((unsigned long)buf % 8 != 0)
		return -EINVAL;
	/* must be in range */
	if (*f_pos + count > dd->kregend - dd->kregbase)
		return -EINVAL;

	base = (void __iomem *)dd->kregbase + *f_pos;
	csr_off = *f_pos;
	in_lcb = 0;
	for (total = 0; total < count; total += 8, csr_off += 8) {
		if (get_user(data, (unsigned long __user *)(buf + total)))
			break;
		/* accessing LCB CSRs requires a special procedure */
		if (is_lcb_offset(csr_off)) {
			if (!in_lcb) {
				int ret = acquire_lcb_access(dd, 1);

				if (ret)
					break;
				in_lcb = 1;
			}
		} else {
			if (in_lcb) {
				release_lcb_access(dd, 1);
				in_lcb = 0;
			}
		}
		writeq(data, base + total);
	}
	if (in_lcb)
		release_lcb_access(dd, 1);
	*f_pos += total;
	return total;
}

static const struct file_operations ui_file_ops = {
	.owner = THIS_MODULE,
	.llseek = ui_lseek,
	.read = ui_read,
	.write = ui_write,
	.open = ui_open,
	.release = ui_release,
};

#define UI_OFFSET 192	/* device minor offset for UI devices */
static int create_ui = 1;

static struct cdev wildcard_cdev;
static struct device *wildcard_device;

static atomic_t user_count = ATOMIC_INIT(0);

static void user_remove(struct hfi1_devdata *dd)
{
	if (atomic_dec_return(&user_count) == 0)
		hfi1_cdev_cleanup(&wildcard_cdev, &wildcard_device);

	hfi1_cdev_cleanup(&dd->user_cdev, &dd->user_device);
	hfi1_cdev_cleanup(&dd->ui_cdev, &dd->ui_device);
}

static int user_add(struct hfi1_devdata *dd)
{
	char name[10];
	int ret;

	if (atomic_inc_return(&user_count) == 1) {
		ret = hfi1_cdev_init(0, class_name(), &hfi1_file_ops,
				     &wildcard_cdev, &wildcard_device,
				     true);
		if (ret)
			goto done;
	}

	snprintf(name, sizeof(name), "%s_%d", class_name(), dd->unit);
	ret = hfi1_cdev_init(dd->unit + 1, name, &hfi1_file_ops,
			     &dd->user_cdev, &dd->user_device,
			     true);
	if (ret)
		goto done;

	if (create_ui) {
		snprintf(name, sizeof(name),
			 "%s_ui%d", class_name(), dd->unit);
		ret = hfi1_cdev_init(dd->unit + UI_OFFSET, name, &ui_file_ops,
				     &dd->ui_cdev, &dd->ui_device,
				     false);
		if (ret)
			goto done;
	}

	return 0;
done:
	user_remove(dd);
	return ret;
}

/*
 * Create per-unit files in /dev
 */
int hfi1_device_create(struct hfi1_devdata *dd)
{
	int r, ret;

	r = user_add(dd);
	ret = hfi1_diag_add(dd);
	if (r && !ret)
		ret = r;
	return ret;
}

/*
 * Remove per-unit files in /dev
 * void; the core kernel returns no errors for this stuff
 */
void hfi1_device_remove(struct hfi1_devdata *dd)
{
	user_remove(dd);
	hfi1_diag_remove(dd);
}