/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mmu_context.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

#include "hfi.h"
#include "sdma.h"
#include "user_sdma.h"
#include "verbs.h"  /* for the headers */
#include "common.h" /* for struct hfi1_tid_info */
#include "trace.h"
#include "mmu_rb.h"
static uint hfi1_sdma_comp_ring_size = 128;
module_param_named(sdma_comp_size, hfi1_sdma_comp_ring_size, uint, S_IRUGO);
MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 128");
/* The maximum number of data I/O vectors per message/request */
#define MAX_VECTORS_PER_REQ 8

/*
 * Maximum number of packets to send from each message/request
 * before moving to the next one.
 */
#define MAX_PKTS_PER_QUEUE 16
#define num_pages(x) (1 + ((((x) - 1) & PAGE_MASK) >> PAGE_SHIFT))
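/*
 * Worked example (illustrative): with 4 KiB pages, num_pages(8192) is
 * 1 + ((8191 & PAGE_MASK) >> PAGE_SHIFT) = 1 + (0x1000 >> 12) = 2,
 * i.e. the number of pages needed to hold x bytes starting from a
 * page-aligned address.
 */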
#define req_opcode(x) \
	(((x) >> HFI1_SDMA_REQ_OPCODE_SHIFT) & HFI1_SDMA_REQ_OPCODE_MASK)
#define req_version(x) \
	(((x) >> HFI1_SDMA_REQ_VERSION_SHIFT) & HFI1_SDMA_REQ_OPCODE_MASK)
#define req_iovcnt(x) \
	(((x) >> HFI1_SDMA_REQ_IOVCNT_SHIFT) & HFI1_SDMA_REQ_IOVCNT_MASK)
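/*
 * Decode sketch (illustrative): user space packs the request opcode,
 * version, and I/O vector count into sdma_req_info.ctrl, so
 * req_opcode(info.ctrl) yields the transfer type (e.g. EXPECTED or
 * EAGER) and req_iovcnt(info.ctrl) the total vector count, header
 * vector included.
 */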
/* Number of BTH.PSN bits used for sequence number in expected rcvs */
#define BTH_SEQ_MASK 0x7ffull
/*
 * Define fields in the KDETH header so we can update the header
 * template.
 */
#define KDETH_OFFSET_SHIFT        0
#define KDETH_OFFSET_MASK         0x7fff
#define KDETH_OM_SHIFT            15
#define KDETH_OM_MASK             0x1
#define KDETH_TID_SHIFT           16
#define KDETH_TID_MASK            0x3ff
#define KDETH_TIDCTRL_SHIFT       26
#define KDETH_TIDCTRL_MASK        0x3
#define KDETH_INTR_SHIFT          28
#define KDETH_INTR_MASK           0x1
#define KDETH_SH_SHIFT            29
#define KDETH_SH_MASK             0x1
#define KDETH_HCRC_UPPER_SHIFT    16
#define KDETH_HCRC_UPPER_MASK     0xff
#define KDETH_HCRC_LOWER_SHIFT    24
#define KDETH_HCRC_LOWER_MASK     0xff
#define PBC2LRH(x) ((((x) & 0xfff) << 2) - 4)
#define LRH2PBC(x) ((((x) >> 2) + 1) & 0xfff)
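/*
 * Worked example (illustrative): for a PBC length of 0x12 DWs,
 * PBC2LRH(0x12) = (0x12 << 2) - 4 = 68 bytes, and LRH2PBC(68) =
 * (68 >> 2) + 1 = 0x12 round-trips back; the PBC DW count is one DW
 * larger than the LRH-covered byte length divided by four.
 */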
#define KDETH_GET(val, field) \
	(((le32_to_cpu((val))) >> KDETH_##field##_SHIFT) & KDETH_##field##_MASK)
#define KDETH_SET(dw, field, val) do { \
		u32 dwval = le32_to_cpu(dw); \
		dwval &= ~(KDETH_##field##_MASK << KDETH_##field##_SHIFT); \
		dwval |= (((val) & KDETH_##field##_MASK) << \
			  KDETH_##field##_SHIFT); \
		dw = cpu_to_le32(dwval); \
	} while (0)
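/*
 * Usage sketch (illustrative): both macros operate on the little-endian
 * KDETH dword, e.g. KDETH_GET(hdr->kdeth.ver_tid_offset, OFFSET)
 * extracts bits 14:0, and KDETH_SET(hdr->kdeth.ver_tid_offset, SH, 0)
 * clears bit 29 in place.
 */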
#define AHG_HEADER_SET(arr, idx, dw, bit, width, value) \
do { \
	if ((idx) < ARRAY_SIZE((arr))) \
		(arr)[(idx++)] = sdma_build_ahg_descriptor( \
			(__force u16)(value), (dw), (bit), \
			(width)); \
} while (0)
/* KDETH OM multipliers and switch over point */
#define KDETH_OM_SMALL     4
#define KDETH_OM_LARGE     64
#define KDETH_OM_MAX_SIZE  (1 << ((KDETH_OM_LARGE / KDETH_OM_SMALL) + 1))
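/*
 * Worked numbers (illustrative): KDETH_OM_MAX_SIZE = 1 << ((64 / 4) + 1)
 * = 128 KiB. The 15-bit KDETH.OFFSET scaled by the small multiplier (4)
 * can address just under 128 KiB, so TID entries at or above that size
 * must use the large multiplier (64).
 */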
/* Last packet in the request */
#define TXREQ_FLAGS_REQ_LAST_PKT BIT(0)

#define SDMA_REQ_IN_USE     0
#define SDMA_REQ_FOR_THREAD 1
#define SDMA_REQ_SEND_DONE  2
#define SDMA_REQ_HAVE_AHG   3
#define SDMA_REQ_HAS_ERROR  4
#define SDMA_REQ_DONE_ERROR 5

#define SDMA_PKT_Q_INACTIVE BIT(0)
#define SDMA_PKT_Q_ACTIVE   BIT(1)
#define SDMA_PKT_Q_DEFERRED BIT(2)
/*
 * Maximum retry attempts to submit a TX request
 * before putting the process to sleep.
 */
#define MAX_DEFER_RETRY_COUNT 1
static unsigned initial_pkt_count = 8;

#define SDMA_IOWAIT_TIMEOUT 1000 /* in milliseconds */
struct sdma_mmu_node;
struct user_sdma_iovec {
	struct list_head list;
	struct iovec iov;
	/* number of pages in this vector */
	unsigned npages;
	/* array of pinned pages for this vector */
	struct page **pages;
	/*
	 * offset into the virtual address space of the vector at
	 * which we last left off.
	 */
	u64 offset;
	struct sdma_mmu_node *node;
};
#define SDMA_CACHE_NODE_EVICT 0
struct sdma_mmu_node {
	struct mmu_rb_node rb;
	struct list_head list;
	struct hfi1_user_sdma_pkt_q *pq;
	atomic_t refcount;
	struct page **pages;
	unsigned npages;
	unsigned long flags;
};
struct user_sdma_request {
	struct sdma_req_info info;
	struct hfi1_user_sdma_pkt_q *pq;
	struct hfi1_user_sdma_comp_q *cq;
	/* This is the original header from user space */
	struct hfi1_pkt_header hdr;
	/*
	 * Pointer to the SDMA engine for this request.
	 * Since different requests could be on different VLs,
	 * each request will need its own engine pointer.
	 */
	struct sdma_engine *sde;
	u8 ahg_idx;
	u32 ahg[9];
	/*
	 * KDETH.Offset (Eager) field
	 * We need to remember the initial value so the headers
	 * can be updated properly.
	 */
	u32 koffset;
	/*
	 * KDETH.OFFSET (TID) field
	 * The offset can cover multiple packets, depending on the
	 * size of the TID entry.
	 */
	u32 tidoffset;
	/*
	 * KDETH.OM
	 * Remember this because the header template always sets it
	 * to 0.
	 */
	u8 omfactor;
	/*
	 * We copy the iovs for this request (based on
	 * info.iovcnt). These are only the data vectors.
	 */
	unsigned data_iovs;
	/* total length of the data in the request */
	u32 data_len;
	/* progress index moving along the iovs array */
	unsigned iov_idx;
	struct user_sdma_iovec iovs[MAX_VECTORS_PER_REQ];
	/* number of elements copied to the tids array */
	u16 n_tids;
	/* TID array values copied from the tid_iov vector */
	u32 *tids;
	u16 tididx;
	u32 sent;
	u64 seqnum;
	u64 seqcomp;
	u64 seqsubmitted;
	struct list_head txps;
	unsigned long flags;
	/* status of the last txreq completed */
	int status;
};
/*
 * A single txreq could span up to 3 physical pages when the MTU
 * is sufficiently large (> 4K). Each of the IOV pointers also
 * needs its own set of flags so that each vector can be handled
 * independently of the others.
 */
struct user_sdma_txreq {
	/* Packet header for the txreq */
	struct hfi1_pkt_header hdr;
	struct sdma_txreq txreq;
	struct list_head list;
	struct user_sdma_request *req;
	u16 flags;
	unsigned busycount;
	u64 seqnum;
};
#define SDMA_DBG(req, fmt, ...) \
	hfi1_cdbg(SDMA, "[%u:%u:%u:%u] " fmt, (req)->pq->dd->unit, \
		 (req)->pq->ctxt, (req)->pq->subctxt, (req)->info.comp_idx, \
		 ##__VA_ARGS__)
#define SDMA_Q_DBG(pq, fmt, ...) \
	hfi1_cdbg(SDMA, "[%u:%u:%u] " fmt, (pq)->dd->unit, (pq)->ctxt, \
		 (pq)->subctxt, ##__VA_ARGS__)
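/*
 * Usage sketch (illustrative): SDMA_DBG(req, "sent %u", req->sent)
 * logs through hfi1_cdbg() with a "[unit:ctxt:subctxt:comp_idx]"
 * prefix; SDMA_Q_DBG() does the same for a packet queue, without the
 * completion index.
 */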
static int user_sdma_send_pkts(struct user_sdma_request *, unsigned);
static int num_user_pages(const struct iovec *);
static void user_sdma_txreq_cb(struct sdma_txreq *, int);
static inline void pq_update(struct hfi1_user_sdma_pkt_q *);
static void user_sdma_free_request(struct user_sdma_request *, bool);
static int pin_vector_pages(struct user_sdma_request *,
			    struct user_sdma_iovec *);
static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned,
			       unsigned);
static int check_header_template(struct user_sdma_request *,
				 struct hfi1_pkt_header *, u32, u32);
static int set_txreq_header(struct user_sdma_request *,
			    struct user_sdma_txreq *, u32);
static int set_txreq_header_ahg(struct user_sdma_request *,
				struct user_sdma_txreq *, u32);
static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *,
				  struct hfi1_user_sdma_comp_q *,
				  u16, enum hfi1_sdma_comp_state, int);
static inline u32 set_pkt_bth_psn(__be32, u8, u32);
static inline u32 get_lrh_len(struct hfi1_pkt_header, u32 len);

static int defer_packet_queue(
	struct sdma_engine *,
	struct iowait *,
	struct sdma_txreq *,
	unsigned seq);
static void activate_packet_queue(struct iowait *, int);
static bool sdma_rb_filter(struct mmu_rb_node *, unsigned long, unsigned long);
static int sdma_rb_insert(struct rb_root *, struct mmu_rb_node *);
static void sdma_rb_remove(struct rb_root *, struct mmu_rb_node *,
			   struct mm_struct *);
static int sdma_rb_invalidate(struct rb_root *, struct mmu_rb_node *);

static struct mmu_rb_ops sdma_rb_ops = {
	.filter = sdma_rb_filter,
	.insert = sdma_rb_insert,
	.remove = sdma_rb_remove,
	.invalidate = sdma_rb_invalidate
};
static int defer_packet_queue(
	struct sdma_engine *sde,
	struct iowait *wait,
	struct sdma_txreq *txreq,
	unsigned seq)
{
	struct hfi1_user_sdma_pkt_q *pq =
		container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
	struct hfi1_ibdev *dev = &pq->dd->verbs_dev;
	struct user_sdma_txreq *tx =
		container_of(txreq, struct user_sdma_txreq, txreq);

	if (sdma_progress(sde, seq, txreq)) {
		if (tx->busycount++ < MAX_DEFER_RETRY_COUNT)
			goto eagain;
	}
	/*
	 * We are assuming that if the list is enqueued somewhere, it
	 * is to the dmawait list since that is the only place where
	 * it is supposed to be enqueued.
	 */
	xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
	write_seqlock(&dev->iowait_lock);
	if (list_empty(&pq->busy.list))
		list_add_tail(&pq->busy.list, &sde->dmawait);
	write_sequnlock(&dev->iowait_lock);
	return -EBUSY;
eagain:
	return -EAGAIN;
}
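/*
 * Note (summarizing the logic above): -EAGAIN asks the caller to retry
 * the submission immediately while the engine is still making progress,
 * whereas the fall-through path marks the queue SDMA_PKT_Q_DEFERRED,
 * parks it on the engine's dmawait list, and returns -EBUSY so the
 * submitter waits for the engine to drain.
 */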
static void activate_packet_queue(struct iowait *wait, int reason)
{
	struct hfi1_user_sdma_pkt_q *pq =
		container_of(wait, struct hfi1_user_sdma_pkt_q, busy);

	xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
	wake_up(&wait->wait_dma);
}
static void sdma_kmem_cache_ctor(void *obj)
{
	struct user_sdma_txreq *tx = obj;

	memset(tx, 0, sizeof(*tx));
}
int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp)
{
	struct hfi1_filedata *fd;
	int ret = 0;
	unsigned memsize;
	char buf[64];
	struct hfi1_devdata *dd;
	struct hfi1_user_sdma_comp_q *cq;
	struct hfi1_user_sdma_pkt_q *pq;
	unsigned long flags;

	if (!uctxt || !fp) {
		ret = -EBADF;
		goto done;
	}

	fd = fp->private_data;

	if (!hfi1_sdma_comp_ring_size) {
		ret = -EINVAL;
		goto done;
	}

	dd = uctxt->dd;

	pq = kzalloc(sizeof(*pq), GFP_KERNEL);
	if (!pq)
		goto pq_nomem;

	memsize = sizeof(*pq->reqs) * hfi1_sdma_comp_ring_size;
	pq->reqs = kzalloc(memsize, GFP_KERNEL);
	if (!pq->reqs)
		goto pq_reqs_nomem;

	INIT_LIST_HEAD(&pq->list);
	pq->dd = dd;
	pq->ctxt = uctxt->ctxt;
	pq->subctxt = fd->subctxt;
	pq->n_max_reqs = hfi1_sdma_comp_ring_size;
	pq->state = SDMA_PKT_Q_INACTIVE;
	atomic_set(&pq->n_reqs, 0);
	init_waitqueue_head(&pq->wait);
	pq->sdma_rb_root = RB_ROOT;
	INIT_LIST_HEAD(&pq->evict);
	spin_lock_init(&pq->evict_lock);

	iowait_init(&pq->busy, 0, NULL, defer_packet_queue,
		    activate_packet_queue, NULL);

	snprintf(buf, 64, "txreq-kmem-cache-%u-%u-%u", dd->unit, uctxt->ctxt,
		 fd->subctxt);
	pq->txreq_cache = kmem_cache_create(buf,
					    sizeof(struct user_sdma_txreq),
					    L1_CACHE_BYTES,
					    SLAB_HWCACHE_ALIGN,
					    sdma_kmem_cache_ctor);
	if (!pq->txreq_cache) {
		dd_dev_err(dd, "[%u] Failed to allocate TxReq cache\n",
			   uctxt->ctxt);
		goto pq_txreq_nomem;
	}
	fd->pq = pq;

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		goto cq_nomem;

	memsize = PAGE_ALIGN(sizeof(*cq->comps) * hfi1_sdma_comp_ring_size);
	cq->comps = vmalloc_user(memsize);
	if (!cq->comps)
		goto cq_comps_nomem;

	cq->nentries = hfi1_sdma_comp_ring_size;
	fd->cq = cq;

	ret = hfi1_mmu_rb_register(&pq->sdma_rb_root, &sdma_rb_ops);
	if (ret) {
		dd_dev_err(dd, "Failed to register with MMU %d", ret);
		goto done;
	}

	spin_lock_irqsave(&uctxt->sdma_qlock, flags);
	list_add(&pq->list, &uctxt->sdma_queues);
	spin_unlock_irqrestore(&uctxt->sdma_qlock, flags);
	goto done;

cq_comps_nomem:
	kfree(cq);
cq_nomem:
	kmem_cache_destroy(pq->txreq_cache);
pq_txreq_nomem:
	kfree(pq->reqs);
pq_reqs_nomem:
	kfree(pq);
	fd->pq = NULL;
pq_nomem:
	ret = -ENOMEM;
done:
	return ret;
}
int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_user_sdma_pkt_q *pq;
	unsigned long flags;

	hfi1_cdbg(SDMA, "[%u:%u:%u] Freeing user SDMA queues", uctxt->dd->unit,
		  uctxt->ctxt, fd->subctxt);
	pq = fd->pq;
	if (pq) {
		hfi1_mmu_rb_unregister(&pq->sdma_rb_root);

		spin_lock_irqsave(&uctxt->sdma_qlock, flags);
		if (!list_empty(&pq->list))
			list_del_init(&pq->list);
		spin_unlock_irqrestore(&uctxt->sdma_qlock, flags);
		iowait_sdma_drain(&pq->busy);
		/* Wait until all requests have been freed. */
		wait_event_interruptible(
			pq->wait,
			(ACCESS_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE));
		kfree(pq->reqs);
		kmem_cache_destroy(pq->txreq_cache);
		kfree(pq);
		fd->pq = NULL;
	}
	if (fd->cq) {
		vfree(fd->cq->comps);
		kfree(fd->cq);
		fd->cq = NULL;
	}
	return 0;
}
int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
				   unsigned long dim, unsigned long *count)
{
	int ret = 0, i = 0;
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_user_sdma_pkt_q *pq = fd->pq;
	struct hfi1_user_sdma_comp_q *cq = fd->cq;
	struct hfi1_devdata *dd = pq->dd;
	unsigned long idx = 0;
	u8 pcount = initial_pkt_count;
	struct sdma_req_info info;
	struct user_sdma_request *req;
	u8 opcode, sc, vl;

	if (iovec[idx].iov_len < sizeof(info) + sizeof(req->hdr)) {
		hfi1_cdbg(
		   SDMA,
		   "[%u:%u:%u] First vector not big enough for header %lu/%lu",
		   dd->unit, uctxt->ctxt, fd->subctxt,
		   iovec[idx].iov_len, sizeof(info) + sizeof(req->hdr));
		return -EINVAL;
	}
	ret = copy_from_user(&info, iovec[idx].iov_base, sizeof(info));
	if (ret) {
		hfi1_cdbg(SDMA, "[%u:%u:%u] Failed to copy info QW (%d)",
			  dd->unit, uctxt->ctxt, fd->subctxt, ret);
		return -EFAULT;
	}

	trace_hfi1_sdma_user_reqinfo(dd, uctxt->ctxt, fd->subctxt,
				     (u16 *)&info);
	if (cq->comps[info.comp_idx].status == QUEUED ||
	    test_bit(SDMA_REQ_IN_USE, &pq->reqs[info.comp_idx].flags)) {
		hfi1_cdbg(SDMA, "[%u:%u:%u] Entry %u is in QUEUED state",
			  dd->unit, uctxt->ctxt, fd->subctxt,
			  info.comp_idx);
		return -EBADSLT;
	}
	if (!info.fragsize) {
		hfi1_cdbg(SDMA,
			  "[%u:%u:%u:%u] Request does not specify fragsize",
			  dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx);
		return -EINVAL;
	}
	/*
	 * We've done all the safety checks that we can up to this point,
	 * "allocate" the request entry.
	 */
	hfi1_cdbg(SDMA, "[%u:%u:%u] Using req/comp entry %u\n", dd->unit,
		  uctxt->ctxt, fd->subctxt, info.comp_idx);
	req = pq->reqs + info.comp_idx;
	memset(req, 0, sizeof(*req));
	/* Mark the request as IN_USE before we start filling it in. */
	set_bit(SDMA_REQ_IN_USE, &req->flags);
	req->data_iovs = req_iovcnt(info.ctrl) - 1;
	req->pq = pq;
	req->cq = cq;
	req->status = -1;
	INIT_LIST_HEAD(&req->txps);

	memcpy(&req->info, &info, sizeof(info));

	if (req_opcode(info.ctrl) == EXPECTED)
		req->data_iovs--;

	if (!info.npkts || req->data_iovs > MAX_VECTORS_PER_REQ) {
		SDMA_DBG(req, "Too many vectors (%u/%u)", req->data_iovs,
			 MAX_VECTORS_PER_REQ);
		ret = -EINVAL;
		goto free_req;
	}
	/* Copy the header from the user buffer */
	ret = copy_from_user(&req->hdr, iovec[idx].iov_base + sizeof(info),
			     sizeof(req->hdr));
	if (ret) {
		SDMA_DBG(req, "Failed to copy header template (%d)", ret);
		ret = -EFAULT;
		goto free_req;
	}

	/* If Static rate control is not enabled, sanitize the header. */
	if (!HFI1_CAP_IS_USET(STATIC_RATE_CTRL))
		req->hdr.pbc[2] = 0;

	/* Validate the opcode. Do not trust packets from user space blindly. */
	opcode = (be32_to_cpu(req->hdr.bth[0]) >> 24) & 0xff;
	if ((opcode & USER_OPCODE_CHECK_MASK) !=
	     USER_OPCODE_CHECK_VAL) {
		SDMA_DBG(req, "Invalid opcode (%d)", opcode);
		ret = -EINVAL;
		goto free_req;
	}
	/*
	 * Validate the vl. Do not trust packets from user space blindly.
	 * VL comes from PBC, SC comes from LRH, and the VL needs to
	 * match the SC look up.
	 */
	vl = (le16_to_cpu(req->hdr.pbc[0]) >> 12) & 0xF;
	sc = (((be16_to_cpu(req->hdr.lrh[0]) >> 12) & 0xF) |
	      (((le16_to_cpu(req->hdr.pbc[1]) >> 14) & 0x1) << 4));
	if (vl >= dd->pport->vls_operational ||
	    vl != sc_to_vlt(dd, sc)) {
		SDMA_DBG(req, "Invalid SC(%u)/VL(%u)", sc, vl);
		ret = -EINVAL;
		goto free_req;
	}

	/* Checking P_KEY for requests from user-space */
	if (egress_pkey_check(dd->pport, req->hdr.lrh, req->hdr.bth, sc,
			      PKEY_CHECK_INVALID)) {
		ret = -EINVAL;
		goto free_req;
	}

	/*
	 * Also should check the BTH.lnh. If it says the next header is GRH then
	 * the RXE parsing will be off and will land in the middle of the KDETH
	 * or miss it entirely.
	 */
	if ((be16_to_cpu(req->hdr.lrh[0]) & 0x3) == HFI1_LRH_GRH) {
		SDMA_DBG(req, "User tried to pass in a GRH");
		ret = -EINVAL;
		goto free_req;
	}

	req->koffset = le32_to_cpu(req->hdr.kdeth.swdata[6]);
	/*
	 * Calculate the initial TID offset based on the values of
	 * KDETH.OFFSET and KDETH.OM that are passed in.
	 */
	req->tidoffset = KDETH_GET(req->hdr.kdeth.ver_tid_offset, OFFSET) *
		(KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ?
		 KDETH_OM_LARGE : KDETH_OM_SMALL);
	SDMA_DBG(req, "Initial TID offset %u", req->tidoffset);
	idx++;

	/* Save all the IO vector structures */
	while (i < req->data_iovs) {
		INIT_LIST_HEAD(&req->iovs[i].list);
		memcpy(&req->iovs[i].iov, iovec + idx++, sizeof(struct iovec));
		ret = pin_vector_pages(req, &req->iovs[i]);
		if (ret) {
			req->status = ret;
			goto free_req;
		}
		req->data_len += req->iovs[i++].iov.iov_len;
	}
	SDMA_DBG(req, "total data length %u", req->data_len);

	if (pcount > req->info.npkts)
		pcount = req->info.npkts;

	/*
	 * User space will provide the TID info only when the
	 * request type is EXPECTED. This is true even if there is
	 * only one packet in the request and the header is already
	 * set up. The reason for the singular TID case is that the
	 * driver needs to perform safety checks.
	 */
	if (req_opcode(req->info.ctrl) == EXPECTED) {
		u16 ntids = iovec[idx].iov_len / sizeof(*req->tids);

		if (!ntids || ntids > MAX_TID_PAIR_ENTRIES) {
			ret = -EINVAL;
			goto free_req;
		}
		req->tids = kcalloc(ntids, sizeof(*req->tids), GFP_KERNEL);
		if (!req->tids) {
			ret = -ENOMEM;
			goto free_req;
		}
		/*
		 * We have to copy all of the tids because they may vary
		 * in size and, therefore, the TID count might not be
		 * equal to the pkt count. However, there is no way to
		 * tell at this point.
		 */
		ret = copy_from_user(req->tids, iovec[idx].iov_base,
				     ntids * sizeof(*req->tids));
		if (ret) {
			SDMA_DBG(req, "Failed to copy %d TIDs (%d)",
				 ntids, ret);
			ret = -EFAULT;
			goto free_req;
		}
		req->n_tids = ntids;
		idx++;
	}

	/* Have to select the engine */
	req->sde = sdma_select_engine_vl(dd,
					 (u32)(uctxt->ctxt + fd->subctxt),
					 vl);
	if (!req->sde || !sdma_running(req->sde)) {
		ret = -ECOMM;
		goto free_req;
	}

	/* We don't need an AHG entry if the request contains only one packet */
	if (req->info.npkts > 1 && HFI1_CAP_IS_USET(SDMA_AHG)) {
		int ahg = sdma_ahg_alloc(req->sde);

		if (likely(ahg >= 0)) {
			req->ahg_idx = (u8)ahg;
			set_bit(SDMA_REQ_HAVE_AHG, &req->flags);
		}
	}

	set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
	atomic_inc(&pq->n_reqs);
	/* Send the first N packets in the request to buy us some time */
	ret = user_sdma_send_pkts(req, pcount);
	if (unlikely(ret < 0 && ret != -EBUSY)) {
		req->status = ret;
		goto free_req;
	}

	/*
	 * It is possible that the SDMA engine would have processed all the
	 * submitted packets by the time we get here. Therefore, only set
	 * packet queue state to ACTIVE if there are still uncompleted
	 * requests.
	 */
	if (atomic_read(&pq->n_reqs))
		xchg(&pq->state, SDMA_PKT_Q_ACTIVE);

	/*
	 * This is a somewhat blocking send implementation.
	 * The driver will block the caller until all packets of the
	 * request have been submitted to the SDMA engine. However, it
	 * will not wait for send completions.
	 */
	while (!test_bit(SDMA_REQ_SEND_DONE, &req->flags)) {
		ret = user_sdma_send_pkts(req, pcount);
		if (ret < 0) {
			if (ret != -EBUSY) {
				req->status = ret;
				set_bit(SDMA_REQ_DONE_ERROR, &req->flags);
				if (ACCESS_ONCE(req->seqcomp) ==
				    req->seqsubmitted - 1)
					goto free_req;
				return ret;
			}
			wait_event_interruptible_timeout(
				pq->busy.wait_dma,
				(pq->state == SDMA_PKT_Q_ACTIVE),
				msecs_to_jiffies(
					SDMA_IOWAIT_TIMEOUT));
		}
	}
	*count += idx;
	return 0;
free_req:
	user_sdma_free_request(req, true);
	pq_update(pq);
	set_comp_state(pq, cq, info.comp_idx, ERROR, req->status);
	return ret;
}
static inline u32 compute_data_length(struct user_sdma_request *req,
				      struct user_sdma_txreq *tx)
{
	/*
	 * Determine the proper size of the packet data.
	 * The size of the data of the first packet is in the header
	 * template. However, it includes the header and ICRC, which need
	 * to be subtracted.
	 * The size of the remaining packets is the minimum of the frag
	 * size (MTU) or remaining data in the request.
	 */
	u32 len;

	if (!req->seqnum) {
		len = ((be16_to_cpu(req->hdr.lrh[2]) << 2) -
		       (sizeof(tx->hdr) - 4));
	} else if (req_opcode(req->info.ctrl) == EXPECTED) {
		u32 tidlen = EXP_TID_GET(req->tids[req->tididx], LEN) *
			PAGE_SIZE;
		/*
		 * Get the data length based on the remaining space in the
		 * TID pair.
		 */
		len = min(tidlen - req->tidoffset, (u32)req->info.fragsize);
		/* If we've filled up the TID pair, move to the next one. */
		if (unlikely(!len) && ++req->tididx < req->n_tids &&
		    req->tids[req->tididx]) {
			tidlen = EXP_TID_GET(req->tids[req->tididx],
					     LEN) * PAGE_SIZE;
			req->tidoffset = 0;
			len = min_t(u32, tidlen, req->info.fragsize);
		}
		/*
		 * Since the TID pairs map entire pages, make sure that we
		 * are not going to try to send more data than we have
		 * remaining.
		 */
		len = min(len, req->data_len - req->sent);
	} else {
		len = min(req->data_len - req->sent, (u32)req->info.fragsize);
	}
	SDMA_DBG(req, "Data Length = %u", len);
	return len;
}
static inline u32 get_lrh_len(struct hfi1_pkt_header hdr, u32 len)
{
	/* (Size of complete header - size of PBC) + 4B ICRC + data length */
	return ((sizeof(hdr) - sizeof(hdr.pbc)) + 4 + len);
}
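/*
 * Worked example (illustrative, assuming an 8B PBC within struct
 * hfi1_pkt_header): for a 64B header template, a 4096B payload yields
 * (64 - 8) + 4 + 4096 = 4156 bytes covered by the LRH length.
 */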
static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
{
	int ret = 0;
	unsigned npkts = 0;
	struct user_sdma_txreq *tx = NULL;
	struct hfi1_user_sdma_pkt_q *pq = NULL;
	struct user_sdma_iovec *iovec = NULL;

	if (!req->pq)
		return -EINVAL;

	pq = req->pq;

	/* If tx completion has reported an error, we are done. */
	if (test_bit(SDMA_REQ_HAS_ERROR, &req->flags)) {
		set_bit(SDMA_REQ_DONE_ERROR, &req->flags);
		return -EFAULT;
	}

	/*
	 * Check if we might have sent the entire request already
	 */
	if (unlikely(req->seqnum == req->info.npkts)) {
		if (!list_empty(&req->txps))
			goto dosend;
		return ret;
	}

	if (!maxpkts || maxpkts > req->info.npkts - req->seqnum)
		maxpkts = req->info.npkts - req->seqnum;

	while (npkts < maxpkts) {
		u32 datalen = 0, queued = 0, data_sent = 0;
		u64 iov_offset = 0;

		/*
		 * Check whether any of the completions have come back
		 * with errors. If so, we are not going to process any
		 * more packets from this request.
		 */
		if (test_bit(SDMA_REQ_HAS_ERROR, &req->flags)) {
			set_bit(SDMA_REQ_DONE_ERROR, &req->flags);
			return -EFAULT;
		}

		tx = kmem_cache_alloc(pq->txreq_cache, GFP_KERNEL);
		if (!tx)
			return -ENOMEM;

		tx->flags = 0;
		tx->req = req;
		tx->busycount = 0;
		INIT_LIST_HEAD(&tx->list);

		if (req->seqnum == req->info.npkts - 1)
			tx->flags |= TXREQ_FLAGS_REQ_LAST_PKT;

		/*
		 * Calculate the payload size - this is min of the fragment
		 * (MTU) size or the remaining bytes in the request but only
		 * if we have payload data.
		 */
		if (req->data_len) {
			iovec = &req->iovs[req->iov_idx];
			if (ACCESS_ONCE(iovec->offset) == iovec->iov.iov_len) {
				if (++req->iov_idx == req->data_iovs) {
					ret = -EFAULT;
					goto free_txreq;
				}
				iovec = &req->iovs[req->iov_idx];
				WARN_ON(iovec->offset);
			}

			datalen = compute_data_length(req, tx);
			if (!datalen) {
				SDMA_DBG(req,
					 "Request has data but pkt len is 0");
				ret = -EFAULT;
				goto free_tx;
			}
		}

		if (test_bit(SDMA_REQ_HAVE_AHG, &req->flags)) {
			if (!req->seqnum) {
				u16 pbclen = le16_to_cpu(req->hdr.pbc[0]);
				u32 lrhlen = get_lrh_len(req->hdr, datalen);
				/*
				 * Copy the request header into the tx header
				 * because the HW needs a cacheline-aligned
				 * address.
				 * This copy can be optimized out if the hdr
				 * member of user_sdma_request were also
				 * cacheline aligned.
				 */
				memcpy(&tx->hdr, &req->hdr, sizeof(tx->hdr));
				if (PBC2LRH(pbclen) != lrhlen) {
					pbclen = (pbclen & 0xf000) |
						LRH2PBC(lrhlen);
					tx->hdr.pbc[0] = cpu_to_le16(pbclen);
				}
				ret = sdma_txinit_ahg(&tx->txreq,
						      SDMA_TXREQ_F_AHG_COPY,
						      sizeof(tx->hdr) + datalen,
						      req->ahg_idx, 0, NULL, 0,
						      user_sdma_txreq_cb);
				if (ret)
					goto free_tx;
				ret = sdma_txadd_kvaddr(pq->dd, &tx->txreq,
							&tx->hdr,
							sizeof(tx->hdr));
				if (ret)
					goto free_txreq;
			} else {
				int changes;

				changes = set_txreq_header_ahg(req, tx,
							       datalen);
				if (changes < 0)
					goto free_tx;
				sdma_txinit_ahg(&tx->txreq,
						SDMA_TXREQ_F_USE_AHG,
						datalen, req->ahg_idx, changes,
						req->ahg, sizeof(req->hdr),
						user_sdma_txreq_cb);
			}
		} else {
			ret = sdma_txinit(&tx->txreq, 0, sizeof(req->hdr) +
					  datalen, user_sdma_txreq_cb);
			if (ret)
				goto free_tx;
			/*
			 * Modify the header for this packet. This only needs
			 * to be done if we are not going to use AHG. Otherwise,
			 * the HW will do it based on the changes we gave it
			 * during sdma_txinit_ahg().
			 */
			ret = set_txreq_header(req, tx, datalen);
			if (ret)
				goto free_txreq;
		}

		/*
		 * If the request contains any data vectors, add up to
		 * fragsize bytes to the descriptor.
		 */
		while (queued < datalen &&
		       (req->sent + data_sent) < req->data_len) {
			unsigned long base, offset;
			unsigned pageidx, len;

			base = (unsigned long)iovec->iov.iov_base;
			offset = offset_in_page(base + iovec->offset +
						iov_offset);
			pageidx = (((iovec->offset + iov_offset +
				     base) - (base & PAGE_MASK)) >> PAGE_SHIFT);
			len = offset + req->info.fragsize > PAGE_SIZE ?
				PAGE_SIZE - offset : req->info.fragsize;
			len = min((datalen - queued), len);
			ret = sdma_txadd_page(pq->dd, &tx->txreq,
					      iovec->pages[pageidx],
					      offset, len);
			if (ret) {
				SDMA_DBG(req, "SDMA txreq add page failed %d\n",
					 ret);
				goto free_txreq;
			}
			iov_offset += len;
			queued += len;
			data_sent += len;
			if (unlikely(queued < datalen &&
				     pageidx == iovec->npages &&
				     req->iov_idx < req->data_iovs - 1)) {
				iovec->offset += iov_offset;
				iovec = &req->iovs[++req->iov_idx];
				iov_offset = 0;
			}
		}
		/*
		 * The txreq was submitted successfully so we can update
		 * the counters.
		 */
		req->koffset += datalen;
		if (req_opcode(req->info.ctrl) == EXPECTED)
			req->tidoffset += datalen;
		req->sent += data_sent;
		if (req->data_len)
			iovec->offset += iov_offset;
		list_add_tail(&tx->txreq.list, &req->txps);
		/*
		 * It is important to increment this here as it is used to
		 * generate the BTH.PSN and, therefore, can't be bulk-updated
		 * outside of the loop.
		 */
		tx->seqnum = req->seqnum++;
		npkts++;
	}
dosend:
	ret = sdma_send_txlist(req->sde, &pq->busy, &req->txps);
	if (list_empty(&req->txps)) {
		req->seqsubmitted = req->seqnum;
		if (req->seqnum == req->info.npkts) {
			set_bit(SDMA_REQ_SEND_DONE, &req->flags);
			/*
			 * The txreq has already been submitted to the HW queue
			 * so we can free the AHG entry now. Corruption will not
			 * happen due to the sequential manner in which
			 * descriptors are processed.
			 */
			if (test_bit(SDMA_REQ_HAVE_AHG, &req->flags))
				sdma_ahg_free(req->sde, req->ahg_idx);
		}
	} else if (ret > 0) {
		req->seqsubmitted += ret;
		ret = 0;
	}
	return ret;

free_txreq:
	sdma_txclean(pq->dd, &tx->txreq);
free_tx:
	kmem_cache_free(pq->txreq_cache, tx);
	return ret;
}
/*
 * How many pages in this iovec element?
 */
static inline int num_user_pages(const struct iovec *iov)
{
	const unsigned long addr  = (unsigned long)iov->iov_base;
	const unsigned long len   = iov->iov_len;
	const unsigned long spage = addr & PAGE_MASK;
	const unsigned long epage = (addr + len - 1) & PAGE_MASK;

	return 1 + ((epage - spage) >> PAGE_SHIFT);
}
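/*
 * Worked example (illustrative, 4 KiB pages): a 100-byte iovec starting
 * at 0x1ff0 spans spage = 0x1000 and epage = 0x2000, so the function
 * returns 1 + ((0x2000 - 0x1000) >> 12) = 2 pinned pages.
 */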
static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages)
{
	u32 cleared = 0;
	struct sdma_mmu_node *node, *ptr;
	struct list_head to_evict = LIST_HEAD_INIT(to_evict);

	spin_lock(&pq->evict_lock);
	list_for_each_entry_safe_reverse(node, ptr, &pq->evict, list) {
		/* Make sure that no one is still using the node. */
		if (!atomic_read(&node->refcount)) {
			set_bit(SDMA_CACHE_NODE_EVICT, &node->flags);
			list_del_init(&node->list);
			list_add(&node->list, &to_evict);
			cleared += node->npages;
			if (cleared >= npages)
				break;
		}
	}
	spin_unlock(&pq->evict_lock);

	list_for_each_entry_safe(node, ptr, &to_evict, list)
		hfi1_mmu_rb_remove(&pq->sdma_rb_root, &node->rb);

	return cleared;
}
static int pin_vector_pages(struct user_sdma_request *req,
			    struct user_sdma_iovec *iovec)
{
	int ret = 0, pinned, npages, cleared;
	struct page **pages;
	struct hfi1_user_sdma_pkt_q *pq = req->pq;
	struct sdma_mmu_node *node = NULL;
	struct mmu_rb_node *rb_node;

	rb_node = hfi1_mmu_rb_extract(&pq->sdma_rb_root,
				      (unsigned long)iovec->iov.iov_base,
				      iovec->iov.iov_len);
	if (rb_node && !IS_ERR(rb_node))
		node = container_of(rb_node, struct sdma_mmu_node, rb);
	else
		rb_node = NULL;

	if (!node) {
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOMEM;

		node->rb.addr = (unsigned long)iovec->iov.iov_base;
		node->pq = pq;
		atomic_set(&node->refcount, 0);
		INIT_LIST_HEAD(&node->list);
	}

	npages = num_user_pages(&iovec->iov);
	if (node->npages < npages) {
		pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
		if (!pages) {
			SDMA_DBG(req, "Failed page array alloc");
			ret = -ENOMEM;
			goto bail;
		}
		memcpy(pages, node->pages, node->npages * sizeof(*pages));

		npages -= node->npages;

		/*
		 * If rb_node is NULL, it means that this is brand new node
		 * and, therefore not on the eviction list.
		 * If, however, the rb_node is non-NULL, it means that the
		 * node is already in RB tree and, therefore on the eviction
		 * list (nodes are unconditionally inserted in the eviction
		 * list). In that case, we have to remove the node prior to
		 * calling the eviction function in order to prevent it from
		 * freeing this node.
		 */
		if (rb_node) {
			spin_lock(&pq->evict_lock);
			list_del_init(&node->list);
			spin_unlock(&pq->evict_lock);
		}
retry:
		if (!hfi1_can_pin_pages(pq->dd, pq->n_locked, npages)) {
			cleared = sdma_cache_evict(pq, npages);
			if (cleared >= npages)
				goto retry;
		}
		pinned = hfi1_acquire_user_pages(
			((unsigned long)iovec->iov.iov_base +
			 (node->npages * PAGE_SIZE)), npages, 0,
			pages + node->npages);
		if (pinned < 0) {
			kfree(pages);
			ret = pinned;
			goto bail;
		}
		if (pinned != npages) {
			unpin_vector_pages(current->mm, pages, node->npages,
					   pinned);
			ret = -EFAULT;
			goto bail;
		}
		kfree(node->pages);
		node->rb.len = iovec->iov.iov_len;
		node->pages = pages;
		node->npages += pinned;
		npages = node->npages;
		spin_lock(&pq->evict_lock);
		list_add(&node->list, &pq->evict);
		pq->n_locked += pinned;
		spin_unlock(&pq->evict_lock);
	}
	iovec->pages = node->pages;
	iovec->npages = npages;
	iovec->node = node;

	ret = hfi1_mmu_rb_insert(&req->pq->sdma_rb_root, &node->rb);
	if (ret) {
		spin_lock(&pq->evict_lock);
		if (!list_empty(&node->list))
			list_del(&node->list);
		pq->n_locked -= node->npages;
		spin_unlock(&pq->evict_lock);
		goto bail;
	}
	return 0;
bail:
	if (rb_node)
		unpin_vector_pages(current->mm, node->pages, 0, node->npages);
	kfree(node);
	return ret;
}
static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
			       unsigned start, unsigned npages)
{
	hfi1_release_user_pages(mm, pages + start, npages, 0);
	kfree(pages);
}
static int check_header_template(struct user_sdma_request *req,
				 struct hfi1_pkt_header *hdr, u32 lrhlen,
				 u32 datalen)
{
	/*
	 * Perform safety checks for any type of packet:
	 * - transfer size is multiple of 64 bytes
	 * - packet length is multiple of 4 bytes
	 * - entire request length is multiple of 4 bytes
	 * - packet length is not larger than MTU size
	 *
	 * These checks are only done for the first packet of the
	 * transfer since the header is "given" to us by user space.
	 * For the remainder of the packets we compute the values.
	 */
	if (req->info.fragsize % PIO_BLOCK_SIZE ||
	    lrhlen & 0x3 || req->data_len & 0x3 ||
	    lrhlen > get_lrh_len(*hdr, req->info.fragsize))
		return -EINVAL;

	if (req_opcode(req->info.ctrl) == EXPECTED) {
		/*
		 * The header is checked only on the first packet. Furthermore,
		 * we ensure that at least one TID entry is copied when the
		 * request is submitted. Therefore, we don't have to verify that
		 * tididx points to something sane.
		 */
		u32 tidval = req->tids[req->tididx],
			tidlen = EXP_TID_GET(tidval, LEN) * PAGE_SIZE,
			tididx = EXP_TID_GET(tidval, IDX),
			tidctrl = EXP_TID_GET(tidval, CTRL),
			tidoff;
		__le32 kval = hdr->kdeth.ver_tid_offset;

		tidoff = KDETH_GET(kval, OFFSET) *
			  (KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ?
			   KDETH_OM_LARGE : KDETH_OM_SMALL);
		/*
		 * Expected receive packets have the following
		 * additional checks:
		 * - offset is not larger than the TID size
		 * - TIDCtrl values match between header and TID array
		 * - TID indexes match between header and TID array
		 */
		if ((tidoff + datalen > tidlen) ||
		    KDETH_GET(kval, TIDCTRL) != tidctrl ||
		    KDETH_GET(kval, TID) != tididx)
			return -EINVAL;
	}
	return 0;
}
/*
 * Correctly set the BTH.PSN field based on type of
 * transfer - eager packets can just increment the PSN but
 * expected packets encode generation and sequence in the
 * BTH.PSN field so just incrementing will result in errors.
 */
static inline u32 set_pkt_bth_psn(__be32 bthpsn, u8 expct, u32 frags)
{
	u32 val = be32_to_cpu(bthpsn),
		mask = (HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffffull :
			0xffffffull),
		psn = val & mask;
	if (expct)
		psn = (psn & ~BTH_SEQ_MASK) | ((psn + frags) & BTH_SEQ_MASK);
	else
		psn = psn + frags;
	return psn & mask;
}
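/*
 * Worked example (illustrative): with BTH_SEQ_MASK = 0x7ff, an expected
 * PSN of 0x123400 advanced by 3 frags becomes (0x123400 & ~0x7ff) |
 * ((0x123400 + 3) & 0x7ff) = 0x123403; the generation bits above bit 10
 * are preserved while only the 11-bit sequence increments.
 */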
static int set_txreq_header(struct user_sdma_request *req,
			    struct user_sdma_txreq *tx, u32 datalen)
{
	struct hfi1_user_sdma_pkt_q *pq = req->pq;
	struct hfi1_pkt_header *hdr = &tx->hdr;
	u16 pbclen;
	int ret;
	u32 tidval = 0, lrhlen = get_lrh_len(*hdr, datalen);

	/* Copy the header template to the request before modification */
	memcpy(hdr, &req->hdr, sizeof(*hdr));

	/*
	 * Check if the PBC and LRH length are mismatched. If so
	 * adjust both in the header.
	 */
	pbclen = le16_to_cpu(hdr->pbc[0]);
	if (PBC2LRH(pbclen) != lrhlen) {
		pbclen = (pbclen & 0xf000) | LRH2PBC(lrhlen);
		hdr->pbc[0] = cpu_to_le16(pbclen);
		hdr->lrh[2] = cpu_to_be16(lrhlen >> 2);
		/*
		 * This is the first packet in the sequence that has
		 * a "static" size that can be used for the rest of
		 * the packets (besides the last one).
		 */
		if (unlikely(req->seqnum == 2)) {
			/*
			 * From this point on the lengths in both the
			 * PBC and LRH are the same until the last
			 * packet.
			 * Adjust the template so we don't have to update
			 * every packet.
			 */
			req->hdr.pbc[0] = hdr->pbc[0];
			req->hdr.lrh[2] = hdr->lrh[2];
		}
	}
	/*
	 * We only have to modify the header if this is not the
	 * first packet in the request. Otherwise, we use the
	 * header given to us.
	 */
	if (unlikely(!req->seqnum)) {
		ret = check_header_template(req, hdr, lrhlen, datalen);
		if (ret)
			return ret;
		goto done;
	}

	hdr->bth[2] = cpu_to_be32(
		set_pkt_bth_psn(hdr->bth[2],
				(req_opcode(req->info.ctrl) == EXPECTED),
				req->seqnum));

	/* Set ACK request on last packet */
	if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT))
		hdr->bth[2] |= cpu_to_be32(1UL << 31);

	/* Set the new offset */
	hdr->kdeth.swdata[6] = cpu_to_le32(req->koffset);
	/* Expected packets have to fill in the new TID information */
	if (req_opcode(req->info.ctrl) == EXPECTED) {
		tidval = req->tids[req->tididx];
		/*
		 * If the offset puts us at the end of the current TID,
		 * advance everything.
		 */
		if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) *
					 PAGE_SIZE)) {
			req->tidoffset = 0;
			/*
			 * Since we don't copy all the TIDs, all at once,
			 * we have to check again.
			 */
			if (++req->tididx > req->n_tids - 1 ||
			    !req->tids[req->tididx]) {
				return -EINVAL;
			}
			tidval = req->tids[req->tididx];
		}
		req->omfactor = EXP_TID_GET(tidval, LEN) * PAGE_SIZE >=
			KDETH_OM_MAX_SIZE ? KDETH_OM_LARGE : KDETH_OM_SMALL;
		/* Set KDETH.TIDCtrl based on value for this TID. */
		KDETH_SET(hdr->kdeth.ver_tid_offset, TIDCTRL,
			  EXP_TID_GET(tidval, CTRL));
		/* Set KDETH.TID based on value for this TID */
		KDETH_SET(hdr->kdeth.ver_tid_offset, TID,
			  EXP_TID_GET(tidval, IDX));
		/* Clear KDETH.SH only on the last packet */
		if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT))
			KDETH_SET(hdr->kdeth.ver_tid_offset, SH, 0);
		/*
		 * Set the KDETH.OFFSET and KDETH.OM based on size of
		 * transfer.
		 */
		SDMA_DBG(req, "TID offset %ubytes %uunits om%u",
			 req->tidoffset, req->tidoffset / req->omfactor,
			 req->omfactor != KDETH_OM_SMALL);
		KDETH_SET(hdr->kdeth.ver_tid_offset, OFFSET,
			  req->tidoffset / req->omfactor);
		KDETH_SET(hdr->kdeth.ver_tid_offset, OM,
			  req->omfactor != KDETH_OM_SMALL);
	}
done:
	trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt,
				    req->info.comp_idx, hdr, tidval);
	return sdma_txadd_kvaddr(pq->dd, &tx->txreq, hdr, sizeof(*hdr));
}
static int set_txreq_header_ahg(struct user_sdma_request *req,
				struct user_sdma_txreq *tx, u32 len)
{
	int diff = 0;
	struct hfi1_user_sdma_pkt_q *pq = req->pq;
	struct hfi1_pkt_header *hdr = &req->hdr;
	u16 pbclen = le16_to_cpu(hdr->pbc[0]);
	u32 val32, tidval = 0, lrhlen = get_lrh_len(*hdr, len);

	if (PBC2LRH(pbclen) != lrhlen) {
		/* PBC.PbcLengthDWs */
		AHG_HEADER_SET(req->ahg, diff, 0, 0, 12,
			       cpu_to_le16(LRH2PBC(lrhlen)));
		/* LRH.PktLen (we need the full 16 bits due to byte swap) */
		AHG_HEADER_SET(req->ahg, diff, 3, 0, 16,
			       cpu_to_be16(lrhlen >> 2));
	}

	/*
	 * Do the common updates
	 */
	/* BTH.PSN and BTH.A */
	val32 = (be32_to_cpu(hdr->bth[2]) + req->seqnum) &
		(HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffff : 0xffffff);
	if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT))
		val32 |= 1UL << 31;
	AHG_HEADER_SET(req->ahg, diff, 6, 0, 16, cpu_to_be16(val32 >> 16));
	AHG_HEADER_SET(req->ahg, diff, 6, 16, 16, cpu_to_be16(val32 & 0xffff));
	/* KDETH.Offset */
	AHG_HEADER_SET(req->ahg, diff, 15, 0, 16,
		       cpu_to_le16(req->koffset & 0xffff));
	AHG_HEADER_SET(req->ahg, diff, 15, 16, 16,
		       cpu_to_le16(req->koffset >> 16));
	if (req_opcode(req->info.ctrl) == EXPECTED) {
		__le16 val;

		tidval = req->tids[req->tididx];

		/*
		 * If the offset puts us at the end of the current TID,
		 * advance everything.
		 */
		if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) *
					 PAGE_SIZE)) {
			req->tidoffset = 0;
			/*
			 * Since we don't copy all the TIDs, all at once,
			 * we have to check again.
			 */
			if (++req->tididx > req->n_tids - 1 ||
			    !req->tids[req->tididx]) {
				return -EINVAL;
			}
			tidval = req->tids[req->tididx];
		}
		req->omfactor = ((EXP_TID_GET(tidval, LEN) *
				  PAGE_SIZE) >=
				 KDETH_OM_MAX_SIZE) ? KDETH_OM_LARGE :
				 KDETH_OM_SMALL;
		/* KDETH.OM and KDETH.OFFSET (TID) */
		AHG_HEADER_SET(req->ahg, diff, 7, 0, 16,
			       ((!!(req->omfactor - KDETH_OM_SMALL)) << 15 |
				((req->tidoffset / req->omfactor) & 0x7fff)));
		/* KDETH.TIDCtrl, KDETH.TID */
		val = cpu_to_le16(((EXP_TID_GET(tidval, CTRL) & 0x3) << 10) |
					(EXP_TID_GET(tidval, IDX) & 0x3ff));
		/* Clear KDETH.SH on last packet */
		if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT)) {
			val |= cpu_to_le16(KDETH_GET(hdr->kdeth.ver_tid_offset,
						     INTR) >> 16);
			val &= cpu_to_le16(~(1U << 13));
			AHG_HEADER_SET(req->ahg, diff, 7, 16, 14, val);
		} else {
			AHG_HEADER_SET(req->ahg, diff, 7, 16, 12, val);
		}
	}

	trace_hfi1_sdma_user_header_ahg(pq->dd, pq->ctxt, pq->subctxt,
					req->info.comp_idx, req->sde->this_idx,
					req->ahg_idx, req->ahg, diff, tidval);
	return diff;
}
/*
 * SDMA tx request completion callback. Called when the SDMA progress
 * state machine gets notification that the SDMA descriptors for this
 * tx request have been processed by the DMA engine. Called in
 * interrupt context.
 */
static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
{
	struct user_sdma_txreq *tx =
		container_of(txreq, struct user_sdma_txreq, txreq);
	struct user_sdma_request *req;
	struct hfi1_user_sdma_pkt_q *pq;
	struct hfi1_user_sdma_comp_q *cq;
	u16 idx;

	if (!tx->req)
		return;

	req = tx->req;
	pq = req->pq;
	cq = req->cq;

	if (status != SDMA_TXREQ_S_OK) {
		SDMA_DBG(req, "SDMA completion with error %d",
			 status);
		set_bit(SDMA_REQ_HAS_ERROR, &req->flags);
	}

	req->seqcomp = tx->seqnum;
	kmem_cache_free(pq->txreq_cache, tx);
	tx = NULL;

	idx = req->info.comp_idx;
	if (req->status == -1 && status == SDMA_TXREQ_S_OK) {
		if (req->seqcomp == req->info.npkts - 1) {
			req->status = 0;
			user_sdma_free_request(req, false);
			pq_update(pq);
			set_comp_state(pq, cq, idx, COMPLETE, 0);
		}
	} else {
		if (status != SDMA_TXREQ_S_OK)
			req->status = status;
		if (req->seqcomp == (ACCESS_ONCE(req->seqsubmitted) - 1) &&
		    (test_bit(SDMA_REQ_SEND_DONE, &req->flags) ||
		     test_bit(SDMA_REQ_DONE_ERROR, &req->flags))) {
			user_sdma_free_request(req, false);
			pq_update(pq);
			set_comp_state(pq, cq, idx, ERROR, req->status);
		}
	}
}
static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq)
{
	if (atomic_dec_and_test(&pq->n_reqs)) {
		xchg(&pq->state, SDMA_PKT_Q_INACTIVE);
		wake_up(&pq->wait);
	}
}
static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
{
	if (!list_empty(&req->txps)) {
		struct sdma_txreq *t, *p;

		list_for_each_entry_safe(t, p, &req->txps, list) {
			struct user_sdma_txreq *tx =
				container_of(t, struct user_sdma_txreq, txreq);
			list_del_init(&t->list);
			sdma_txclean(req->pq->dd, t);
			kmem_cache_free(req->pq->txreq_cache, tx);
		}
	}
	if (req->data_iovs) {
		struct sdma_mmu_node *node;
		int i;

		for (i = 0; i < req->data_iovs; i++) {
			node = req->iovs[i].node;
			if (!node)
				continue;

			if (unpin)
				hfi1_mmu_rb_remove(&req->pq->sdma_rb_root,
						   &node->rb);
			else
				atomic_dec(&node->refcount);
		}
	}
	kfree(req->tids);
	clear_bit(SDMA_REQ_IN_USE, &req->flags);
}
static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
				  struct hfi1_user_sdma_comp_q *cq,
				  u16 idx, enum hfi1_sdma_comp_state state,
				  int ret)
{
	hfi1_cdbg(SDMA, "[%u:%u:%u:%u] Setting completion status %u %d",
		  pq->dd->unit, pq->ctxt, pq->subctxt, idx, state, ret);
	cq->comps[idx].status = state;
	if (state == ERROR)
		cq->comps[idx].errcode = -ret;
	trace_hfi1_sdma_user_completion(pq->dd, pq->ctxt, pq->subctxt,
					idx, state, ret);
}
static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
			   unsigned long len)
{
	return (bool)(node->addr == addr);
}
static int sdma_rb_insert(struct rb_root *root, struct mmu_rb_node *mnode)
{
	struct sdma_mmu_node *node =
		container_of(mnode, struct sdma_mmu_node, rb);

	atomic_inc(&node->refcount);
	return 0;
}
static void sdma_rb_remove(struct rb_root *root, struct mmu_rb_node *mnode,
			   struct mm_struct *mm)
{
	struct sdma_mmu_node *node =
		container_of(mnode, struct sdma_mmu_node, rb);

	spin_lock(&node->pq->evict_lock);
	/*
	 * We've been called by the MMU notifier but this node has been
	 * scheduled for eviction. The eviction function will take care
	 * of freeing this node.
	 * We have to take the above lock first because we are racing
	 * against the setting of the bit in the eviction function.
	 */
	if (mm && test_bit(SDMA_CACHE_NODE_EVICT, &node->flags)) {
		spin_unlock(&node->pq->evict_lock);
		return;
	}

	if (!list_empty(&node->list))
		list_del(&node->list);
	node->pq->n_locked -= node->npages;
	spin_unlock(&node->pq->evict_lock);

	/*
	 * If mm is set, we are being called by the MMU notifier and we
	 * should not pass a mm_struct to unpin_vector_page(). This is to
	 * prevent a deadlock when hfi1_release_user_pages() attempts to
	 * take the mmap_sem, which the MMU notifier has already taken.
	 */
	unpin_vector_pages(mm ? NULL : current->mm, node->pages, 0,
			   node->npages);
	/*
	 * If called by the MMU notifier, we have to adjust the pinned
	 * page count ourselves.
	 */
	if (mm)
		mm->pinned_vm -= node->npages;
	kfree(node);
}
static int sdma_rb_invalidate(struct rb_root *root, struct mmu_rb_node *mnode)
{
	struct sdma_mmu_node *node =
		container_of(mnode, struct sdma_mmu_node, rb);

	if (!atomic_read(&node->refcount))
		return 1;
	return 0;
}