/*
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ifndef HFI1_VERBS_H
#define HFI1_VERBS_H

#include <linux/types.h>
#include <linux/seqlock.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/kref.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_mad.h>

struct hfi1_ctxtdata;
struct hfi1_pportdata;
struct hfi1_devdata;
struct hfi1_packet;

#include "iowait.h"

#define HFI1_MAX_RDMA_ATOMIC	16
#define HFI1_GUIDS_PER_PORT	5

/*
 * Increment this value if any changes that break userspace ABI
 * compatibility are made.
 */
#define HFI1_UVERBS_ABI_VERSION	2

/*
 * Define an ib_cq_notify value that is not valid so we know when CQ
 * notifications are armed.
 */
#define IB_CQ_NONE	(IB_CQ_NEXT_COMP + 1)

#define IB_SEQ_NAK	(3 << 29)

/* AETH NAK opcode values */
#define IB_RNR_NAK			0x20
#define IB_NAK_PSN_ERROR		0x60
#define IB_NAK_INVALID_REQUEST		0x61
#define IB_NAK_REMOTE_ACCESS_ERROR	0x62
#define IB_NAK_REMOTE_OPERATIONAL_ERROR	0x63
#define IB_NAK_INVALID_RD_REQUEST	0x64

/* Flags for checking QP state (see ib_hfi1_state_ops[]) */
#define HFI1_POST_SEND_OK		0x01
#define HFI1_POST_RECV_OK		0x02
#define HFI1_PROCESS_RECV_OK		0x04
#define HFI1_PROCESS_SEND_OK		0x08
#define HFI1_PROCESS_NEXT_SEND_OK	0x10
#define HFI1_FLUSH_SEND			0x20
#define HFI1_FLUSH_RECV			0x40
#define HFI1_PROCESS_OR_FLUSH_SEND \
	(HFI1_PROCESS_SEND_OK | HFI1_FLUSH_SEND)
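
/*
 * Illustrative sketch (not part of the driver): a post-send path would
 * typically test the state-ops table entry for the current QP state
 * before queuing a work request, along the lines of:
 *
 *	if (!(ib_hfi1_state_ops[qp->state] & HFI1_POST_SEND_OK))
 *		return -EINVAL;
 *
 * ib_hfi1_state_ops[] is declared at the end of this header; the exact
 * error handling above is an assumption for illustration only.
 */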

/* IB Performance Manager status values */
#define IB_PMA_SAMPLE_STATUS_DONE	0x00
#define IB_PMA_SAMPLE_STATUS_STARTED	0x01
#define IB_PMA_SAMPLE_STATUS_RUNNING	0x02

/* Mandatory IB performance counter select values. */
#define IB_PMA_PORT_XMIT_DATA	cpu_to_be16(0x0001)
#define IB_PMA_PORT_RCV_DATA	cpu_to_be16(0x0002)
#define IB_PMA_PORT_XMIT_PKTS	cpu_to_be16(0x0003)
#define IB_PMA_PORT_RCV_PKTS	cpu_to_be16(0x0004)
#define IB_PMA_PORT_XMIT_WAIT	cpu_to_be16(0x0005)

#define HFI1_VENDOR_IPG		cpu_to_be16(0xFFA0)

#define IB_BTH_REQ_ACK		(1 << 31)
#define IB_BTH_SOLICITED	(1 << 23)
#define IB_BTH_MIG_REQ		(1 << 22)

#define IB_GRH_VERSION		6
#define IB_GRH_VERSION_MASK	0xF
#define IB_GRH_VERSION_SHIFT	28
#define IB_GRH_TCLASS_MASK	0xFF
#define IB_GRH_TCLASS_SHIFT	20
#define IB_GRH_FLOW_MASK	0xFFFFF
#define IB_GRH_FLOW_SHIFT	0
#define IB_GRH_NEXT_HDR		0x1B

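/*
 * For illustration only: the masks and shifts above describe the first
 * 32-bit word of the GRH (version, traffic class, flow label).  A sender
 * building that word by hand might do something like:
 *
 *	u32 w0 = (IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
 *		 ((tclass & IB_GRH_TCLASS_MASK) << IB_GRH_TCLASS_SHIFT) |
 *		 (flow & IB_GRH_FLOW_MASK);
 *	grh->version_tclass_flow = cpu_to_be32(w0);
 *
 * "tclass" and "flow" are hypothetical locals; see hfi1_make_grh()
 * below for the real construction.
 */
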
#define IB_DEFAULT_GID_PREFIX	cpu_to_be64(0xfe80000000000000ULL)

/* flags passed by hfi1_ib_rcv() */
enum {
	HFI1_HAS_GRH = (1 << 0),
};

struct ib_reth {
	__be64 vaddr;
	__be32 rkey;
	__be32 length;
} __packed;

struct ib_atomic_eth {
	__be32 vaddr[2];	/* unaligned so access as 2 32-bit words */
	__be32 rkey;
	__be64 swap_data;
	__be64 compare_data;
} __packed;

union ib_ehdrs {
	struct {
		__be32 deth[2];
		__be32 imm_data;
	} ud;
	struct {
		struct ib_reth reth;
		__be32 imm_data;
	} rc;
	struct {
		__be32 aeth;
		__be32 atomic_ack_eth[2];
	} at;
	__be32 imm_data;
	__be32 aeth;
	struct ib_atomic_eth atomic_eth;
} __packed;

struct hfi1_other_headers {
	__be32 bth[3];
	union ib_ehdrs u;
} __packed;

/*
 * Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes
 * long (72 w/ imm_data).  Only the first 56 bytes of the IB header
 * will be in the eager header buffer.  The remaining 12 or 16 bytes
 * are in the data buffer.
 */
struct hfi1_ib_header {
	__be16 lrh[4];
	union {
		struct {
			struct ib_grh grh;
			struct hfi1_other_headers oth;
		} l;
		struct hfi1_other_headers oth;
	} u;
} __packed;

struct ahg_ib_header {
	struct sdma_engine *sde;
	u32 ahgdesc[2];
	u16 tx_flags;
	u8 ahgcount;
	u8 ahgidx;
	struct hfi1_ib_header ibh;
};

struct hfi1_pio_header {
	__le64 pbc;
	struct hfi1_ib_header hdr;
} __packed;

/*
 * Used to force cacheline alignment for AHG.
 */
struct tx_pio_header {
	struct hfi1_pio_header phdr;
} ____cacheline_aligned;

/*
 * There is one struct hfi1_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct hfi1_mcast_qp.
 */
struct hfi1_mcast_qp {
	struct list_head list;
	struct hfi1_qp *qp;
};

struct hfi1_mcast {
	struct rb_node rb_node;
	union ib_gid mgid;
	struct list_head qp_list;
	wait_queue_head_t wait;
	atomic_t refcount;
	int n_attached;
};

/* Protection domain */
struct hfi1_pd {
	struct ib_pd ibpd;
	int user;		/* non-zero if created from user space */
};

/* Address Handle */
struct hfi1_ah {
	struct ib_ah ibah;
	struct ib_ah_attr attr;
	atomic_t refcount;
};

/*
 * This structure is used by hfi1_mmap() to validate an offset
 * when an mmap() request is made.  The vm_area_struct then uses
 * this as its vm_private_data.
 */
struct hfi1_mmap_info {
	struct list_head pending_mmaps;
	struct ib_ucontext *context;
	void *obj;
	__u64 offset;
	struct kref ref;
	unsigned size;
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and completion queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 */
struct hfi1_cq_wc {
	u32 head;		/* index of next entry to fill */
	u32 tail;		/* index of next ib_poll_cq() entry */
	union {
		/* these are actually size ibcq.cqe + 1 */
		struct ib_uverbs_wc uqueue[0];
		struct ib_wc kqueue[0];
	};
};
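
/*
 * Sizing sketch (an assumption, for illustration): because uqueue[] and
 * kqueue[] are zero-length arrays holding ibcq.cqe + 1 entries, the
 * single allocation for a user-mapped CQ would be computed roughly as:
 *
 *	sz = sizeof(struct hfi1_cq_wc) +
 *	     (cqe + 1) * sizeof(struct ib_uverbs_wc);
 *	wc = vmalloc_user(sz);
 *
 * A kernel-only CQ would size with sizeof(struct ib_wc) instead; the
 * actual sizing lives in the CQ creation code, not in this header.
 */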

/*
 * The completion queue structure.
 */
struct hfi1_cq {
	struct ib_cq ibcq;
	struct kthread_work comptask;
	struct hfi1_devdata *dd;
	spinlock_t lock;	/* protect changes in this struct */
	u8 notify;
	u8 triggered;
	struct hfi1_cq_wc *queue;
	struct hfi1_mmap_info *ip;
};

/*
 * A segment is a linear region of low physical memory.
 * Used by the verbs layer.
 */
struct hfi1_seg {
	void *vaddr;
	size_t length;
};

/* The number of hfi1_segs that fit in a page. */
#define HFI1_SEGSZ	(PAGE_SIZE / sizeof(struct hfi1_seg))

struct hfi1_segarray {
	struct hfi1_seg segs[HFI1_SEGSZ];
};

struct hfi1_mregion {
	struct ib_pd *pd;	/* shares refcnt of ibmr.pd */
	u64 user_base;		/* User's address for this region */
	u64 iova;		/* IB start address of this region */
	size_t length;
	u32 lkey;
	u32 offset;		/* offset (bytes) to start of region */
	int access_flags;
	u32 max_segs;		/* number of hfi1_segs in all the arrays */
	u32 mapsz;		/* size of the map array */
	u8 page_shift;		/* 0 - non-uniform or non-power-of-2 sizes */
	u8 lkey_published;	/* in global table */
	struct completion comp;	/* complete when refcount goes to zero */
	atomic_t refcount;
	struct hfi1_segarray *map[0];	/* the segments */
};

/*
 * These keep track of the copy progress within a memory region.
 * Used by the verbs layer.
 */
struct hfi1_sge {
	struct hfi1_mregion *mr;
	void *vaddr;		/* kernel virtual address of segment */
	u32 sge_length;		/* length of the SGE */
	u32 length;		/* remaining length of the segment */
	u16 m;			/* current index: mr->map[m] */
	u16 n;			/* current index: mr->map[m]->segs[n] */
};

/* Memory region */
struct hfi1_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;
	struct hfi1_mregion mr;	/* must be last */
};

/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct hfi1_swqe {
	struct ib_send_wr wr;	/* don't use wr.sg_list */
	u32 psn;		/* first packet sequence number */
	u32 lpsn;		/* last packet sequence number */
	u32 ssn;		/* send sequence number */
	u32 length;		/* total length of data in sg_list */
	struct hfi1_sge sg_list[0];
};

/*
 * Receive work request queue entry.
 * The size of the sg_list is determined when the QP (or SRQ) is created
 * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
 */
struct hfi1_rwqe {
	u64 wr_id;
	u8 num_sge;
	struct ib_sge sg_list[0];
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and receive work queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 * Note that the wq array elements are variable size so you can't
 * just index into the array to get the N'th element;
 * use get_rwqe_ptr() instead.
 */
struct hfi1_rwq {
	u32 head;		/* new work requests posted to the head */
	u32 tail;		/* receives pull requests from here. */
	struct hfi1_rwqe wq[0];
};

struct hfi1_rq {
	struct hfi1_rwq *wq;
	u32 size;		/* size of RWQE array */
	u8 max_sge;
	/* protect changes in this struct */
	spinlock_t lock ____cacheline_aligned_in_smp;
};

struct hfi1_srq {
	struct ib_srq ibsrq;
	struct hfi1_rq rq;
	struct hfi1_mmap_info *ip;
	/* send signal when number of RWQEs < limit */
	u32 limit;
};

struct hfi1_sge_state {
	struct hfi1_sge *sg_list;	/* next SGE to be used if any */
	struct hfi1_sge sge;	/* progress state for the current SGE */
	u32 total_len;
	u8 num_sge;
};

/*
 * This structure holds the information that the send tasklet needs
 * to send an RDMA read response or atomic operation.
 */
struct hfi1_ack_entry {
	u8 opcode;
	u8 sent;
	u32 psn;
	u32 lpsn;
	union {
		struct hfi1_sge rdma_sge;
		u64 atomic_data;
	};
};

/*
 * Variables prefixed with s_ are for the requester (sender).
 * Variables prefixed with r_ are for the responder (receiver).
 * Variables prefixed with ack_ are for responder replies.
 *
 * Common variables are protected by both r_rq.lock and s_lock, taken in
 * that order; this only happens in modify_qp() or when changing the
 * QP state.
 */
struct hfi1_qp {
	struct ib_qp ibqp;
	/* read mostly fields above and below */
	struct ib_ah_attr remote_ah_attr;
	struct ib_ah_attr alt_ah_attr;
	struct hfi1_qp __rcu *next;	/* link list for QPN hash table */
	struct hfi1_swqe *s_wq;		/* send work queue */
	struct hfi1_mmap_info *ip;
	struct ahg_ib_header *s_hdr;	/* next packet header to send */
	u8 s_sc;			/* SC[0..4] for next packet */
	unsigned long timeout_jiffies;	/* computed from timeout */

	enum ib_mtu path_mtu;
	int srate_mbps;		/* s_srate (below) converted to Mbit/s */
	u32 remote_qpn;
	u32 pmtu;		/* decoded from path_mtu */
	u32 qkey;		/* QKEY for this QP (for UD or RD) */
	u32 s_size;		/* send work queue size */
	u32 s_rnr_timeout;	/* number of milliseconds for RNR timeout */
	u32 s_ahgpsn;		/* set to the psn in the copy of the header */

	u8 state;		/* QP state */
	u8 allowed_ops;		/* high order bits of allowed opcodes */
	u8 qp_access_flags;
	u8 alt_timeout;		/* Alternate path timeout for this QP */
	u8 timeout;		/* Timeout for this QP */
	u8 s_srate;
	u8 s_mig_state;
	u8 port_num;
	u8 s_pkey_index;	/* PKEY index to use */
	u8 s_alt_pkey_index;	/* Alternate path PKEY index to use */
	u8 r_max_rd_atomic;	/* max number of RDMA read/atomic to receive */
	u8 s_max_rd_atomic;	/* max number of RDMA read/atomic to send */
	u8 s_retry_cnt;		/* number of times to retry */
	u8 s_rnr_retry_cnt;
	u8 r_min_rnr_timer;	/* retry timeout value for RNR NAKs */
	u8 s_max_sge;		/* size of s_wq->sg_list */
	u8 s_draining;

	/* start of read/write fields */
	atomic_t refcount ____cacheline_aligned_in_smp;
	wait_queue_head_t wait;

	struct hfi1_ack_entry s_ack_queue[HFI1_MAX_RDMA_ATOMIC + 1]
		____cacheline_aligned_in_smp;
	struct hfi1_sge_state s_rdma_read_sge;

	spinlock_t r_lock ____cacheline_aligned_in_smp;	/* used for APM */
	unsigned long r_aflags;
	u64 r_wr_id;		/* ID for current receive WQE */
	u32 r_ack_psn;		/* PSN for next ACK or atomic ACK */
	u32 r_len;		/* total length of r_sge */
	u32 r_rcv_len;		/* receive data len processed */
	u32 r_psn;		/* expected rcv packet sequence number */
	u32 r_msn;		/* message sequence number */

	u8 r_state;		/* opcode of last packet received */
	u8 r_flags;
	u8 r_head_ack_queue;	/* index into s_ack_queue[] */

	struct list_head rspwait;	/* link for waiting to respond */

	struct hfi1_sge_state r_sge;	/* current receive data */
	struct hfi1_rq r_rq;		/* receive work queue */

	spinlock_t s_lock ____cacheline_aligned_in_smp;
	struct hfi1_sge_state *s_cur_sge;
	u32 s_flags;
	struct hfi1_swqe *s_wqe;
	struct hfi1_sge_state s_sge;	/* current send request data */
	struct hfi1_mregion *s_rdma_mr;
	struct sdma_engine *s_sde;	/* current sde */
	u32 s_cur_size;		/* size of send packet in bytes */
	u32 s_len;		/* total length of s_sge */
	u32 s_rdma_read_len;	/* total length of s_rdma_read_sge */
	u32 s_next_psn;		/* PSN for next request */
	u32 s_last_psn;		/* last response PSN processed */
	u32 s_sending_psn;	/* lowest PSN that is being sent */
	u32 s_sending_hpsn;	/* highest PSN that is being sent */
	u32 s_psn;		/* current packet sequence number */
	u32 s_ack_rdma_psn;	/* PSN for sending RDMA read responses */
	u32 s_ack_psn;		/* PSN for acking sends and RDMA writes */
	u32 s_head;		/* new entries added here */
	u32 s_tail;		/* next entry to process */
	u32 s_cur;		/* current work queue entry */
	u32 s_acked;		/* last un-ACK'ed entry */
	u32 s_last;		/* last completed entry */
	u32 s_ssn;		/* SSN of tail entry */
	u32 s_lsn;		/* limit sequence number (credit) */
	u16 s_hdrwords;		/* size of s_hdr in 32 bit words */
	u16 s_rdma_ack_cnt;
	s8 s_ahgidx;
	u8 s_state;		/* opcode of last packet sent */
	u8 s_ack_state;		/* opcode of packet to ACK */
	u8 s_nak_state;		/* non-zero if NAK is pending */
	u8 r_nak_state;		/* non-zero if NAK is pending */
	u8 s_retry;		/* requester retry counter */
	u8 s_rnr_retry;		/* requester RNR retry counter */
	u8 s_num_rd_atomic;	/* number of RDMA read/atomic pending */
	u8 s_tail_ack_queue;	/* index into s_ack_queue[] */

	struct hfi1_sge_state s_ack_rdma_sge;
	struct timer_list s_timer;

	struct iowait s_iowait;

	struct hfi1_sge r_sg_list[0]	/* verified SGEs */
		____cacheline_aligned_in_smp;
};

/*
 * Atomic bit definitions for r_aflags.
 */
#define HFI1_R_WRID_VALID	0
#define HFI1_R_REWIND_SGE	1

/*
 * Bit definitions for r_flags.
 */
#define HFI1_R_REUSE_SGE	0x01
#define HFI1_R_RDMAR_SEQ	0x02
#define HFI1_R_RSP_NAK		0x04
#define HFI1_R_RSP_SEND		0x08
#define HFI1_R_COMM_EST		0x10

/*
 * Bit definitions for s_flags.
 *
 * HFI1_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled
 * HFI1_S_BUSY - send tasklet is processing the QP
 * HFI1_S_TIMER - the RC retry timer is active
 * HFI1_S_RESP_PENDING - an RC response is pending
 * HFI1_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
 * HFI1_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
 *                     before processing the next SWQE
 * HFI1_S_WAIT_RDMAR - waiting for an RDMA read or atomic SWQE to complete
 *                     before processing the next SWQE
 * HFI1_S_WAIT_RNR - waiting for RNR timeout
 * HFI1_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
 * HFI1_S_WAIT_DMA - waiting for send DMA queue to drain before generating
 *                   the next send completion entry not via send DMA
 * HFI1_S_WAIT_PIO - waiting for a send buffer to be available
 * HFI1_S_WAIT_TX - waiting for a struct verbs_txreq to be available
 * HFI1_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
 * HFI1_S_WAIT_KMEM - waiting for kernel memory to be available
 * HFI1_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
 * HFI1_S_WAIT_ACK - waiting for an ACK packet before sending more requests
 * HFI1_S_SEND_ONE - send one packet, request ACK, then wait for ACK
 * HFI1_S_ECN - a BECN was queued to the send engine
 */
#define HFI1_S_SIGNAL_REQ_WR	0x0001
#define HFI1_S_BUSY		0x0002
#define HFI1_S_TIMER		0x0004
#define HFI1_S_RESP_PENDING	0x0008
#define HFI1_S_ACK_PENDING	0x0010
#define HFI1_S_WAIT_FENCE	0x0020
#define HFI1_S_WAIT_RDMAR	0x0040
#define HFI1_S_WAIT_RNR		0x0080
#define HFI1_S_WAIT_SSN_CREDIT	0x0100
#define HFI1_S_WAIT_DMA		0x0200
#define HFI1_S_WAIT_PIO		0x0400
#define HFI1_S_WAIT_TX		0x0800
#define HFI1_S_WAIT_DMA_DESC	0x1000
#define HFI1_S_WAIT_KMEM	0x2000
#define HFI1_S_WAIT_PSN		0x4000
#define HFI1_S_WAIT_ACK		0x8000
#define HFI1_S_SEND_ONE		0x10000
#define HFI1_S_UNLIMITED_CREDIT	0x20000
#define HFI1_S_AHG_VALID	0x40000
#define HFI1_S_AHG_CLEAR	0x80000
#define HFI1_S_ECN		0x100000

/*
 * Wait flags that would prevent any packet type from being sent.
 */
#define HFI1_S_ANY_WAIT_IO (HFI1_S_WAIT_PIO | HFI1_S_WAIT_TX | \
	HFI1_S_WAIT_DMA_DESC | HFI1_S_WAIT_KMEM)

/*
 * Wait flags that would prevent send work requests from making progress.
 */
#define HFI1_S_ANY_WAIT_SEND (HFI1_S_WAIT_FENCE | HFI1_S_WAIT_RDMAR | \
	HFI1_S_WAIT_RNR | HFI1_S_WAIT_SSN_CREDIT | HFI1_S_WAIT_DMA | \
	HFI1_S_WAIT_PSN | HFI1_S_WAIT_ACK)

#define HFI1_S_ANY_WAIT (HFI1_S_ANY_WAIT_IO | HFI1_S_ANY_WAIT_SEND)

#define HFI1_PSN_CREDIT 16

/*
 * Since struct hfi1_swqe is not a fixed size, we can't simply index into
 * struct hfi1_qp.s_wq.  This function does the array index computation.
 */
static inline struct hfi1_swqe *get_swqe_ptr(struct hfi1_qp *qp,
					     unsigned n)
{
	return (struct hfi1_swqe *)((char *)qp->s_wq +
				    (sizeof(struct hfi1_swqe) +
				     qp->s_max_sge *
				     sizeof(struct hfi1_sge)) * n);
}
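
/*
 * Layout sketch (illustrative): each send queue entry is a struct
 * hfi1_swqe followed immediately by its s_max_sge SGEs, so with
 * s_max_sge == 2 the stride of each entry is
 *
 *	sizeof(struct hfi1_swqe) + 2 * sizeof(struct hfi1_sge)
 *
 * and entry n starts at s_wq + n * stride.  A walk over the circular
 * queue is assumed to look like:
 *
 *	for (i = qp->s_last; i != qp->s_head; ) {
 *		struct hfi1_swqe *wqe = get_swqe_ptr(qp, i);
 *		...
 *		if (++i == qp->s_size)
 *			i = 0;
 *	}
 */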

/*
 * Since struct hfi1_rwqe is not a fixed size, we can't simply index into
 * struct hfi1_rwq.wq.  This function does the array index computation.
 */
static inline struct hfi1_rwqe *get_rwqe_ptr(struct hfi1_rq *rq, unsigned n)
{
	return (struct hfi1_rwqe *)
		((char *)rq->wq->wq +
		 (sizeof(struct hfi1_rwqe) +
		  rq->max_sge * sizeof(struct ib_sge)) * n);
}

#define MAX_LKEY_TABLE_BITS 23

struct hfi1_lkey_table {
	spinlock_t lock;	/* protect changes in this struct */
	u32 next;		/* next unused index (speeds search) */
	u32 gen;		/* generation count */
	u32 max;		/* size of the table */
	struct hfi1_mregion __rcu **table;
};

struct hfi1_opcode_stats {
	u64 n_packets;		/* number of packets */
	u64 n_bytes;		/* total number of bytes */
};

struct hfi1_opcode_stats_perctx {
	struct hfi1_opcode_stats stats[256];
};

static inline void inc_opstats(
	u32 tlen,
	struct hfi1_opcode_stats *stats)
{
#ifdef CONFIG_DEBUG_FS
	stats->n_bytes += tlen;
	stats->n_packets++;
#endif
}

struct hfi1_ibport {
	struct hfi1_qp __rcu *qp[2];
	struct ib_mad_agent *send_agent;	/* agent for SMI (traps) */
	struct hfi1_ah *sm_ah;
	struct hfi1_ah *smi_ah;
	struct rb_root mcast_tree;
	spinlock_t lock;	/* protect changes in this struct */

	/* non-zero when timer is set */
	unsigned long mkey_lease_timeout;
	unsigned long trap_timeout;
	__be64 gid_prefix;	/* in network order */
	__be64 mkey;
	__be64 guids[HFI1_GUIDS_PER_PORT - 1];	/* writable GUIDs */
	u64 tid;		/* TID for traps */
	u64 n_rc_resends;
	u64 n_seq_naks;
	u64 n_rdma_seq;
	u64 n_rnr_naks;
	u64 n_other_naks;
	u64 n_loop_pkts;
	u64 n_pkt_drops;
	u64 n_vl15_dropped;
	u64 n_rc_timeouts;
	u64 n_dmawait;
	u64 n_unaligned;
	u64 n_rc_dupreq;
	u64 n_rc_seqnak;

	/* Hot-path per CPU counters to avoid cacheline trading on update */
	u64 z_rc_acks;
	u64 z_rc_qacks;
	u64 z_rc_delayed_comp;
	u64 __percpu *rc_acks;
	u64 __percpu *rc_qacks;
	u64 __percpu *rc_delayed_comp;

	u32 port_cap_flags;
	u32 pma_sample_start;
	u32 pma_sample_interval;
	__be16 pma_counter_select[5];
	u16 pma_tag;
	u16 pkey_violations;
	u16 qkey_violations;
	u16 mkey_violations;
	u16 mkey_lease_period;
	u16 sm_lid;
	u16 repress_traps;
	u8 sm_sl;
	u8 mkeyprot;
	u8 subnet_timeout;
	u8 vl_high_limit;
	/* the first 16 entries are sl_to_vl for !OPA */
	u8 sl_to_sc[32];
	u8 sc_to_sl[32];
};

struct hfi1_qp_ibdev;
struct hfi1_ibdev {
	struct ib_device ibdev;
	struct list_head pending_mmaps;
	spinlock_t mmap_offset_lock;	/* protect mmap_offset */
	u32 mmap_offset;
	struct hfi1_mregion __rcu *dma_mr;

	struct hfi1_qp_ibdev *qp_dev;

	/* QP numbers are shared by all IB ports */
	struct hfi1_lkey_table lk_table;
	/* protect wait lists */
	seqlock_t iowait_lock;
	struct list_head txwait;	/* list for wait verbs_txreq */
	struct list_head memwait;	/* list for wait kernel memory */
	struct list_head txreq_free;
	struct kmem_cache *verbs_txreq_cache;
	struct timer_list mem_timer;

	/* other waiters */
	spinlock_t pending_lock;

	u64 n_piowait;
	u64 n_txwait;
	u64 n_kmem_wait;

	u32 n_pds_allocated;	/* number of PDs allocated for device */
	spinlock_t n_pds_lock;
	u32 n_ahs_allocated;	/* number of AHs allocated for device */
	spinlock_t n_ahs_lock;
	u32 n_cqs_allocated;	/* number of CQs allocated for device */
	spinlock_t n_cqs_lock;
	u32 n_qps_allocated;	/* number of QPs allocated for device */
	spinlock_t n_qps_lock;
	u32 n_srqs_allocated;	/* number of SRQs allocated for device */
	spinlock_t n_srqs_lock;
	u32 n_mcast_grps_allocated;	/* number of mcast groups allocated */
	spinlock_t n_mcast_grps_lock;
#ifdef CONFIG_DEBUG_FS
	/* per HFI debugfs */
	struct dentry *hfi1_ibdev_dbg;
	/* per HFI symlinks to above */
	struct dentry *hfi1_ibdev_link;
#endif
};

struct hfi1_verbs_counters {
	u64 symbol_error_counter;
	u64 link_error_recovery_counter;
	u64 link_downed_counter;
	u64 port_rcv_errors;
	u64 port_rcv_remphys_errors;
	u64 port_xmit_discards;
	u64 port_xmit_data;
	u64 port_rcv_data;
	u64 port_xmit_packets;
	u64 port_rcv_packets;
	u32 local_link_integrity_errors;
	u32 excessive_buffer_overrun_errors;
	u32 vl15_dropped;
};

static inline struct hfi1_mr *to_imr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct hfi1_mr, ibmr);
}

static inline struct hfi1_pd *to_ipd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct hfi1_pd, ibpd);
}

static inline struct hfi1_ah *to_iah(struct ib_ah *ibah)
{
	return container_of(ibah, struct hfi1_ah, ibah);
}

static inline struct hfi1_cq *to_icq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct hfi1_cq, ibcq);
}

static inline struct hfi1_srq *to_isrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct hfi1_srq, ibsrq);
}

static inline struct hfi1_qp *to_iqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct hfi1_qp, ibqp);
}

static inline struct hfi1_ibdev *to_idev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct hfi1_ibdev, ibdev);
}

/*
 * Send if not busy or waiting for I/O and either
 * an RC response is pending or we can process send work requests.
 */
static inline int hfi1_send_ok(struct hfi1_qp *qp)
{
	return !(qp->s_flags & (HFI1_S_BUSY | HFI1_S_ANY_WAIT_IO)) &&
		(qp->s_hdrwords || (qp->s_flags & HFI1_S_RESP_PENDING) ||
		 !(qp->s_flags & HFI1_S_ANY_WAIT_SEND));
}

/*
 * This must be called with s_lock held.
 */
void hfi1_schedule_send(struct hfi1_qp *qp);
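
/*
 * Illustrative pairing of the two helpers above (an assumption, not a
 * required pattern): since hfi1_schedule_send() must be called with
 * s_lock held, a caller would typically do
 *
 *	spin_lock_irqsave(&qp->s_lock, flags);
 *	if (hfi1_send_ok(qp))
 *		hfi1_schedule_send(qp);
 *	spin_unlock_irqrestore(&qp->s_lock, flags);
 */
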
void hfi1_bad_pqkey(struct hfi1_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
		    u32 qp1, u32 qp2, __be16 lid1, __be16 lid2);
void hfi1_cap_mask_chg(struct hfi1_ibport *ibp);
void hfi1_sys_guid_chg(struct hfi1_ibport *ibp);
void hfi1_node_desc_chg(struct hfi1_ibport *ibp);
int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
		     const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		     const struct ib_mad_hdr *in_mad, size_t in_mad_size,
		     struct ib_mad_hdr *out_mad, size_t *out_mad_size,
		     u16 *out_mad_pkey_index);
int hfi1_create_agents(struct hfi1_ibdev *dev);
void hfi1_free_agents(struct hfi1_ibdev *dev);

/*
 * The PSN_MASK and PSN_SHIFT allow for
 * 1) comparing two PSNs
 * 2) returning the PSN with any upper bits masked
 * 3) returning the difference between two PSNs
 *
 * The number of significant bits in the PSN must
 * necessarily be at least one bit less than
 * the container holding the PSN.
 */
#ifndef CONFIG_HFI1_VERBS_31BIT_PSN
#define PSN_MASK	0xFFFFFF
#define PSN_SHIFT	8
#else
#define PSN_MASK	0x7FFFFFFF
#define PSN_SHIFT	1
#endif
#define PSN_MODIFY_MASK	0xFFFFFF

/* Number of bits to pay attention to in the opcode for checking qp type */
#define OPCODE_QP_MASK	0xE0

/*
 * Compare the lower 24 bits of the msn values.
 * Returns an integer less than, equal to, or greater than zero.
 */
static inline int cmp_msn(u32 a, u32 b)
{
	return (((int)a) - ((int)b)) << 8;
}

/*
 * Compare two PSNs.
 * Returns an integer less than, equal to, or greater than zero.
 */
static inline int cmp_psn(u32 a, u32 b)
{
	return (((int)a) - ((int)b)) << PSN_SHIFT;
}

/*
 * Return masked PSN
 */
static inline u32 mask_psn(u32 a)
{
	return a & PSN_MASK;
}

/*
 * Return delta between two PSNs
 */
static inline u32 delta_psn(u32 a, u32 b)
{
	return (((int)a - (int)b) << PSN_SHIFT) >> PSN_SHIFT;
}
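
/*
 * Worked example (24-bit PSNs, PSN_SHIFT == 8): shifting the signed
 * difference up by PSN_SHIFT discards the upper bits, so comparisons
 * and deltas behave correctly across the 24-bit wrap.  For
 * a == 0x000001 and b == 0xFFFFFE:
 *
 *	cmp_psn(a, b)   -> positive (a is logically ahead of b)
 *	delta_psn(a, b) -> 3
 *
 * since ((1 - 0xFFFFFE) << 8) >> 8 sign-extends back to 3.  The values
 * here are only a sanity-check illustration of the arithmetic.
 */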

struct hfi1_mcast *hfi1_mcast_find(struct hfi1_ibport *ibp, union ib_gid *mgid);

int hfi1_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);

int hfi1_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);

int hfi1_mcast_tree_empty(struct hfi1_ibport *ibp);

struct verbs_txreq;
void hfi1_put_txreq(struct verbs_txreq *tx);

int hfi1_verbs_send(struct hfi1_qp *qp, struct ahg_ib_header *ahdr,
		    u32 hdrwords, struct hfi1_sge_state *ss, u32 len);

void hfi1_copy_sge(struct hfi1_sge_state *ss, void *data, u32 length,
		   int release);

void hfi1_skip_sge(struct hfi1_sge_state *ss, u32 length, int release);

void hfi1_cnp_rcv(struct hfi1_packet *packet);

void hfi1_uc_rcv(struct hfi1_packet *packet);

void hfi1_rc_rcv(struct hfi1_packet *packet);

void hfi1_rc_hdrerr(
	struct hfi1_ctxtdata *rcd,
	struct hfi1_ib_header *hdr,
	u32 rcv_flags,
	struct hfi1_qp *qp);

u8 ah_to_sc(struct ib_device *ibdev, struct ib_ah_attr *ah_attr);

int hfi1_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr);

struct ib_ah *hfi1_create_qp0_ah(struct hfi1_ibport *ibp, u16 dlid);

void hfi1_rc_rnr_retry(unsigned long arg);

void hfi1_rc_send_complete(struct hfi1_qp *qp, struct hfi1_ib_header *hdr);

void hfi1_rc_error(struct hfi1_qp *qp, enum ib_wc_status err);

void hfi1_ud_rcv(struct hfi1_packet *packet);

int hfi1_lookup_pkey_idx(struct hfi1_ibport *ibp, u16 pkey);

int hfi1_alloc_lkey(struct hfi1_mregion *mr, int dma_region);

void hfi1_free_lkey(struct hfi1_mregion *mr);

int hfi1_lkey_ok(struct hfi1_lkey_table *rkt, struct hfi1_pd *pd,
		 struct hfi1_sge *isge, struct ib_sge *sge, int acc);

int hfi1_rkey_ok(struct hfi1_qp *qp, struct hfi1_sge *sge,
		 u32 len, u64 vaddr, u32 rkey, int acc);

int hfi1_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr);

struct ib_srq *hfi1_create_srq(struct ib_pd *ibpd,
			       struct ib_srq_init_attr *srq_init_attr,
			       struct ib_udata *udata);

int hfi1_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		    enum ib_srq_attr_mask attr_mask,
		    struct ib_udata *udata);

int hfi1_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);

int hfi1_destroy_srq(struct ib_srq *ibsrq);

int hfi1_cq_init(struct hfi1_devdata *dd);

void hfi1_cq_exit(struct hfi1_devdata *dd);

void hfi1_cq_enter(struct hfi1_cq *cq, struct ib_wc *entry, int sig);

int hfi1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);

struct ib_cq *hfi1_create_cq(
	struct ib_device *ibdev,
	const struct ib_cq_init_attr *attr,
	struct ib_ucontext *context,
	struct ib_udata *udata);

int hfi1_destroy_cq(struct ib_cq *ibcq);

int hfi1_req_notify_cq(
	struct ib_cq *ibcq,
	enum ib_cq_notify_flags notify_flags);

int hfi1_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);

struct ib_mr *hfi1_get_dma_mr(struct ib_pd *pd, int acc);

struct ib_mr *hfi1_reg_phys_mr(struct ib_pd *pd,
			       struct ib_phys_buf *buffer_list,
			       int num_phys_buf, int acc, u64 *iova_start);

struct ib_mr *hfi1_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			       u64 virt_addr, int mr_access_flags,
			       struct ib_udata *udata);

int hfi1_dereg_mr(struct ib_mr *ibmr);

struct ib_mr *hfi1_alloc_mr(struct ib_pd *pd,
			    enum ib_mr_type mr_type,
			    u32 max_entries);

struct ib_fast_reg_page_list *hfi1_alloc_fast_reg_page_list(
	struct ib_device *ibdev, int page_list_len);

void hfi1_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl);

int hfi1_fast_reg_mr(struct hfi1_qp *qp, struct ib_send_wr *wr);

struct ib_fmr *hfi1_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
			      struct ib_fmr_attr *fmr_attr);

int hfi1_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
		      int list_len, u64 iova);

int hfi1_unmap_fmr(struct list_head *fmr_list);

int hfi1_dealloc_fmr(struct ib_fmr *ibfmr);

static inline void hfi1_get_mr(struct hfi1_mregion *mr)
{
	atomic_inc(&mr->refcount);
}

static inline void hfi1_put_mr(struct hfi1_mregion *mr)
{
	if (unlikely(atomic_dec_and_test(&mr->refcount)))
		complete(&mr->comp);
}
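
/*
 * Refcounting sketch (an assumption, for illustration): hfi1_get_mr()
 * and hfi1_put_mr() bracket any use of a region, and mr->comp lets a
 * teardown path wait for the last reference:
 *
 *	hfi1_put_mr(mr);			drop our own reference
 *	wait_for_completion(&mr->comp);		returns at refcount zero
 *
 * matching the "complete when refcount goes to zero" note on struct
 * hfi1_mregion above.
 */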

static inline void hfi1_put_ss(struct hfi1_sge_state *ss)
{
	while (ss->num_sge) {
		hfi1_put_mr(ss->sge.mr);
		if (--ss->num_sge)
			ss->sge = *ss->sg_list++;
	}
}

void hfi1_release_mmap_info(struct kref *ref);

struct hfi1_mmap_info *hfi1_create_mmap_info(struct hfi1_ibdev *dev, u32 size,
					     struct ib_ucontext *context,
					     void *obj);

void hfi1_update_mmap_info(struct hfi1_ibdev *dev, struct hfi1_mmap_info *ip,
			   u32 size, void *obj);

int hfi1_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);

int hfi1_get_rwqe(struct hfi1_qp *qp, int wr_id_only);

void hfi1_migrate_qp(struct hfi1_qp *qp);

int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_ib_header *hdr,
		       int has_grh, struct hfi1_qp *qp, u32 bth0);

u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
		  struct ib_global_route *grh, u32 hwords, u32 nwords);

void clear_ahg(struct hfi1_qp *qp);

void hfi1_make_ruc_header(struct hfi1_qp *qp, struct hfi1_other_headers *ohdr,
			  u32 bth0, u32 bth2, int middle);

void hfi1_do_send(struct work_struct *work);

void hfi1_send_complete(struct hfi1_qp *qp, struct hfi1_swqe *wqe,
			enum ib_wc_status status);

void hfi1_send_rc_ack(struct hfi1_ctxtdata *, struct hfi1_qp *qp, int is_fecn);

int hfi1_make_rc_req(struct hfi1_qp *qp);

int hfi1_make_uc_req(struct hfi1_qp *qp);

int hfi1_make_ud_req(struct hfi1_qp *qp);

int hfi1_register_ib_device(struct hfi1_devdata *);

void hfi1_unregister_ib_device(struct hfi1_devdata *);

void hfi1_ib_rcv(struct hfi1_packet *packet);

unsigned hfi1_get_npkeys(struct hfi1_devdata *);

int hfi1_verbs_send_dma(struct hfi1_qp *qp, struct ahg_ib_header *hdr,
			u32 hdrwords, struct hfi1_sge_state *ss, u32 len,
			u32 plen, u32 dwords, u64 pbc);

int hfi1_verbs_send_pio(struct hfi1_qp *qp, struct ahg_ib_header *hdr,
			u32 hdrwords, struct hfi1_sge_state *ss, u32 len,
			u32 plen, u32 dwords, u64 pbc);

struct send_context *qp_to_send_context(struct hfi1_qp *qp, u8 sc5);

extern const enum ib_wc_opcode ib_hfi1_wc_opcode[];

extern const u8 hdr_len_by_opcode[];

extern const int ib_hfi1_state_ops[];

extern __be64 ib_hfi1_sys_image_guid;	/* in network order */

extern unsigned int hfi1_lkey_table_size;

extern unsigned int hfi1_max_cqes;

extern unsigned int hfi1_max_cqs;

extern unsigned int hfi1_max_qp_wrs;

extern unsigned int hfi1_max_qps;

extern unsigned int hfi1_max_sges;

extern unsigned int hfi1_max_mcast_grps;

extern unsigned int hfi1_max_mcast_qp_attached;

extern unsigned int hfi1_max_srqs;

extern unsigned int hfi1_max_srq_sges;

extern unsigned int hfi1_max_srq_wrs;

extern const u32 ib_hfi1_rnr_table[];

extern struct ib_dma_mapping_ops hfi1_dma_mapping_ops;

#endif				/* HFI1_VERBS_H */