IB/qib: Remove ibport and use rdmavt version
drivers/infiniband/hw/qib/qib_verbs.h
/*
 * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef QIB_VERBS_H
#define QIB_VERBS_H

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/kref.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/rdma_vt.h>

struct qib_ctxtdata;
struct qib_pportdata;
struct qib_devdata;
struct qib_verbs_txreq;

#define QIB_MAX_RDMA_ATOMIC     16
#define QIB_GUIDS_PER_PORT      5

#define QPN_MAX                 (1 << 24)
#define QPNMAP_ENTRIES          (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)

/*
 * Increment this value if any changes that break userspace ABI
 * compatibility are made.
 */
#define QIB_UVERBS_ABI_VERSION  2

/*
 * Define an ib_cq_notify value that is not valid so we know when CQ
 * notifications are armed.
 */
#define IB_CQ_NONE      (IB_CQ_NEXT_COMP + 1)

#define IB_SEQ_NAK      (3 << 29)

/* AETH NAK opcode values */
#define IB_RNR_NAK                      0x20
#define IB_NAK_PSN_ERROR                0x60
#define IB_NAK_INVALID_REQUEST          0x61
#define IB_NAK_REMOTE_ACCESS_ERROR      0x62
#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63
#define IB_NAK_INVALID_RD_REQUEST       0x64

/* Flags for checking QP state (see ib_qib_state_ops[]) */
#define QIB_POST_SEND_OK                0x01
#define QIB_POST_RECV_OK                0x02
#define QIB_PROCESS_RECV_OK             0x04
#define QIB_PROCESS_SEND_OK             0x08
#define QIB_PROCESS_NEXT_SEND_OK        0x10
#define QIB_FLUSH_SEND                  0x20
#define QIB_FLUSH_RECV                  0x40
#define QIB_PROCESS_OR_FLUSH_SEND \
        (QIB_PROCESS_SEND_OK | QIB_FLUSH_SEND)

/* IB Performance Manager status values */
#define IB_PMA_SAMPLE_STATUS_DONE       0x00
#define IB_PMA_SAMPLE_STATUS_STARTED    0x01
#define IB_PMA_SAMPLE_STATUS_RUNNING    0x02

/* Mandatory IB performance counter select values. */
#define IB_PMA_PORT_XMIT_DATA   cpu_to_be16(0x0001)
#define IB_PMA_PORT_RCV_DATA    cpu_to_be16(0x0002)
#define IB_PMA_PORT_XMIT_PKTS   cpu_to_be16(0x0003)
#define IB_PMA_PORT_RCV_PKTS    cpu_to_be16(0x0004)
#define IB_PMA_PORT_XMIT_WAIT   cpu_to_be16(0x0005)

#define QIB_VENDOR_IPG          cpu_to_be16(0xFFA0)

#define IB_BTH_REQ_ACK          (1 << 31)
#define IB_BTH_SOLICITED        (1 << 23)
#define IB_BTH_MIG_REQ          (1 << 22)

/* XXX Should be defined in ib_verbs.h enum ib_port_cap_flags */
#define IB_PORT_OTHER_LOCAL_CHANGES_SUP (1 << 26)

#define IB_GRH_VERSION          6
#define IB_GRH_VERSION_MASK     0xF
#define IB_GRH_VERSION_SHIFT    28
#define IB_GRH_TCLASS_MASK      0xFF
#define IB_GRH_TCLASS_SHIFT     20
#define IB_GRH_FLOW_MASK        0xFFFFF
#define IB_GRH_FLOW_SHIFT       0
#define IB_GRH_NEXT_HDR         0x1B

#define IB_DEFAULT_GID_PREFIX   cpu_to_be64(0xfe80000000000000ULL)

/* Values for set/get portinfo VLCap OperationalVLs */
#define IB_VL_VL0       1
#define IB_VL_VL0_1     2
#define IB_VL_VL0_3     3
#define IB_VL_VL0_7     4
#define IB_VL_VL0_14    5

static inline int qib_num_vls(int vls)
{
        switch (vls) {
        default:
        case IB_VL_VL0:
                return 1;
        case IB_VL_VL0_1:
                return 2;
        case IB_VL_VL0_3:
                return 4;
        case IB_VL_VL0_7:
                return 8;
        case IB_VL_VL0_14:
                return 15;
        }
}
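/*
 * Note: these VLCap encodings count data VLs only; IB_VL_VL0_14 yields
 * 15 because it spans VL0-VL14.  VL15 is reserved for subnet management
 * packets and is never part of the operational VL count.
 */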

struct ib_reth {
        __be64 vaddr;
        __be32 rkey;
        __be32 length;
} __packed;

struct ib_atomic_eth {
        __be32 vaddr[2];        /* unaligned so access as 2 32-bit words */
        __be32 rkey;
        __be64 swap_data;
        __be64 compare_data;
} __packed;

struct qib_other_headers {
        __be32 bth[3];
        union {
                struct {
                        __be32 deth[2];
                        __be32 imm_data;
                } ud;
                struct {
                        struct ib_reth reth;
                        __be32 imm_data;
                } rc;
                struct {
                        __be32 aeth;
                        __be32 atomic_ack_eth[2];
                } at;
                __be32 imm_data;
                __be32 aeth;
                struct ib_atomic_eth atomic_eth;
        } u;
} __packed;
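/*
 * The union above holds the per-opcode extension headers that follow
 * the 12-byte BTH (bth[3]); the BTH opcode determines which member is
 * valid for a given packet (e.g. DETH for UD, RETH for RDMA requests,
 * AETH for acknowledgements).
 */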

/*
 * Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes
 * long (LRH 8 + GRH 40 + BTH 12 + DETH 8; 72 bytes with imm_data).
 * Only the first 56 bytes of the IB header will be in the eager header
 * buffer.  The remaining 12 or 16 bytes are in the data buffer.
 */
struct qib_ib_header {
        __be16 lrh[4];
        union {
                struct {
                        struct ib_grh grh;
                        struct qib_other_headers oth;
                } l;
                struct qib_other_headers oth;
        } u;
} __packed;

struct qib_pio_header {
        __le32 pbc[2];
        struct qib_ib_header hdr;
} __packed;
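/*
 * The two little-endian pbc words carry the per-buffer control
 * information the chip consumes ahead of the IB header when the packet
 * is written into a PIO send buffer.
 */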

/*
 * There is one struct qib_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct qib_mcast_qp.
 */
struct qib_mcast_qp {
        struct list_head list;
        struct rvt_qp *qp;
};

struct qib_mcast {
        struct rb_node rb_node;
        union ib_gid mgid;
        struct list_head qp_list;
        wait_queue_head_t wait;
        atomic_t refcount;
        int n_attached;
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and completion queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 */
struct qib_cq_wc {
        u32 head;               /* index of next entry to fill */
        u32 tail;               /* index of next ib_poll_cq() entry */
        union {
                /* these are actually size ibcq.cqe + 1 */
                struct ib_uverbs_wc uqueue[0];
                struct ib_wc kqueue[0];
        };
};
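/*
 * Illustrative sketch (not part of the driver): the ring holds
 * ibcq.cqe + 1 entries so that head == tail means empty.  A consumer
 * in the ib_poll_cq() path would advance tail roughly like this:
 *
 *      u32 tail = wc->tail;
 *      if (tail != wc->head) {
 *              *entry = wc->kqueue[tail];
 *              wc->tail = (tail >= cq->ibcq.cqe) ? 0 : tail + 1;
 *      }
 */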

/*
 * The completion queue structure.
 */
struct qib_cq {
        struct ib_cq ibcq;
        struct kthread_work comptask;
        struct qib_devdata *dd;
        spinlock_t lock;        /* protect changes in this struct */
        u8 notify;
        u8 triggered;
        struct qib_cq_wc *queue;
        struct rvt_mmap_info *ip;
};

/*
 * qib specific data structure that will be hidden from rvt after the
 * queue pair is made common.
 */
struct qib_qp_priv {
        struct qib_ib_header *s_hdr;    /* next packet header to send */
        struct list_head iowait;        /* link for wait PIO buf */
        atomic_t s_dma_busy;
        struct qib_verbs_txreq *s_tx;
        struct work_struct s_work;
        wait_queue_head_t wait_dma;
        struct rvt_qp *owner;
};

/*
 * Atomic bit definitions for r_aflags.
 */
#define QIB_R_WRID_VALID        0
#define QIB_R_REWIND_SGE        1

/*
 * Bit definitions for r_flags.
 */
#define QIB_R_REUSE_SGE 0x01
#define QIB_R_RDMAR_SEQ 0x02
#define QIB_R_RSP_NAK   0x04
#define QIB_R_RSP_SEND  0x08
#define QIB_R_COMM_EST  0x10

/*
 * Bit definitions for s_flags.
 *
 * QIB_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled
 * QIB_S_BUSY - send tasklet is processing the QP
 * QIB_S_TIMER - the RC retry timer is active
 * QIB_S_RESP_PENDING - a RC response is pending
 * QIB_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
 * QIB_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
 *                    before processing the next SWQE
 * QIB_S_WAIT_RDMAR - waiting for an RDMA read or atomic SWQE to complete
 *                    before processing the next SWQE
 * QIB_S_WAIT_RNR - waiting for RNR timeout
 * QIB_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
 * QIB_S_WAIT_DMA - waiting for send DMA queue to drain before generating
 *                  the next send completion entry not via send DMA
 * QIB_S_WAIT_PIO - waiting for a send buffer to be available
 * QIB_S_WAIT_TX - waiting for a struct qib_verbs_txreq to be available
 * QIB_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
 * QIB_S_WAIT_KMEM - waiting for kernel memory to be available
 * QIB_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
 * QIB_S_WAIT_ACK - waiting for an ACK packet before sending more requests
 * QIB_S_SEND_ONE - send one packet, request ACK, then wait for ACK
 */
#define QIB_S_SIGNAL_REQ_WR     0x0001
#define QIB_S_BUSY              0x0002
#define QIB_S_TIMER             0x0004
#define QIB_S_RESP_PENDING      0x0008
#define QIB_S_ACK_PENDING       0x0010
#define QIB_S_WAIT_FENCE        0x0020
#define QIB_S_WAIT_RDMAR        0x0040
#define QIB_S_WAIT_RNR          0x0080
#define QIB_S_WAIT_SSN_CREDIT   0x0100
#define QIB_S_WAIT_DMA          0x0200
#define QIB_S_WAIT_PIO          0x0400
#define QIB_S_WAIT_TX           0x0800
#define QIB_S_WAIT_DMA_DESC     0x1000
#define QIB_S_WAIT_KMEM         0x2000
#define QIB_S_WAIT_PSN          0x4000
#define QIB_S_WAIT_ACK          0x8000
#define QIB_S_SEND_ONE          0x10000
#define QIB_S_UNLIMITED_CREDIT  0x20000

/*
 * Wait flags that would prevent any packet type from being sent.
 */
#define QIB_S_ANY_WAIT_IO (QIB_S_WAIT_PIO | QIB_S_WAIT_TX | \
        QIB_S_WAIT_DMA_DESC | QIB_S_WAIT_KMEM)

/*
 * Wait flags that would prevent send work requests from making progress.
 */
#define QIB_S_ANY_WAIT_SEND (QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR | \
        QIB_S_WAIT_RNR | QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_DMA | \
        QIB_S_WAIT_PSN | QIB_S_WAIT_ACK)

#define QIB_S_ANY_WAIT (QIB_S_ANY_WAIT_IO | QIB_S_ANY_WAIT_SEND)

#define QIB_PSN_CREDIT  16

/*
 * Since struct rvt_swqe is not a fixed size, we can't simply index into
 * struct rvt_qp.s_wq.  This function does the array index computation.
 */
static inline struct rvt_swqe *get_swqe_ptr(struct rvt_qp *qp,
                                            unsigned n)
{
        return (struct rvt_swqe *)((char *)qp->s_wq +
                                   (sizeof(struct rvt_swqe) +
                                    qp->s_max_sge *
                                    sizeof(struct rvt_sge)) * n);
}
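/*
 * Illustrative layout, as implied by the computation above: each slot
 * in s_wq is a struct rvt_swqe immediately followed by s_max_sge
 * struct rvt_sge entries, so the stride from slot n to slot n + 1 is
 * sizeof(struct rvt_swqe) + s_max_sge * sizeof(struct rvt_sge).
 * get_rwqe_ptr() below applies the same idea to the receive queue.
 */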

/*
 * Since struct rvt_rwqe is not a fixed size, we can't simply index into
 * struct rvt_rwq.wq.  This function does the array index computation.
 */
static inline struct rvt_rwqe *get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
{
        return (struct rvt_rwqe *)
                ((char *) rq->wq->wq +
                 (sizeof(struct rvt_rwqe) +
                  rq->max_sge * sizeof(struct ib_sge)) * n);
}

/*
 * QPN-map pages start out as NULL; they get allocated upon
 * first use and are never deallocated.  This way,
 * large bitmaps are not allocated unless large numbers of QPs are used.
 */
struct qpn_map {
        void *page;
};

struct qib_qpn_table {
        spinlock_t lock;        /* protect changes in this struct */
        unsigned flags;         /* flags for QP0/1 allocated for each port */
        u32 last;               /* last QP number allocated */
        u32 nmaps;              /* size of the map table */
        u16 limit;
        u16 mask;
        /* bit map of free QP numbers other than 0/1 */
        struct qpn_map map[QPNMAP_ENTRIES];
};
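/*
 * Illustrative sketch (an assumption, not the driver's allocator):
 * each map[] entry covers one page worth of QPN bits, so testing
 * whether a QPN is in use looks roughly like:
 *
 *      struct qpn_map *map = &qpt->map[qpn / (PAGE_SIZE * BITS_PER_BYTE)];
 *
 *      if (map->page &&
 *          test_bit(qpn % (PAGE_SIZE * BITS_PER_BYTE), map->page))
 *              ... QPN already allocated ...
 */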

struct qib_opcode_stats {
        u64 n_packets;          /* number of packets */
        u64 n_bytes;            /* total number of bytes */
};

struct qib_opcode_stats_perctx {
        struct qib_opcode_stats stats[128];
};

struct qib_pma_counters {
        u64 n_unicast_xmit;     /* total unicast packets sent */
        u64 n_unicast_rcv;      /* total unicast packets received */
        u64 n_multicast_xmit;   /* total multicast packets sent */
        u64 n_multicast_rcv;    /* total multicast packets received */
};

struct qib_ibport {
        struct rvt_ibport rvp;
        struct rvt_ah *sm_ah;
        struct rvt_ah *smi_ah;
        __be64 guids[QIB_GUIDS_PER_PORT - 1];   /* writable GUIDs */
        struct qib_pma_counters __percpu *pmastats;
        u64 z_unicast_xmit;     /* starting count for PMA */
        u64 z_unicast_rcv;      /* starting count for PMA */
        u64 z_multicast_xmit;   /* starting count for PMA */
        u64 z_multicast_rcv;    /* starting count for PMA */
        u64 z_symbol_error_counter;             /* starting count for PMA */
        u64 z_link_error_recovery_counter;      /* starting count for PMA */
        u64 z_link_downed_counter;              /* starting count for PMA */
        u64 z_port_rcv_errors;                  /* starting count for PMA */
        u64 z_port_rcv_remphys_errors;          /* starting count for PMA */
        u64 z_port_xmit_discards;               /* starting count for PMA */
        u64 z_port_xmit_data;                   /* starting count for PMA */
        u64 z_port_rcv_data;                    /* starting count for PMA */
        u64 z_port_xmit_packets;                /* starting count for PMA */
        u64 z_port_rcv_packets;                 /* starting count for PMA */
        u32 z_local_link_integrity_errors;      /* starting count for PMA */
        u32 z_excessive_buffer_overrun_errors;  /* starting count for PMA */
        u32 z_vl15_dropped;                     /* starting count for PMA */
        u8 sl_to_vl[16];
};

struct qib_ibdev {
        struct rvt_dev_info rdi;
        struct list_head pending_mmaps;
        spinlock_t mmap_offset_lock;    /* protect mmap_offset */
        u32 mmap_offset;

        /* QP numbers are shared by all IB ports */
        struct qib_qpn_table qpn_table;
        struct list_head piowait;       /* list for wait PIO buf */
        struct list_head dmawait;       /* list for wait DMA */
        struct list_head txwait;        /* list for wait qib_verbs_txreq */
        struct list_head memwait;       /* list for wait kernel memory */
        struct list_head txreq_free;
        struct timer_list mem_timer;
        struct rvt_qp __rcu **qp_table;
        struct qib_pio_header *pio_hdrs;
        dma_addr_t pio_hdrs_phys;
        /* list of QPs waiting for RNR timer */
        spinlock_t pending_lock;        /* protect wait lists, PMA counters, etc. */
        u32 qp_table_size;              /* size of the hash table */
        u32 qp_rnd;                     /* random bytes for hash */
        spinlock_t qpt_lock;

        u32 n_piowait;
        u32 n_txwait;

        u32 n_cqs_allocated;    /* number of CQs allocated for device */
        spinlock_t n_cqs_lock;
        u32 n_qps_allocated;    /* number of QPs allocated for device */
        spinlock_t n_qps_lock;
        u32 n_srqs_allocated;   /* number of SRQs allocated for device */
        spinlock_t n_srqs_lock;
        u32 n_mcast_grps_allocated;     /* number of mcast groups allocated */
        spinlock_t n_mcast_grps_lock;
#ifdef CONFIG_DEBUG_FS
        /* per HCA debugfs */
        struct dentry *qib_ibdev_dbg;
#endif
};

struct qib_verbs_counters {
        u64 symbol_error_counter;
        u64 link_error_recovery_counter;
        u64 link_downed_counter;
        u64 port_rcv_errors;
        u64 port_rcv_remphys_errors;
        u64 port_xmit_discards;
        u64 port_xmit_data;
        u64 port_rcv_data;
        u64 port_xmit_packets;
        u64 port_rcv_packets;
        u32 local_link_integrity_errors;
        u32 excessive_buffer_overrun_errors;
        u32 vl15_dropped;
};

static inline struct qib_cq *to_icq(struct ib_cq *ibcq)
{
        return container_of(ibcq, struct qib_cq, ibcq);
}

static inline struct rvt_qp *to_iqp(struct ib_qp *ibqp)
{
        return container_of(ibqp, struct rvt_qp, ibqp);
}

static inline struct qib_ibdev *to_idev(struct ib_device *ibdev)
{
        struct rvt_dev_info *rdi;

        rdi = container_of(ibdev, struct rvt_dev_info, ibdev);
        return container_of(rdi, struct qib_ibdev, rdi);
}

/*
 * Send if not busy or waiting for I/O and either
 * a RC response is pending or we can process send work requests.
 * (A nonzero s_hdrwords means a previously built packet header is
 * still waiting to be sent.)
 */
static inline int qib_send_ok(struct rvt_qp *qp)
{
        return !(qp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT_IO)) &&
                (qp->s_hdrwords || (qp->s_flags & QIB_S_RESP_PENDING) ||
                 !(qp->s_flags & QIB_S_ANY_WAIT_SEND));
}

/*
 * This must be called with s_lock held.
 */
void qib_schedule_send(struct rvt_qp *qp);

static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
{
        u16 p1 = pkey1 & 0x7FFF;
        u16 p2 = pkey2 & 0x7FFF;

        /*
         * Low 15 bits must be non-zero and match, and
         * one of the two must be a full member (high bit set).
         */
        return p1 && p1 == p2 && ((__s16)pkey1 < 0 || (__s16)pkey2 < 0);
}
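/*
 * Example: qib_pkey_ok(0x8001, 0x0001) and qib_pkey_ok(0x8001, 0x8001)
 * both succeed, while qib_pkey_ok(0x0001, 0x0001) fails because two
 * limited members of a partition may not communicate with each other.
 */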

void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
                   u32 qp1, u32 qp2, __be16 lid1, __be16 lid2);
void qib_cap_mask_chg(struct qib_ibport *ibp);
void qib_sys_guid_chg(struct qib_ibport *ibp);
void qib_node_desc_chg(struct qib_ibport *ibp);
int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
                    const struct ib_wc *in_wc, const struct ib_grh *in_grh,
                    const struct ib_mad_hdr *in, size_t in_mad_size,
                    struct ib_mad_hdr *out, size_t *out_mad_size,
                    u16 *out_mad_pkey_index);
int qib_create_agents(struct qib_ibdev *dev);
void qib_free_agents(struct qib_ibdev *dev);

/*
 * Compare the lower 24 bits of the two values.
 * Returns an integer less than, equal to, or greater than zero.
 */
static inline int qib_cmp24(u32 a, u32 b)
{
        return (((int) a) - ((int) b)) << 8;
}
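/*
 * Example: shifting the 32-bit difference left by 8 throws away its
 * top 8 bits, so the sign of the result is the sign of the 24-bit
 * (PSN-sized) distance.  Thus qib_cmp24(1, 2) < 0, while
 * qib_cmp24(0, 0xFFFFFF) > 0: PSN 0 is "after" PSN 0xFFFFFF across
 * the wraparound.
 */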

struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid);

int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
                          u64 *rwords, u64 *spkts, u64 *rpkts,
                          u64 *xmit_wait);

int qib_get_counters(struct qib_pportdata *ppd,
                     struct qib_verbs_counters *cntrs);

int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);

int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);

int qib_mcast_tree_empty(struct qib_ibport *ibp);

__be32 qib_compute_aeth(struct rvt_qp *qp);

struct rvt_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn);

struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
                            struct ib_qp_init_attr *init_attr,
                            struct ib_udata *udata);

int qib_destroy_qp(struct ib_qp *ibqp);

int qib_error_qp(struct rvt_qp *qp, enum ib_wc_status err);

int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                  int attr_mask, struct ib_udata *udata);

int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                 int attr_mask, struct ib_qp_init_attr *init_attr);

unsigned qib_free_all_qps(struct qib_devdata *dd);

void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt);

void qib_free_qpn_table(struct qib_qpn_table *qpt);

#ifdef CONFIG_DEBUG_FS

struct qib_qp_iter;

struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev);

int qib_qp_iter_next(struct qib_qp_iter *iter);

void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter);

#endif

void qib_get_credit(struct rvt_qp *qp, u32 aeth);

unsigned qib_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult);

void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail);

void qib_put_txreq(struct qib_verbs_txreq *tx);

int qib_verbs_send(struct rvt_qp *qp, struct qib_ib_header *hdr,
                   u32 hdrwords, struct rvt_sge_state *ss, u32 len);

void qib_copy_sge(struct rvt_sge_state *ss, void *data, u32 length,
                  int release);

void qib_skip_sge(struct rvt_sge_state *ss, u32 length, int release);

void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
                int has_grh, void *data, u32 tlen, struct rvt_qp *qp);

void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
                int has_grh, void *data, u32 tlen, struct rvt_qp *qp);

int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr);

struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid);

void qib_rc_rnr_retry(unsigned long arg);

void qib_rc_send_complete(struct rvt_qp *qp, struct qib_ib_header *hdr);

void qib_rc_error(struct rvt_qp *qp, enum ib_wc_status err);

int qib_post_ud_send(struct rvt_qp *qp, struct ib_send_wr *wr);

void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
                int has_grh, void *data, u32 tlen, struct rvt_qp *qp);

int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
                         struct ib_recv_wr **bad_wr);

struct ib_srq *qib_create_srq(struct ib_pd *ibpd,
                              struct ib_srq_init_attr *srq_init_attr,
                              struct ib_udata *udata);

int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                   enum ib_srq_attr_mask attr_mask,
                   struct ib_udata *udata);

int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);

int qib_destroy_srq(struct ib_srq *ibsrq);

int qib_cq_init(struct qib_devdata *dd);

void qib_cq_exit(struct qib_devdata *dd);

void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int sig);

int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);

struct ib_cq *qib_create_cq(struct ib_device *ibdev,
                            const struct ib_cq_init_attr *attr,
                            struct ib_ucontext *context,
                            struct ib_udata *udata);

int qib_destroy_cq(struct ib_cq *ibcq);

int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);

int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);

void mr_rcu_callback(struct rcu_head *list);

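/*
 * Release the memory-region reference held by each remaining SGE in an
 * SGE state; the counterpart of the references taken when the SGE list
 * was built.
 */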
static inline void qib_put_ss(struct rvt_sge_state *ss)
{
        while (ss->num_sge) {
                rvt_put_mr(ss->sge.mr);
                if (--ss->num_sge)
                        ss->sge = *ss->sg_list++;
        }
}

void qib_release_mmap_info(struct kref *ref);

struct rvt_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev, u32 size,
                                           struct ib_ucontext *context,
                                           void *obj);

void qib_update_mmap_info(struct qib_ibdev *dev, struct rvt_mmap_info *ip,
                          u32 size, void *obj);

int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);

int qib_get_rwqe(struct rvt_qp *qp, int wr_id_only);

void qib_migrate_qp(struct rvt_qp *qp);

int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
                      int has_grh, struct rvt_qp *qp, u32 bth0);

u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
                 struct ib_global_route *grh, u32 hwords, u32 nwords);

void qib_make_ruc_header(struct rvt_qp *qp, struct qib_other_headers *ohdr,
                         u32 bth0, u32 bth2);

void qib_do_send(struct work_struct *work);

void qib_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
                       enum ib_wc_status status);

void qib_send_rc_ack(struct rvt_qp *qp);

int qib_make_rc_req(struct rvt_qp *qp);

int qib_make_uc_req(struct rvt_qp *qp);

int qib_make_ud_req(struct rvt_qp *qp);

int qib_register_ib_device(struct qib_devdata *);

void qib_unregister_ib_device(struct qib_devdata *);

void qib_ib_rcv(struct qib_ctxtdata *, void *, void *, u32);

void qib_ib_piobufavail(struct qib_devdata *);

unsigned qib_get_npkeys(struct qib_devdata *);

unsigned qib_get_pkey(struct qib_ibport *, unsigned);

extern const enum ib_wc_opcode ib_qib_wc_opcode[];

/*
 * Below are the HCA-independent IB PhysPortState values returned by
 * the f_ibphys_portstate() routine.
 */
#define IB_PHYSPORTSTATE_SLEEP 1
#define IB_PHYSPORTSTATE_POLL 2
#define IB_PHYSPORTSTATE_DISABLED 3
#define IB_PHYSPORTSTATE_CFG_TRAIN 4
#define IB_PHYSPORTSTATE_LINKUP 5
#define IB_PHYSPORTSTATE_LINK_ERR_RECOVER 6
#define IB_PHYSPORTSTATE_CFG_DEBOUNCE 8
#define IB_PHYSPORTSTATE_CFG_IDLE 0xB
#define IB_PHYSPORTSTATE_RECOVERY_RETRAIN 0xC
#define IB_PHYSPORTSTATE_RECOVERY_WAITRMT 0xE
#define IB_PHYSPORTSTATE_RECOVERY_IDLE 0xF
#define IB_PHYSPORTSTATE_CFG_ENH 0x10
#define IB_PHYSPORTSTATE_CFG_WAIT_ENH 0x13

extern const int ib_qib_state_ops[];

extern __be64 ib_qib_sys_image_guid;    /* in network order */

extern unsigned int ib_rvt_lkey_table_size;

extern unsigned int ib_qib_max_cqes;

extern unsigned int ib_qib_max_cqs;

extern unsigned int ib_qib_max_qp_wrs;

extern unsigned int ib_qib_max_qps;

extern unsigned int ib_qib_max_sges;

extern unsigned int ib_qib_max_mcast_grps;

extern unsigned int ib_qib_max_mcast_qp_attached;

extern unsigned int ib_qib_max_srqs;

extern unsigned int ib_qib_max_srq_sges;

extern unsigned int ib_qib_max_srq_wrs;

extern const u32 ib_qib_rnr_table[];

#endif                          /* QIB_VERBS_H */