IB/qib: Remove ibport and use rdmavt version
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index e6b7556d522108951bf38ebc6750b991eb232285..1e8463de9efc63b1816bc61b843fde2552ed8091 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -40,7 +40,7 @@
 
 static void rc_timeout(unsigned long arg);
 
-static u32 restart_sge(struct qib_sge_state *ss, struct qib_swqe *wqe,
+static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
                       u32 psn, u32 pmtu)
 {
        u32 len;
@@ -54,7 +54,7 @@ static u32 restart_sge(struct qib_sge_state *ss, struct qib_swqe *wqe,
        return wqe->length - len;
 }
 
-static void start_timer(struct qib_qp *qp)
+static void start_timer(struct rvt_qp *qp)
 {
        qp->s_flags |= QIB_S_TIMER;
        qp->s_timer.function = rc_timeout;
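
start_timer() only switches the callback here; the arming of the retry timeout sits just past this hunk's context. A minimal sketch of the pre-timer_setup() pattern this code relies on, with the QP pointer carried through the timer's unsigned long argument (the cast in rc_timeout() further down confirms the convention; the timeout_jiffies field name is assumed from usage of that era):

	/* Sketch: legacy timer_list API, data set once at QP init. */
	qp->s_timer.function = rc_timeout;
	qp->s_timer.data = (unsigned long)qp;
	mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies);
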
@@ -74,10 +74,10 @@ static void start_timer(struct qib_qp *qp)
  * Note that we are in the responder's side of the QP context.
  * Note the QP s_lock must be held.
  */
-static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp,
+static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp,
                           struct qib_other_headers *ohdr, u32 pmtu)
 {
-       struct qib_ack_entry *e;
+       struct rvt_ack_entry *e;
        u32 hwords;
        u32 len;
        u32 bth0;
@@ -95,7 +95,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp,
        case OP(RDMA_READ_RESPONSE_ONLY):
                e = &qp->s_ack_queue[qp->s_tail_ack_queue];
                if (e->rdma_sge.mr) {
-                       qib_put_mr(e->rdma_sge.mr);
+                       rvt_put_mr(e->rdma_sge.mr);
                        e->rdma_sge.mr = NULL;
                }
                /* FALLTHROUGH */
@@ -133,7 +133,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp,
                        /* Copy SGE state in case we need to resend */
                        qp->s_rdma_mr = e->rdma_sge.mr;
                        if (qp->s_rdma_mr)
-                               qib_get_mr(qp->s_rdma_mr);
+                               rvt_get_mr(qp->s_rdma_mr);
                        qp->s_ack_rdma_sge.sge = e->rdma_sge;
                        qp->s_ack_rdma_sge.num_sge = 1;
                        qp->s_cur_sge = &qp->s_ack_rdma_sge;
@@ -172,7 +172,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp,
                qp->s_cur_sge = &qp->s_ack_rdma_sge;
                qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr;
                if (qp->s_rdma_mr)
-                       qib_get_mr(qp->s_rdma_mr);
+                       rvt_get_mr(qp->s_rdma_mr);
                len = qp->s_ack_rdma_sge.sge.sge_length;
                if (len > pmtu)
                        len = pmtu;
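
The qib_get_mr()/qib_put_mr() pairs become their rdmavt equivalents throughout this file. A sketch of the assumed shape of those helpers (the authoritative definitions live in the rdmavt headers and may differ in detail):

	/* Assumed: a plain reference count on the memory region, with
	 * the final put completing mr->comp so deregistration that is
	 * waiting on outstanding users can finish. */
	static inline void rvt_get_mr(struct rvt_mregion *mr)
	{
		atomic_inc(&mr->refcount);
	}

	static inline void rvt_put_mr(struct rvt_mregion *mr)
	{
		if (unlikely(atomic_dec_and_test(&mr->refcount)))
			complete(&mr->comp);
	}
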
@@ -228,12 +228,13 @@ bail:
  *
  * Return 1 if constructed; otherwise, return 0.
  */
-int qib_make_rc_req(struct qib_qp *qp)
+int qib_make_rc_req(struct rvt_qp *qp)
 {
+       struct qib_qp_priv *priv = qp->priv;
        struct qib_ibdev *dev = to_idev(qp->ibqp.device);
        struct qib_other_headers *ohdr;
-       struct qib_sge_state *ss;
-       struct qib_swqe *wqe;
+       struct rvt_sge_state *ss;
+       struct rvt_swqe *wqe;
        u32 hwords;
        u32 len;
        u32 bth0;
@@ -244,9 +245,9 @@ int qib_make_rc_req(struct qib_qp *qp)
        int ret = 0;
        int delta;
 
-       ohdr = &qp->s_hdr->u.oth;
+       ohdr = &priv->s_hdr->u.oth;
        if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
-               ohdr = &qp->s_hdr->u.l.oth;
+               ohdr = &priv->s_hdr->u.l.oth;
 
        /*
         * The lock is needed to synchronize between the sending tasklet,
@@ -266,7 +267,7 @@ int qib_make_rc_req(struct qib_qp *qp)
                if (qp->s_last == qp->s_head)
                        goto bail;
                /* If DMAs are in progress, we can't flush immediately. */
-               if (atomic_read(&qp->s_dma_busy)) {
+               if (atomic_read(&priv->s_dma_busy)) {
                        qp->s_flags |= QIB_S_WAIT_DMA;
                        goto bail;
                }
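
The biggest structural change in this function: the software send header and the DMA-busy count now live in driver-private QP state reached through qp->priv, rather than in the rdmavt-owned struct rvt_qp itself. A minimal sketch of the pattern, assuming field names as used in this diff; the authoritative layout is in qib_verbs.h:

	/* Hypothetical sketch: per-driver QP state hanging off rvt_qp. */
	struct qib_qp_priv {
		struct qib_ib_header *s_hdr;	/* next packet header to send */
		atomic_t s_dma_busy;		/* DMA descriptors in flight */
		/* ... other qib-only fields ... */
		struct rvt_qp *owner;		/* back-pointer, assumed */
	};

	struct qib_qp_priv *priv = qp->priv;	/* attached at QP creation */
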
@@ -647,7 +648,7 @@ unlock:
  * Note that RDMA reads and atomics are handled in the
  * send side QP state and tasklet.
  */
-void qib_send_rc_ack(struct qib_qp *qp)
+void qib_send_rc_ack(struct rvt_qp *qp)
 {
        struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
        struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
@@ -759,7 +760,7 @@ void qib_send_rc_ack(struct qib_qp *qp)
 
 queue_ack:
        if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
-               ibp->n_rc_qacks++;
+               this_cpu_inc(*ibp->rvp.rc_qacks);
                qp->s_flags |= QIB_S_ACK_PENDING | QIB_S_RESP_PENDING;
                qp->s_nak_state = qp->r_nak_state;
                qp->s_ack_psn = qp->r_ack_psn;
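
n_rc_qacks moves from a plain per-port integer to a per-CPU counter in the shared rvt_ibport (rvp), so the hot path increments without bouncing a cache line between CPUs. A sketch of the assumed allocation and read side (names hypothetical; the real code sits in the rdmavt/qib init and stats paths):

	u64 __percpu *rc_qacks;

	rc_qacks = alloc_percpu(u64);		/* at port init */
	this_cpu_inc(*rc_qacks);		/* hot path, lock-free */

	/* A reader folds the per-CPU slots back into one value: */
	static u64 read_port_counter(u64 __percpu *cntr)
	{
		u64 sum = 0;
		int cpu;

		for_each_possible_cpu(cpu)
			sum += *per_cpu_ptr(cntr, cpu);
		return sum;
	}
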
@@ -782,10 +783,10 @@ done:
  * for the given QP.
  * Called at interrupt level with the QP s_lock held.
  */
-static void reset_psn(struct qib_qp *qp, u32 psn)
+static void reset_psn(struct rvt_qp *qp, u32 psn)
 {
        u32 n = qp->s_acked;
-       struct qib_swqe *wqe = get_swqe_ptr(qp, n);
+       struct rvt_swqe *wqe = get_swqe_ptr(qp, n);
        u32 opcode;
 
        qp->s_cur = n;
@@ -867,9 +868,9 @@ done:
  * Back up requester to resend the last un-ACKed request.
  * The QP r_lock and s_lock should be held and interrupts disabled.
  */
-static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait)
+static void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
 {
-       struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_acked);
+       struct rvt_swqe *wqe = get_swqe_ptr(qp, qp->s_acked);
        struct qib_ibport *ibp;
 
        if (qp->s_retry == 0) {
@@ -887,9 +888,9 @@ static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait)
 
        ibp = to_iport(qp->ibqp.device, qp->port_num);
        if (wqe->wr.opcode == IB_WR_RDMA_READ)
-               ibp->n_rc_resends++;
+               ibp->rvp.n_rc_resends++;
        else
-               ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
+               ibp->rvp.n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
 
        qp->s_flags &= ~(QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR |
                         QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_PSN |
@@ -904,7 +905,7 @@ static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait)
  */
 static void rc_timeout(unsigned long arg)
 {
-       struct qib_qp *qp = (struct qib_qp *)arg;
+       struct rvt_qp *qp = (struct rvt_qp *)arg;
        struct qib_ibport *ibp;
        unsigned long flags;
 
@@ -912,7 +913,7 @@ static void rc_timeout(unsigned long arg)
        spin_lock(&qp->s_lock);
        if (qp->s_flags & QIB_S_TIMER) {
                ibp = to_iport(qp->ibqp.device, qp->port_num);
-               ibp->n_rc_timeouts++;
+               ibp->rvp.n_rc_timeouts++;
                qp->s_flags &= ~QIB_S_TIMER;
                del_timer(&qp->s_timer);
                qib_restart_rc(qp, qp->s_last_psn + 1, 1);
@@ -927,7 +928,7 @@ static void rc_timeout(unsigned long arg)
  */
 void qib_rc_rnr_retry(unsigned long arg)
 {
-       struct qib_qp *qp = (struct qib_qp *)arg;
+       struct rvt_qp *qp = (struct rvt_qp *)arg;
        unsigned long flags;
 
        spin_lock_irqsave(&qp->s_lock, flags);
@@ -943,9 +944,9 @@ void qib_rc_rnr_retry(unsigned long arg)
  * Set qp->s_sending_psn to the next PSN after the given one.
  * This would be psn+1 except when RDMA reads are present.
  */
-static void reset_sending_psn(struct qib_qp *qp, u32 psn)
+static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
 {
-       struct qib_swqe *wqe;
+       struct rvt_swqe *wqe;
        u32 n = qp->s_last;
 
        /* Find the work request corresponding to the given PSN. */
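
The doc comment above explains why this is not simply psn + 1: an RDMA read's response occupies the request's whole PSN range, so sending resumes after its last PSN. The loop body falls outside this hunk's context; a sketch of its assumed shape, built from the fields this diff already uses:

	for (;;) {
		wqe = get_swqe_ptr(qp, n);
		if (qib_cmp24(psn, wqe->lpsn) <= 0) {
			if (wqe->wr.opcode == IB_WR_RDMA_READ)
				qp->s_sending_psn = wqe->lpsn + 1;
			else
				qp->s_sending_psn = psn + 1;
			break;
		}
		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
	}
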
@@ -968,10 +969,10 @@ static void reset_sending_psn(struct qib_qp *qp, u32 psn)
 /*
  * This should be called with the QP s_lock held and interrupts disabled.
  */
-void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr)
+void qib_rc_send_complete(struct rvt_qp *qp, struct qib_ib_header *hdr)
 {
        struct qib_other_headers *ohdr;
-       struct qib_swqe *wqe;
+       struct rvt_swqe *wqe;
        struct ib_wc wc;
        unsigned i;
        u32 opcode;
@@ -1012,9 +1013,9 @@ void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr)
                    qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
                        break;
                for (i = 0; i < wqe->wr.num_sge; i++) {
-                       struct qib_sge *sge = &wqe->sg_list[i];
+                       struct rvt_sge *sge = &wqe->sg_list[i];
 
-                       qib_put_mr(sge->mr);
+                       rvt_put_mr(sge->mr);
                }
                /* Post a send completion queue entry if requested. */
                if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
@@ -1043,7 +1044,7 @@ void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr)
        }
 }
 
-static inline void update_last_psn(struct qib_qp *qp, u32 psn)
+static inline void update_last_psn(struct rvt_qp *qp, u32 psn)
 {
        qp->s_last_psn = psn;
 }
@@ -1053,8 +1054,8 @@ static inline void update_last_psn(struct qib_qp *qp, u32 psn)
  * This is similar to qib_send_complete but has to check to be sure
  * that the SGEs are not being referenced if the SWQE is being resent.
  */
-static struct qib_swqe *do_rc_completion(struct qib_qp *qp,
-                                        struct qib_swqe *wqe,
+static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
+                                        struct rvt_swqe *wqe,
                                         struct qib_ibport *ibp)
 {
        struct ib_wc wc;
@@ -1068,9 +1069,9 @@ static struct qib_swqe *do_rc_completion(struct qib_qp *qp,
        if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) < 0 ||
            qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
                for (i = 0; i < wqe->wr.num_sge; i++) {
-                       struct qib_sge *sge = &wqe->sg_list[i];
+                       struct rvt_sge *sge = &wqe->sg_list[i];
 
-                       qib_put_mr(sge->mr);
+                       rvt_put_mr(sge->mr);
                }
                /* Post a send completion queue entry if requested. */
                if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
@@ -1086,7 +1087,7 @@ static struct qib_swqe *do_rc_completion(struct qib_qp *qp,
                if (++qp->s_last >= qp->s_size)
                        qp->s_last = 0;
        } else
-               ibp->n_rc_delayed_comp++;
+               this_cpu_inc(*ibp->rvp.rc_delayed_comp);
 
        qp->s_retry = qp->s_retry_cnt;
        update_last_psn(qp, wqe->lpsn);
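
Both PSN guards above lean on qib_cmp24() for wraparound-safe ordering of 24-bit PSNs. From memory, the helper in qib_verbs.h is a sign trick over the circular distance; a sketch:

	/* Sign of the 24-bit circular difference a - b: shifting the
	 * 32-bit difference left by 8 discards the irrelevant high
	 * bits, so the result is <0, ==0 or >0 like a plain compare. */
	static inline int qib_cmp24(u32 a, u32 b)
	{
		return (((int)a) - ((int)b)) << 8;
	}
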
@@ -1126,12 +1127,12 @@ static struct qib_swqe *do_rc_completion(struct qib_qp *qp,
  * Called at interrupt level with the QP s_lock held.
  * Returns 1 if OK, 0 if current operation should be aborted (NAK).
  */
-static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode,
+static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
                     u64 val, struct qib_ctxtdata *rcd)
 {
        struct qib_ibport *ibp;
        enum ib_wc_status status;
-       struct qib_swqe *wqe;
+       struct rvt_swqe *wqe;
        int ret = 0;
        u32 ack_psn;
        int diff;
@@ -1231,7 +1232,7 @@ static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode,
 
        switch (aeth >> 29) {
        case 0:         /* ACK */
-               ibp->n_rc_acks++;
+               this_cpu_inc(*ibp->rvp.rc_acks);
                if (qp->s_acked != qp->s_tail) {
                        /*
                         * We are expecting more ACKs so
@@ -1260,7 +1261,7 @@ static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode,
                goto bail;
 
        case 1:         /* RNR NAK */
-               ibp->n_rnr_naks++;
+               ibp->rvp.n_rnr_naks++;
                if (qp->s_acked == qp->s_tail)
                        goto bail;
                if (qp->s_flags & QIB_S_WAIT_RNR)
@@ -1275,7 +1276,7 @@ static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode,
                /* The last valid PSN is the previous PSN. */
                update_last_psn(qp, psn - 1);
 
-               ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
+               ibp->rvp.n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
 
                reset_psn(qp, psn);
 
@@ -1296,7 +1297,7 @@ static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode,
                switch ((aeth >> QIB_AETH_CREDIT_SHIFT) &
                        QIB_AETH_CREDIT_MASK) {
                case 0: /* PSN sequence error */
-                       ibp->n_seq_naks++;
+                       ibp->rvp.n_seq_naks++;
                        /*
                         * Back up to the responder's expected PSN.
                         * Note that we might get a NAK in the middle of an
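
All of these switch arms decode the same 32-bit AETH: per the IB spec it carries an 8-bit syndrome over a 24-bit MSN, and the top three syndrome bits select ACK (0), RNR NAK (1) or NAK (3). A sketch of the field extraction, assuming QIB_AETH_CREDIT_SHIFT and QIB_AETH_CREDIT_MASK are 24 and 0x1f:

	u32 aeth = be32_to_cpu(ohdr->u.aeth);
	u32 type = aeth >> 29;			  /* 0 ACK, 1 RNR NAK, 3 NAK */
	u32 credit_or_code = (aeth >> 24) & 0x1f; /* credits or NAK code */
	u32 msn = aeth & 0xffffff;		  /* message sequence number */
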
@@ -1309,17 +1310,17 @@ static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode,
 
                case 1: /* Invalid Request */
                        status = IB_WC_REM_INV_REQ_ERR;
-                       ibp->n_other_naks++;
+                       ibp->rvp.n_other_naks++;
                        goto class_b;
 
                case 2: /* Remote Access Error */
                        status = IB_WC_REM_ACCESS_ERR;
-                       ibp->n_other_naks++;
+                       ibp->rvp.n_other_naks++;
                        goto class_b;
 
                case 3: /* Remote Operation Error */
                        status = IB_WC_REM_OP_ERR;
-                       ibp->n_other_naks++;
+                       ibp->rvp.n_other_naks++;
 class_b:
                        if (qp->s_last == qp->s_acked) {
                                qib_send_complete(qp, wqe, status);
@@ -1349,10 +1350,10 @@ bail:
  * We have seen an out of sequence RDMA read middle or last packet.
  * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
  */
-static void rdma_seq_err(struct qib_qp *qp, struct qib_ibport *ibp, u32 psn,
+static void rdma_seq_err(struct rvt_qp *qp, struct qib_ibport *ibp, u32 psn,
                         struct qib_ctxtdata *rcd)
 {
-       struct qib_swqe *wqe;
+       struct rvt_swqe *wqe;
 
        /* Remove QP from retry timer */
        if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
@@ -1370,7 +1371,7 @@ static void rdma_seq_err(struct qib_qp *qp, struct qib_ibport *ibp, u32 psn,
                wqe = do_rc_completion(qp, wqe, ibp);
        }
 
-       ibp->n_rdma_seq++;
+       ibp->rvp.n_rdma_seq++;
        qp->r_flags |= QIB_R_RDMAR_SEQ;
        qib_restart_rc(qp, qp->s_last_psn + 1, 0);
        if (list_empty(&qp->rspwait)) {
@@ -1399,12 +1400,12 @@ static void rdma_seq_err(struct qib_qp *qp, struct qib_ibport *ibp, u32 psn,
 static void qib_rc_rcv_resp(struct qib_ibport *ibp,
                            struct qib_other_headers *ohdr,
                            void *data, u32 tlen,
-                           struct qib_qp *qp,
+                           struct rvt_qp *qp,
                            u32 opcode,
                            u32 psn, u32 hdrsize, u32 pmtu,
                            struct qib_ctxtdata *rcd)
 {
-       struct qib_swqe *wqe;
+       struct rvt_swqe *wqe;
        struct qib_pportdata *ppd = ppd_from_ibp(ibp);
        enum ib_wc_status status;
        unsigned long flags;
@@ -1623,14 +1624,14 @@ bail:
  */
 static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
                            void *data,
-                           struct qib_qp *qp,
+                           struct rvt_qp *qp,
                            u32 opcode,
                            u32 psn,
                            int diff,
                            struct qib_ctxtdata *rcd)
 {
        struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
-       struct qib_ack_entry *e;
+       struct rvt_ack_entry *e;
        unsigned long flags;
        u8 i, prev;
        int old_req;
@@ -1642,7 +1643,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
                 * Don't queue the NAK if we already sent one.
                 */
                if (!qp->r_nak_state) {
-                       ibp->n_rc_seqnak++;
+                       ibp->rvp.n_rc_seqnak++;
                        qp->r_nak_state = IB_NAK_PSN_ERROR;
                        /* Use the expected PSN. */
                        qp->r_ack_psn = qp->r_psn;
@@ -1678,7 +1679,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
         */
        e = NULL;
        old_req = 1;
-       ibp->n_rc_dupreq++;
+       ibp->rvp.n_rc_dupreq++;
 
        spin_lock_irqsave(&qp->s_lock, flags);
 
@@ -1732,7 +1733,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
                if (unlikely(offset + len != e->rdma_sge.sge_length))
                        goto unlock_done;
                if (e->rdma_sge.mr) {
-                       qib_put_mr(e->rdma_sge.mr);
+                       rvt_put_mr(e->rdma_sge.mr);
                        e->rdma_sge.mr = NULL;
                }
                if (len != 0) {
@@ -1740,7 +1741,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
                        u64 vaddr = be64_to_cpu(reth->vaddr);
                        int ok;
 
-                       ok = qib_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
+                       ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
                                         IB_ACCESS_REMOTE_READ);
                        if (unlikely(!ok))
                                goto unlock_done;
@@ -1818,7 +1819,7 @@ send_ack:
        return 0;
 }
 
-void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err)
+void qib_rc_error(struct rvt_qp *qp, enum ib_wc_status err)
 {
        unsigned long flags;
        int lastwqe;
@@ -1837,7 +1838,7 @@ void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err)
        }
 }
 
-static inline void qib_update_ack_queue(struct qib_qp *qp, unsigned n)
+static inline void qib_update_ack_queue(struct rvt_qp *qp, unsigned n)
 {
        unsigned next;
 
@@ -1862,7 +1863,7 @@ static inline void qib_update_ack_queue(struct qib_qp *qp, unsigned n)
  * Called at interrupt level.
  */
 void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
-               int has_grh, void *data, u32 tlen, struct qib_qp *qp)
+               int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
 {
        struct qib_ibport *ibp = &rcd->ppd->ibport_data;
        struct qib_other_headers *ohdr;
@@ -2069,7 +2070,7 @@ send_last:
                        int ok;
 
                        /* Check rkey & NAK */
-                       ok = qib_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
+                       ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
                                         rkey, IB_ACCESS_REMOTE_WRITE);
                        if (unlikely(!ok))
                                goto nack_acc;
@@ -2096,7 +2097,7 @@ send_last:
                goto send_last;
 
        case OP(RDMA_READ_REQUEST): {
-               struct qib_ack_entry *e;
+               struct rvt_ack_entry *e;
                u32 len;
                u8 next;
 
@@ -2114,7 +2115,7 @@ send_last:
                }
                e = &qp->s_ack_queue[qp->r_head_ack_queue];
                if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
-                       qib_put_mr(e->rdma_sge.mr);
+                       rvt_put_mr(e->rdma_sge.mr);
                        e->rdma_sge.mr = NULL;
                }
                reth = &ohdr->u.rc.reth;
@@ -2125,7 +2126,7 @@ send_last:
                        int ok;
 
                        /* Check rkey & NAK */
-                       ok = qib_rkey_ok(qp, &e->rdma_sge, len, vaddr,
+                       ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr,
                                         rkey, IB_ACCESS_REMOTE_READ);
                        if (unlikely(!ok))
                                goto nack_acc_unlck;
@@ -2166,7 +2167,7 @@ send_last:
        case OP(COMPARE_SWAP):
        case OP(FETCH_ADD): {
                struct ib_atomic_eth *ateth;
-               struct qib_ack_entry *e;
+               struct rvt_ack_entry *e;
                u64 vaddr;
                atomic64_t *maddr;
                u64 sdata;
@@ -2186,7 +2187,7 @@ send_last:
                }
                e = &qp->s_ack_queue[qp->r_head_ack_queue];
                if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
-                       qib_put_mr(e->rdma_sge.mr);
+                       rvt_put_mr(e->rdma_sge.mr);
                        e->rdma_sge.mr = NULL;
                }
                ateth = &ohdr->u.atomic_eth;
@@ -2196,7 +2197,7 @@ send_last:
                        goto nack_inv_unlck;
                rkey = be32_to_cpu(ateth->rkey);
                /* Check rkey & NAK */
-               if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
+               if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
                                          vaddr, rkey,
                                          IB_ACCESS_REMOTE_ATOMIC)))
                        goto nack_acc_unlck;
@@ -2208,7 +2209,7 @@ send_last:
                        (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
                                      be64_to_cpu(ateth->compare_data),
                                      sdata);
-               qib_put_mr(qp->r_sge.sge.mr);
+               rvt_put_mr(qp->r_sge.sge.mr);
                qp->r_sge.num_sge = 0;
                e->opcode = opcode;
                e->sent = 0;
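
The cmpxchg() above is the COMPARE_SWAP arm of a ternary whose FETCH_ADD arm falls outside this hunk's context; the maddr and sdata locals declared earlier give away the shape. A hedged reconstruction of the assumed full statement:

	/* Assumed: store the prior memory value as the atomic response,
	 * doing either a 64-bit fetch-and-add or a compare-and-swap. */
	maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
	sdata = be64_to_cpu(ateth->swap_data);
	e->atomic_data = (opcode == OP(FETCH_ADD)) ?
		(u64)atomic64_add_return(sdata, maddr) - sdata :
		(u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
			     be64_to_cpu(ateth->compare_data), sdata);
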