IB/qib: Remove ibport and use rdmavt version
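
The diff below moves qib_qp.c onto the common rdmavt structures: struct
qib_qp becomes struct rvt_qp, the qib_swqe/qib_sge/qib_rwq/qib_ack_entry
types become their rvt_* counterparts, and the per-port QP0/QP1 pointers
move from qib_ibport into the rdmavt port data embedded in it
(ibp->rvp.qp[0] and ibp->rvp.qp[1]). Driver-specific send-side state
(s_hdr, s_tx, s_dma_busy, wait_dma, iowait, s_work) now lives in a
struct qib_qp_priv hung off qp->priv, and the QPN/queue allocation paths
gain a gfp_t argument so RC QPs created with IB_QP_CREATE_USE_GFP_NOIO
are backed by GFP_NOIO allocations.

As a minimal sketch, assuming only the fields referenced in the hunks
below (the authoritative definition lives in the qib driver headers and
may differ), the new per-QP private structure has roughly this shape:

/*
 * Illustrative sketch only, not the in-tree definition; field names are
 * taken from the hunks below, types are assumptions.
 */
struct qib_qp_priv {
        struct qib_ib_header *s_hdr;    /* next packet header to send */
        struct list_head iowait;        /* entry on the dev->pending_lock wait lists */
        atomic_t s_dma_busy;            /* send DMA requests still in flight */
        struct qib_verbs_txreq *s_tx;   /* send DMA request being built/posted */
        struct work_struct s_work;      /* runs qib_do_send() */
        wait_queue_head_t wait_dma;     /* woken once s_dma_busy drops to zero */
        struct rvt_qp *owner;           /* back pointer to the common rvt_qp */
};
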
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 40f85bb3e0d3bdce5289a5c8c9c2418df33e4ca4..9cb9be7809aef1b7183b65914e4c405d7de5efe6 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -35,6 +35,7 @@
 #include <linux/err.h>
 #include <linux/vmalloc.h>
 #include <linux/jhash.h>
+#include <rdma/rdma_vt.h>
 #ifdef CONFIG_DEBUG_FS
 #include <linux/seq_file.h>
 #endif
@@ -100,9 +101,10 @@ static u32 credit_table[31] = {
        32768                   /* 1E */
 };
 
-static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map)
+static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map,
+                        gfp_t gfp)
 {
-       unsigned long page = get_zeroed_page(GFP_KERNEL);
+       unsigned long page = get_zeroed_page(gfp);
 
        /*
         * Free the page if someone raced with us installing it.
@@ -121,7 +123,7 @@ static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map)
  * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
  */
 static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
-                    enum ib_qp_type type, u8 port)
+                    enum ib_qp_type type, u8 port, gfp_t gfp)
 {
        u32 i, offset, max_scan, qpn;
        struct qpn_map *map;
@@ -151,7 +153,7 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
        max_scan = qpt->nmaps - !offset;
        for (i = 0;;) {
                if (unlikely(!map->page)) {
-                       get_map_page(qpt, map);
+                       get_map_page(qpt, map, gfp);
                        if (unlikely(!map->page))
                                break;
                }
@@ -219,7 +221,7 @@ static inline unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn)
  * Put the QP into the hash table.
  * The hash table holds a reference to the QP.
  */
-static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
+static void insert_qp(struct qib_ibdev *dev, struct rvt_qp *qp)
 {
        struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
        unsigned long flags;
@@ -229,9 +231,9 @@ static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
        spin_lock_irqsave(&dev->qpt_lock, flags);
 
        if (qp->ibqp.qp_num == 0)
-               rcu_assign_pointer(ibp->qp0, qp);
+               rcu_assign_pointer(ibp->rvp.qp[0], qp);
        else if (qp->ibqp.qp_num == 1)
-               rcu_assign_pointer(ibp->qp1, qp);
+               rcu_assign_pointer(ibp->rvp.qp[1], qp);
        else {
                qp->next = dev->qp_table[n];
                rcu_assign_pointer(dev->qp_table[n], qp);
@@ -244,7 +246,7 @@ static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
  * Remove the QP from the table so it can't be found asynchronously by
  * the receive interrupt routine.
  */
-static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
+static void remove_qp(struct qib_ibdev *dev, struct rvt_qp *qp)
 {
        struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
        unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
@@ -253,15 +255,15 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
 
        spin_lock_irqsave(&dev->qpt_lock, flags);
 
-       if (rcu_dereference_protected(ibp->qp0,
+       if (rcu_dereference_protected(ibp->rvp.qp[0],
+                                     lockdep_is_held(&dev->qpt_lock)) == qp) {
+               RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
+       } else if (rcu_dereference_protected(ibp->rvp.qp[1],
                        lockdep_is_held(&dev->qpt_lock)) == qp) {
-               RCU_INIT_POINTER(ibp->qp0, NULL);
-       } else if (rcu_dereference_protected(ibp->qp1,
-                       lockdep_is_held(&dev->qpt_lock)) == qp) {
-               RCU_INIT_POINTER(ibp->qp1, NULL);
+               RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
        } else {
-               struct qib_qp *q;
-               struct qib_qp __rcu **qpp;
+               struct rvt_qp *q;
+               struct rvt_qp __rcu **qpp;
 
                removed = 0;
                qpp = &dev->qp_table[n];
@@ -295,7 +297,7 @@ unsigned qib_free_all_qps(struct qib_devdata *dd)
 {
        struct qib_ibdev *dev = &dd->verbs_dev;
        unsigned long flags;
-       struct qib_qp *qp;
+       struct rvt_qp *qp;
        unsigned n, qp_inuse = 0;
 
        for (n = 0; n < dd->num_pports; n++) {
@@ -304,9 +306,9 @@ unsigned qib_free_all_qps(struct qib_devdata *dd)
                if (!qib_mcast_tree_empty(ibp))
                        qp_inuse++;
                rcu_read_lock();
-               if (rcu_dereference(ibp->qp0))
+               if (rcu_dereference(ibp->rvp.qp[0]))
                        qp_inuse++;
-               if (rcu_dereference(ibp->qp1))
+               if (rcu_dereference(ibp->rvp.qp[1]))
                        qp_inuse++;
                rcu_read_unlock();
        }
@@ -335,16 +337,16 @@ unsigned qib_free_all_qps(struct qib_devdata *dd)
  * The caller is responsible for decrementing the QP reference count
  * when done.
  */
-struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
+struct rvt_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
 {
-       struct qib_qp *qp = NULL;
+       struct rvt_qp *qp = NULL;
 
        rcu_read_lock();
        if (unlikely(qpn <= 1)) {
                if (qpn == 0)
-                       qp = rcu_dereference(ibp->qp0);
+                       qp = rcu_dereference(ibp->rvp.qp[0]);
                else
-                       qp = rcu_dereference(ibp->qp1);
+                       qp = rcu_dereference(ibp->rvp.qp[1]);
                if (qp)
                        atomic_inc(&qp->refcount);
        } else {
@@ -367,12 +369,13 @@ struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
  * @qp: the QP to reset
  * @type: the QP type
  */
-static void qib_reset_qp(struct qib_qp *qp, enum ib_qp_type type)
+static void qib_reset_qp(struct rvt_qp *qp, enum ib_qp_type type)
 {
+       struct qib_qp_priv *priv = qp->priv;
        qp->remote_qpn = 0;
        qp->qkey = 0;
        qp->qp_access_flags = 0;
-       atomic_set(&qp->s_dma_busy, 0);
+       atomic_set(&priv->s_dma_busy, 0);
        qp->s_flags &= QIB_S_SIGNAL_REQ_WR;
        qp->s_hdrwords = 0;
        qp->s_wqe = NULL;
@@ -414,7 +417,7 @@ static void qib_reset_qp(struct qib_qp *qp, enum ib_qp_type type)
        qp->r_sge.num_sge = 0;
 }
 
-static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
+static void clear_mr_refs(struct rvt_qp *qp, int clr_sends)
 {
        unsigned n;
 
@@ -425,23 +428,24 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
 
        if (clr_sends) {
                while (qp->s_last != qp->s_head) {
-                       struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
+                       struct rvt_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
                        unsigned i;
 
                        for (i = 0; i < wqe->wr.num_sge; i++) {
-                               struct qib_sge *sge = &wqe->sg_list[i];
+                               struct rvt_sge *sge = &wqe->sg_list[i];
 
-                               qib_put_mr(sge->mr);
+                               rvt_put_mr(sge->mr);
                        }
                        if (qp->ibqp.qp_type == IB_QPT_UD ||
                            qp->ibqp.qp_type == IB_QPT_SMI ||
                            qp->ibqp.qp_type == IB_QPT_GSI)
-                               atomic_dec(&to_iah(wqe->ud_wr.ah)->refcount);
+                               atomic_dec(
+                                &ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
                        if (++qp->s_last >= qp->s_size)
                                qp->s_last = 0;
                }
                if (qp->s_rdma_mr) {
-                       qib_put_mr(qp->s_rdma_mr);
+                       rvt_put_mr(qp->s_rdma_mr);
                        qp->s_rdma_mr = NULL;
                }
        }
@@ -450,11 +454,11 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
                return;
 
        for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
-               struct qib_ack_entry *e = &qp->s_ack_queue[n];
+               struct rvt_ack_entry *e = &qp->s_ack_queue[n];
 
                if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
                    e->rdma_sge.mr) {
-                       qib_put_mr(e->rdma_sge.mr);
+                       rvt_put_mr(e->rdma_sge.mr);
                        e->rdma_sge.mr = NULL;
                }
        }
@@ -470,8 +474,9 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
  * The QP r_lock and s_lock should be held and interrupts disabled.
  * If we are already in error state, just return.
  */
-int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
+int qib_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
 {
+       struct qib_qp_priv *priv = qp->priv;
        struct qib_ibdev *dev = to_idev(qp->ibqp.device);
        struct ib_wc wc;
        int ret = 0;
@@ -490,21 +495,21 @@ int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
                qp->s_flags &= ~QIB_S_ANY_WAIT_SEND;
 
        spin_lock(&dev->pending_lock);
-       if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
+       if (!list_empty(&priv->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
                qp->s_flags &= ~QIB_S_ANY_WAIT_IO;
-               list_del_init(&qp->iowait);
+               list_del_init(&priv->iowait);
        }
        spin_unlock(&dev->pending_lock);
 
        if (!(qp->s_flags & QIB_S_BUSY)) {
                qp->s_hdrwords = 0;
                if (qp->s_rdma_mr) {
-                       qib_put_mr(qp->s_rdma_mr);
+                       rvt_put_mr(qp->s_rdma_mr);
                        qp->s_rdma_mr = NULL;
                }
-               if (qp->s_tx) {
-                       qib_put_txreq(qp->s_tx);
-                       qp->s_tx = NULL;
+               if (priv->s_tx) {
+                       qib_put_txreq(priv->s_tx);
+                       priv->s_tx = NULL;
                }
        }
 
@@ -526,7 +531,7 @@ int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
        wc.status = IB_WC_WR_FLUSH_ERR;
 
        if (qp->r_rq.wq) {
-               struct qib_rwq *wq;
+               struct rvt_rwq *wq;
                u32 head;
                u32 tail;
 
@@ -569,7 +574,8 @@ int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                  int attr_mask, struct ib_udata *udata)
 {
        struct qib_ibdev *dev = to_idev(ibqp->device);
-       struct qib_qp *qp = to_iqp(ibqp);
+       struct rvt_qp *qp = to_iqp(ibqp);
+       struct qib_qp_priv *priv = qp->priv;
        enum ib_qp_state cur_state, new_state;
        struct ib_event ev;
        int lastwqe = 0;
@@ -589,16 +595,17 @@ int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                goto inval;
 
        if (attr_mask & IB_QP_AV) {
-               if (attr->ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
+               if (attr->ah_attr.dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
                        goto inval;
-               if (qib_check_ah(qp->ibqp.device, &attr->ah_attr))
+               if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
                        goto inval;
        }
 
        if (attr_mask & IB_QP_ALT_PATH) {
-               if (attr->alt_ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
+               if (attr->alt_ah_attr.dlid >=
+                   be16_to_cpu(IB_MULTICAST_LID_BASE))
                        goto inval;
-               if (qib_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
+               if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
                        goto inval;
                if (attr->alt_pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
                        goto inval;
@@ -696,19 +703,20 @@ int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                if (qp->state != IB_QPS_RESET) {
                        qp->state = IB_QPS_RESET;
                        spin_lock(&dev->pending_lock);
-                       if (!list_empty(&qp->iowait))
-                               list_del_init(&qp->iowait);
+                       if (!list_empty(&priv->iowait))
+                               list_del_init(&priv->iowait);
                        spin_unlock(&dev->pending_lock);
                        qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
                        spin_unlock(&qp->s_lock);
                        spin_unlock_irq(&qp->r_lock);
                        /* Stop the sending work queue and retry timer */
-                       cancel_work_sync(&qp->s_work);
+                       cancel_work_sync(&priv->s_work);
                        del_timer_sync(&qp->s_timer);
-                       wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
-                       if (qp->s_tx) {
-                               qib_put_txreq(qp->s_tx);
-                               qp->s_tx = NULL;
+                       wait_event(priv->wait_dma,
+                                  !atomic_read(&priv->s_dma_busy));
+                       if (priv->s_tx) {
+                               qib_put_txreq(priv->s_tx);
+                               priv->s_tx = NULL;
                        }
                        remove_qp(dev, qp);
                        wait_event(qp->wait, !atomic_read(&qp->refcount));
@@ -854,7 +862,7 @@ bail:
 int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                 int attr_mask, struct ib_qp_init_attr *init_attr)
 {
-       struct qib_qp *qp = to_iqp(ibqp);
+       struct rvt_qp *qp = to_iqp(ibqp);
 
        attr->qp_state = qp->state;
        attr->cur_qp_state = attr->qp_state;
@@ -907,7 +915,7 @@ int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
  *
  * Returns the AETH.
  */
-__be32 qib_compute_aeth(struct qib_qp *qp)
+__be32 qib_compute_aeth(struct rvt_qp *qp)
 {
        u32 aeth = qp->r_msn & QIB_MSN_MASK;
 
@@ -920,7 +928,7 @@ __be32 qib_compute_aeth(struct qib_qp *qp)
        } else {
                u32 min, max, x;
                u32 credits;
-               struct qib_rwq *wq = qp->r_rq.wq;
+               struct rvt_rwq *wq = qp->r_rq.wq;
                u32 head;
                u32 tail;
 
@@ -975,21 +983,29 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
                            struct ib_qp_init_attr *init_attr,
                            struct ib_udata *udata)
 {
-       struct qib_qp *qp;
+       struct rvt_qp *qp;
        int err;
-       struct qib_swqe *swq = NULL;
+       struct rvt_swqe *swq = NULL;
        struct qib_ibdev *dev;
        struct qib_devdata *dd;
        size_t sz;
        size_t sg_list_sz;
        struct ib_qp *ret;
+       gfp_t gfp;
+       struct qib_qp_priv *priv;
 
        if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
            init_attr->cap.max_send_wr > ib_qib_max_qp_wrs ||
-           init_attr->create_flags) {
-               ret = ERR_PTR(-EINVAL);
-               goto bail;
-       }
+           init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
+               return ERR_PTR(-EINVAL);
+
+       /* GFP_NOIO is applicable in RC QPs only */
+       if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
+           init_attr->qp_type != IB_QPT_RC)
+               return ERR_PTR(-EINVAL);
+
+       gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
+                       GFP_NOIO : GFP_KERNEL;
 
        /* Check receive queue parameters if no SRQ is specified. */
        if (!init_attr->srq) {
@@ -1018,10 +1034,11 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
        case IB_QPT_UC:
        case IB_QPT_RC:
        case IB_QPT_UD:
-               sz = sizeof(struct qib_sge) *
+               sz = sizeof(struct rvt_sge) *
                        init_attr->cap.max_send_sge +
-                       sizeof(struct qib_swqe);
-               swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
+                       sizeof(struct rvt_swqe);
+               swq = __vmalloc((init_attr->cap.max_send_wr + 1) * sz,
+                               gfp, PAGE_KERNEL);
                if (swq == NULL) {
                        ret = ERR_PTR(-ENOMEM);
                        goto bail;
@@ -1029,7 +1046,7 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
                sz = sizeof(*qp);
                sg_list_sz = 0;
                if (init_attr->srq) {
-                       struct qib_srq *srq = to_isrq(init_attr->srq);
+                       struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);
 
                        if (srq->rq.max_sge > 1)
                                sg_list_sz = sizeof(*qp->r_sg_list) *
@@ -1037,17 +1054,24 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
                } else if (init_attr->cap.max_recv_sge > 1)
                        sg_list_sz = sizeof(*qp->r_sg_list) *
                                (init_attr->cap.max_recv_sge - 1);
-               qp = kzalloc(sz + sg_list_sz, GFP_KERNEL);
+               qp = kzalloc(sz + sg_list_sz, gfp);
                if (!qp) {
                        ret = ERR_PTR(-ENOMEM);
                        goto bail_swq;
                }
                RCU_INIT_POINTER(qp->next, NULL);
-               qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), GFP_KERNEL);
-               if (!qp->s_hdr) {
+               priv = kzalloc(sizeof(*priv), gfp);
+               if (!priv) {
+                       ret = ERR_PTR(-ENOMEM);
+                       goto bail_qp_hdr;
+               }
+               priv->owner = qp;
+               priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), gfp);
+               if (!priv->s_hdr) {
                        ret = ERR_PTR(-ENOMEM);
                        goto bail_qp;
                }
+               qp->priv = priv;
                qp->timeout_jiffies =
                        usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
                                1000UL);
@@ -1057,9 +1081,17 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
                        qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
                        qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
                        sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
-                               sizeof(struct qib_rwqe);
-                       qp->r_rq.wq = vmalloc_user(sizeof(struct qib_rwq) +
-                                                  qp->r_rq.size * sz);
+                               sizeof(struct rvt_rwqe);
+                       if (gfp != GFP_NOIO)
+                               qp->r_rq.wq = vmalloc_user(
+                                               sizeof(struct rvt_rwq) +
+                                               qp->r_rq.size * sz);
+                       else
+                               qp->r_rq.wq = __vmalloc(
+                                               sizeof(struct rvt_rwq) +
+                                               qp->r_rq.size * sz,
+                                               gfp, PAGE_KERNEL);
+
                        if (!qp->r_rq.wq) {
                                ret = ERR_PTR(-ENOMEM);
                                goto bail_qp;
@@ -1075,11 +1107,11 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
                spin_lock_init(&qp->r_rq.lock);
                atomic_set(&qp->refcount, 0);
                init_waitqueue_head(&qp->wait);
-               init_waitqueue_head(&qp->wait_dma);
+               init_waitqueue_head(&priv->wait_dma);
                init_timer(&qp->s_timer);
                qp->s_timer.data = (unsigned long)qp;
-               INIT_WORK(&qp->s_work, qib_do_send);
-               INIT_LIST_HEAD(&qp->iowait);
+               INIT_WORK(&priv->s_work, qib_do_send);
+               INIT_LIST_HEAD(&priv->iowait);
                INIT_LIST_HEAD(&qp->rspwait);
                qp->state = IB_QPS_RESET;
                qp->s_wq = swq;
@@ -1090,7 +1122,7 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
                dev = to_idev(ibpd->device);
                dd = dd_from_dev(dev);
                err = alloc_qpn(dd, &dev->qpn_table, init_attr->qp_type,
-                               init_attr->port_num);
+                               init_attr->port_num, gfp);
                if (err < 0) {
                        ret = ERR_PTR(err);
                        vfree(qp->r_rq.wq);
@@ -1124,7 +1156,7 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
                                goto bail_ip;
                        }
                } else {
-                       u32 s = sizeof(struct qib_rwq) + qp->r_rq.size * sz;
+                       u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;
 
                        qp->ip = qib_create_mmap_info(dev, s,
                                                      ibpd->uobject->context,
@@ -1169,7 +1201,9 @@ bail_ip:
                vfree(qp->r_rq.wq);
        free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
 bail_qp:
-       kfree(qp->s_hdr);
+       kfree(priv->s_hdr);
+       kfree(priv);
+bail_qp_hdr:
        kfree(qp);
 bail_swq:
        vfree(swq);
@@ -1188,25 +1222,26 @@ bail:
  */
 int qib_destroy_qp(struct ib_qp *ibqp)
 {
-       struct qib_qp *qp = to_iqp(ibqp);
+       struct rvt_qp *qp = to_iqp(ibqp);
        struct qib_ibdev *dev = to_idev(ibqp->device);
+       struct qib_qp_priv *priv = qp->priv;
 
        /* Make sure HW and driver activity is stopped. */
        spin_lock_irq(&qp->s_lock);
        if (qp->state != IB_QPS_RESET) {
                qp->state = IB_QPS_RESET;
                spin_lock(&dev->pending_lock);
-               if (!list_empty(&qp->iowait))
-                       list_del_init(&qp->iowait);
+               if (!list_empty(&priv->iowait))
+                       list_del_init(&priv->iowait);
                spin_unlock(&dev->pending_lock);
                qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
                spin_unlock_irq(&qp->s_lock);
-               cancel_work_sync(&qp->s_work);
+               cancel_work_sync(&priv->s_work);
                del_timer_sync(&qp->s_timer);
-               wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
-               if (qp->s_tx) {
-                       qib_put_txreq(qp->s_tx);
-                       qp->s_tx = NULL;
+               wait_event(priv->wait_dma, !atomic_read(&priv->s_dma_busy));
+               if (priv->s_tx) {
+                       qib_put_txreq(priv->s_tx);
+                       priv->s_tx = NULL;
                }
                remove_qp(dev, qp);
                wait_event(qp->wait, !atomic_read(&qp->refcount));
@@ -1225,7 +1260,8 @@ int qib_destroy_qp(struct ib_qp *ibqp)
        else
                vfree(qp->r_rq.wq);
        vfree(qp->s_wq);
-       kfree(qp->s_hdr);
+       kfree(priv->s_hdr);
+       kfree(priv);
        kfree(qp);
        return 0;
 }
@@ -1262,7 +1298,7 @@ void qib_free_qpn_table(struct qib_qpn_table *qpt)
  *
  * The QP s_lock should be held.
  */
-void qib_get_credit(struct qib_qp *qp, u32 aeth)
+void qib_get_credit(struct rvt_qp *qp, u32 aeth)
 {
        u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;
 
@@ -1296,7 +1332,7 @@ void qib_get_credit(struct qib_qp *qp, u32 aeth)
 
 struct qib_qp_iter {
        struct qib_ibdev *dev;
-       struct qib_qp *qp;
+       struct rvt_qp *qp;
        int n;
 };
 
@@ -1322,8 +1358,8 @@ int qib_qp_iter_next(struct qib_qp_iter *iter)
        struct qib_ibdev *dev = iter->dev;
        int n = iter->n;
        int ret = 1;
-       struct qib_qp *pqp = iter->qp;
-       struct qib_qp *qp;
+       struct rvt_qp *pqp = iter->qp;
+       struct rvt_qp *qp;
 
        for (; n < dev->qp_table_size; n++) {
                if (pqp)
@@ -1346,8 +1382,9 @@ static const char * const qp_type_str[] = {
 
 void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter)
 {
-       struct qib_swqe *wqe;
-       struct qib_qp *qp = iter->qp;
+       struct rvt_swqe *wqe;
+       struct rvt_qp *qp = iter->qp;
+       struct qib_qp_priv *priv = qp->priv;
 
        wqe = get_swqe_ptr(qp, qp->s_last);
        seq_printf(s,
@@ -1359,8 +1396,8 @@ void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter)
                   wqe->wr.opcode,
                   qp->s_hdrwords,
                   qp->s_flags,
-                  atomic_read(&qp->s_dma_busy),
-                  !list_empty(&qp->iowait),
+                  atomic_read(&priv->s_dma_busy),
+                  !list_empty(&priv->iowait),
                   qp->timeout,
                   wqe->ssn,
                   qp->s_lsn,
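
For reference, a hedged sketch of how a kernel consumer could request the
GFP_NOIO behaviour wired up above; qib_create_qp() only honours the flag
for RC QPs. No in-tree caller is implied, and the pd, scq and rcq objects
are assumed to already exist:

#include <rdma/ib_verbs.h>

/*
 * Illustrative only: create an RC QP whose driver-side allocations are
 * made with GFP_NOIO instead of GFP_KERNEL.
 */
static struct ib_qp *example_create_noio_qp(struct ib_pd *pd,
                                            struct ib_cq *scq,
                                            struct ib_cq *rcq)
{
        struct ib_qp_init_attr attr = {
                .qp_type      = IB_QPT_RC,      /* the NOIO path is RC-only */
                .create_flags = IB_QP_CREATE_USE_GFP_NOIO,
                .send_cq      = scq,
                .recv_cq      = rcq,
                .sq_sig_type  = IB_SIGNAL_REQ_WR,
                .cap = {
                        .max_send_wr  = 16,
                        .max_recv_wr  = 16,
                        .max_send_sge = 1,
                        .max_recv_sge = 1,
                },
        };

        /* On a qib device this ends up in qib_create_qp() above. */
        return ib_create_qp(pd, &attr);
}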