Merge tag 'for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford...
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 5 Aug 2016 00:26:31 +0000 (20:26 -0400)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 5 Aug 2016 00:26:31 +0000 (20:26 -0400)
Pull second round of rdma updates from Doug Ledford:
 "This can be split out into just two categories:

   - fixes to the RDMA R/W API in regards to SG list length limits
     (about 5 patches)

   - fixes/features for the Intel hfi1 driver (everything else)

  The hfi1 driver is still being brought to full feature support by
  Intel, and they have a lot of people working on it, so that amounts to
  almost the entirety of this pull request"
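
The R/W API part of this pull shows up in the drivers/infiniband/core/verbs.c
and include/rdma/ib_verbs.h hunks below: ib_create_qp() now records per-QP SGE
limits (max_write_sge, and max_read_sge clamped to the device's max_sge_rd) so
that RDMA READ work requests are not built with more SGEs than the device can
handle. A minimal, hypothetical sketch of how a consumer could pick an SGE
budget from the new fields (the helper name is invented, not part of this
series):

/*
 * Illustrative only: RDMA READs may be capped lower than WRITEs on
 * devices where max_sge_rd is smaller than the send SGE limit.
 */
static u32 example_rw_sge_budget(struct ib_qp *qp, bool is_rdma_read)
{
        return is_rdma_read ? qp->max_read_sge : qp->max_write_sge;
}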

* tag 'for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma: (84 commits)
  IB/hfi1: Add cache evict LRU list
  IB/hfi1: Fix memory leak during unexpected shutdown
  IB/hfi1: Remove unneeded mm argument in remove function
  IB/hfi1: Consistently call ops->remove outside spinlock
  IB/hfi1: Use evict mmu rb operation
  IB/hfi1: Add evict operation to the mmu rb handler
  IB/hfi1: Fix TID caching actions
  IB/hfi1: Make the cache handler own its rb tree root
  IB/hfi1: Make use of mm consistent
  IB/hfi1: Fix user SDMA racy user request claim
  IB/hfi1: Fix error condition that needs to clean up
  IB/hfi1: Release node on insert failure
  IB/hfi1: Validate SDMA user iovector count
  IB/hfi1: Validate SDMA user request index
  IB/hfi1: Use the same capability state for all shared contexts
  IB/hfi1: Prevent null pointer dereference
  IB/hfi1: Rename TID mmu_rb_* functions
  IB/hfi1: Remove unneeded empty check in hfi1_mmu_rb_unregister()
  IB/hfi1: Restructure hfi1_file_open
  IB/hfi1: Make iovec loop index easy to understand
  ...

drivers/infiniband/core/verbs.c
drivers/infiniband/hw/hfi1/Kconfig
drivers/infiniband/hw/hfi1/file_ops.c
drivers/infiniband/hw/hfi1/hfi.h
drivers/infiniband/hw/hfi1/verbs.c
include/rdma/ib_verbs.h

index 2e813edcddabd9919849f881c6a4da61bc6382e6,e39a0b59723426ce0957f48092f369c4f319308a..f2b776efab3a3ee1ffdc306ef7af01d3093e9ed7
@@@ -758,12 -758,6 +758,12 @@@ struct ib_qp *ib_create_qp(struct ib_p
        struct ib_qp *qp;
        int ret;
  
 +      if (qp_init_attr->rwq_ind_tbl &&
 +          (qp_init_attr->recv_cq ||
 +          qp_init_attr->srq || qp_init_attr->cap.max_recv_wr ||
 +          qp_init_attr->cap.max_recv_sge))
 +              return ERR_PTR(-EINVAL);
 +
        /*
         * If the callers is using the RDMA API calculate the resources
         * needed for the RDMA READ/WRITE operations.
        qp->real_qp    = qp;
        qp->uobject    = NULL;
        qp->qp_type    = qp_init_attr->qp_type;
 +      qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl;
  
        atomic_set(&qp->usecnt, 0);
        qp->mrs_used = 0;
                qp->srq = NULL;
        } else {
                qp->recv_cq = qp_init_attr->recv_cq;
 -              atomic_inc(&qp_init_attr->recv_cq->usecnt);
 +              if (qp_init_attr->recv_cq)
 +                      atomic_inc(&qp_init_attr->recv_cq->usecnt);
                qp->srq = qp_init_attr->srq;
                if (qp->srq)
                        atomic_inc(&qp_init_attr->srq->usecnt);
        qp->xrcd    = NULL;
  
        atomic_inc(&pd->usecnt);
 -      atomic_inc(&qp_init_attr->send_cq->usecnt);
 +      if (qp_init_attr->send_cq)
 +              atomic_inc(&qp_init_attr->send_cq->usecnt);
 +      if (qp_init_attr->rwq_ind_tbl)
 +              atomic_inc(&qp->rwq_ind_tbl->usecnt);
  
        if (qp_init_attr->cap.max_rdma_ctxs) {
                ret = rdma_rw_init_mrs(qp, qp_init_attr);
                }
        }
  
+       /*
+        * Note: all hw drivers guarantee that max_send_sge is lower than
+        * the device RDMA WRITE SGE limit but not all hw drivers ensure that
+        * max_send_sge <= max_sge_rd.
+        */
+       qp->max_write_sge = qp_init_attr->cap.max_send_sge;
+       qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge,
+                                device->attrs.max_sge_rd);
        return qp;
  }
  EXPORT_SYMBOL(ib_create_qp);
@@@ -1294,7 -1292,6 +1303,7 @@@ int ib_destroy_qp(struct ib_qp *qp
        struct ib_pd *pd;
        struct ib_cq *scq, *rcq;
        struct ib_srq *srq;
 +      struct ib_rwq_ind_table *ind_tbl;
        int ret;
  
        WARN_ON_ONCE(qp->mrs_used > 0);
        scq  = qp->send_cq;
        rcq  = qp->recv_cq;
        srq  = qp->srq;
 +      ind_tbl = qp->rwq_ind_tbl;
  
        if (!qp->uobject)
                rdma_rw_cleanup_mrs(qp);
                        atomic_dec(&rcq->usecnt);
                if (srq)
                        atomic_dec(&srq->usecnt);
 +              if (ind_tbl)
 +                      atomic_dec(&ind_tbl->usecnt);
        }
  
        return ret;
@@@ -1573,150 -1567,6 +1582,150 @@@ int ib_dealloc_xrcd(struct ib_xrcd *xrc
  }
  EXPORT_SYMBOL(ib_dealloc_xrcd);
  
 +/**
 + * ib_create_wq - Creates a WQ associated with the specified protection
 + * domain.
 + * @pd: The protection domain associated with the WQ.
 + * @wq_init_attr: A list of initial attributes required to create the
 + * WQ. If WQ creation succeeds, then the attributes are updated to
 + * the actual capabilities of the created WQ.
 + *
 + * wq_init_attr->max_wr and wq_init_attr->max_sge determine
 + * the requested size of the WQ, and set to the actual values allocated
 + * on return.
 + * If ib_create_wq() succeeds, then max_wr and max_sge will always be
 + * at least as large as the requested values.
 + */
 +struct ib_wq *ib_create_wq(struct ib_pd *pd,
 +                         struct ib_wq_init_attr *wq_attr)
 +{
 +      struct ib_wq *wq;
 +
 +      if (!pd->device->create_wq)
 +              return ERR_PTR(-ENOSYS);
 +
 +      wq = pd->device->create_wq(pd, wq_attr, NULL);
 +      if (!IS_ERR(wq)) {
 +              wq->event_handler = wq_attr->event_handler;
 +              wq->wq_context = wq_attr->wq_context;
 +              wq->wq_type = wq_attr->wq_type;
 +              wq->cq = wq_attr->cq;
 +              wq->device = pd->device;
 +              wq->pd = pd;
 +              wq->uobject = NULL;
 +              atomic_inc(&pd->usecnt);
 +              atomic_inc(&wq_attr->cq->usecnt);
 +              atomic_set(&wq->usecnt, 0);
 +      }
 +      return wq;
 +}
 +EXPORT_SYMBOL(ib_create_wq);
 +
 +/**
 + * ib_destroy_wq - Destroys the specified WQ.
 + * @wq: The WQ to destroy.
 + */
 +int ib_destroy_wq(struct ib_wq *wq)
 +{
 +      int err;
 +      struct ib_cq *cq = wq->cq;
 +      struct ib_pd *pd = wq->pd;
 +
 +      if (atomic_read(&wq->usecnt))
 +              return -EBUSY;
 +
 +      err = wq->device->destroy_wq(wq);
 +      if (!err) {
 +              atomic_dec(&pd->usecnt);
 +              atomic_dec(&cq->usecnt);
 +      }
 +      return err;
 +}
 +EXPORT_SYMBOL(ib_destroy_wq);
 +
 +/**
 + * ib_modify_wq - Modifies the specified WQ.
 + * @wq: The WQ to modify.
 + * @wq_attr: On input, specifies the WQ attributes to modify.
 + * @wq_attr_mask: A bit-mask used to specify which attributes of the WQ
 + *   are being modified.
 + * On output, the current values of selected WQ attributes are returned.
 + */
 +int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
 +               u32 wq_attr_mask)
 +{
 +      int err;
 +
 +      if (!wq->device->modify_wq)
 +              return -ENOSYS;
 +
 +      err = wq->device->modify_wq(wq, wq_attr, wq_attr_mask, NULL);
 +      return err;
 +}
 +EXPORT_SYMBOL(ib_modify_wq);
 +
 +/*
 + * ib_create_rwq_ind_table - Creates a RQ Indirection Table.
 + * @device: The device on which to create the rwq indirection table.
 + * @ib_rwq_ind_table_init_attr: A list of initial attributes required to
 + * create the Indirection Table.
 + *
 + * Note: The life time of ib_rwq_ind_table_init_attr->ind_tbl is not less
 + *    than the created ib_rwq_ind_table object and the caller is responsible
 + *    for its memory allocation/free.
 + */
 +struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
 +                                               struct ib_rwq_ind_table_init_attr *init_attr)
 +{
 +      struct ib_rwq_ind_table *rwq_ind_table;
 +      int i;
 +      u32 table_size;
 +
 +      if (!device->create_rwq_ind_table)
 +              return ERR_PTR(-ENOSYS);
 +
 +      table_size = (1 << init_attr->log_ind_tbl_size);
 +      rwq_ind_table = device->create_rwq_ind_table(device,
 +                              init_attr, NULL);
 +      if (IS_ERR(rwq_ind_table))
 +              return rwq_ind_table;
 +
 +      rwq_ind_table->ind_tbl = init_attr->ind_tbl;
 +      rwq_ind_table->log_ind_tbl_size = init_attr->log_ind_tbl_size;
 +      rwq_ind_table->device = device;
 +      rwq_ind_table->uobject = NULL;
 +      atomic_set(&rwq_ind_table->usecnt, 0);
 +
 +      for (i = 0; i < table_size; i++)
 +              atomic_inc(&rwq_ind_table->ind_tbl[i]->usecnt);
 +
 +      return rwq_ind_table;
 +}
 +EXPORT_SYMBOL(ib_create_rwq_ind_table);
 +
 +/*
 + * ib_destroy_rwq_ind_table - Destroys the specified Indirection Table.
 + * @wq_ind_table: The Indirection Table to destroy.
 +*/
 +int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table)
 +{
 +      int err, i;
 +      u32 table_size = (1 << rwq_ind_table->log_ind_tbl_size);
 +      struct ib_wq **ind_tbl = rwq_ind_table->ind_tbl;
 +
 +      if (atomic_read(&rwq_ind_table->usecnt))
 +              return -EBUSY;
 +
 +      err = rwq_ind_table->device->destroy_rwq_ind_table(rwq_ind_table);
 +      if (!err) {
 +              for (i = 0; i < table_size; i++)
 +                      atomic_dec(&ind_tbl[i]->usecnt);
 +      }
 +
 +      return err;
 +}
 +EXPORT_SYMBOL(ib_destroy_rwq_ind_table);
 +
  struct ib_flow *ib_create_flow(struct ib_qp *qp,
                               struct ib_flow_attr *flow_attr,
                               int domain)
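
The new core verbs in the hunk above -- ib_create_wq(), ib_create_rwq_ind_table()
and the rwq_ind_tbl member of ib_qp_init_attr -- are meant to be combined by a
consumer roughly as in the following hedged sketch. The function is hypothetical
and error unwinding is omitted; it only shows how the pieces introduced here fit
together, including the new ib_create_qp() check that a QP built on an
indirection table must not also set recv_cq, srq or cap.max_recv_*:

/* Hypothetical consumer: two RQ work queues feeding one RSS-style QP
 * through an indirection table.  Cleanup on failure is omitted. */
static struct ib_qp *example_rss_qp(struct ib_pd *pd, struct ib_cq *send_cq,
                                    struct ib_cq *recv_cq)
{
        struct ib_wq_init_attr wq_attr = {
                .wq_type = IB_WQT_RQ,
                .max_wr  = 128,
                .max_sge = 1,
                .cq      = recv_cq,
        };
        struct ib_rwq_ind_table_init_attr tbl_attr = {
                .log_ind_tbl_size = 1,          /* 1 << 1 == 2 entries */
        };
        struct ib_qp_init_attr qp_attr = {};
        struct ib_rwq_ind_table *ind_tbl;
        struct ib_wq **wqs;
        int i;

        /* ind_tbl must outlive the indirection table object (see above) */
        wqs = kcalloc(2, sizeof(*wqs), GFP_KERNEL);
        if (!wqs)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < 2; i++) {
                wqs[i] = ib_create_wq(pd, &wq_attr);
                if (IS_ERR(wqs[i]))
                        return ERR_CAST(wqs[i]);        /* cleanup omitted */
        }

        tbl_attr.ind_tbl = wqs;
        ind_tbl = ib_create_rwq_ind_table(pd->device, &tbl_attr);
        if (IS_ERR(ind_tbl))
                return ERR_CAST(ind_tbl);               /* cleanup omitted */

        qp_attr.qp_type          = IB_QPT_RAW_PACKET;
        qp_attr.send_cq          = send_cq;
        qp_attr.cap.max_send_wr  = 128;
        qp_attr.cap.max_send_sge = 1;
        /* recv_cq, srq and cap.max_recv_* must stay zero with rwq_ind_tbl */
        qp_attr.rwq_ind_tbl      = ind_tbl;

        return ib_create_qp(pd, &qp_attr);
}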
index f846fd51b85b925b268679f6d28e1e65db56dac7,bac18607fd5bbcd9d437f946a8dadd839f4dd2d3..f6ea0881765a1e789d2107479e7245d4cc0640c5
@@@ -1,8 -1,10 +1,9 @@@
  config INFINIBAND_HFI1
        tristate "Intel OPA Gen1 support"
-       depends on X86_64 && INFINIBAND_RDMAVT
+       depends on X86_64 && INFINIBAND_RDMAVT && I2C
        select MMU_NOTIFIER
        select CRC32
 -      default m
+       select I2C_ALGOBIT
        ---help---
        This is a low-level driver for Intel OPA Gen1 adapter.
  config HFI1_DEBUG_SDMA_ORDER
index 32c19fad12a4c4df5d074ac5540e3a2f8ee4973c,4f39bffad74a371fff15fbbb227807ae5361defa..1ecbec1923589c3ec96d0958ec45541aa2b5724b
@@@ -168,6 -168,7 +168,7 @@@ static inline int is_valid_mmap(u64 tok
  
  static int hfi1_file_open(struct inode *inode, struct file *fp)
  {
+       struct hfi1_filedata *fd;
        struct hfi1_devdata *dd = container_of(inode->i_cdev,
                                               struct hfi1_devdata,
                                               user_cdev);
        kobject_get(&dd->kobj);
  
        /* The real work is performed later in assign_ctxt() */
-       fp->private_data = kzalloc(sizeof(struct hfi1_filedata), GFP_KERNEL);
-       if (fp->private_data) /* no cpu affinity by default */
-               ((struct hfi1_filedata *)fp->private_data)->rec_cpu_num = -1;
-       return fp->private_data ? 0 : -ENOMEM;
+       fd = kzalloc(sizeof(*fd), GFP_KERNEL);
+       if (fd) {
+               fd->rec_cpu_num = -1; /* no cpu affinity by default */
+               fd->mm = current->mm;
+       }
+       fp->private_data = fd;
+       return fd ? 0 : -ENOMEM;
  }
  
  static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
                                    sizeof(struct hfi1_base_info));
                break;
        case HFI1_IOCTL_CREDIT_UPD:
 -              if (uctxt && uctxt->sc)
 +              if (uctxt)
                        sc_return_credits(uctxt->sc);
                break;
  
@@@ -392,41 -400,38 +400,38 @@@ static ssize_t hfi1_write_iter(struct k
        struct hfi1_filedata *fd = kiocb->ki_filp->private_data;
        struct hfi1_user_sdma_pkt_q *pq = fd->pq;
        struct hfi1_user_sdma_comp_q *cq = fd->cq;
-       int ret = 0, done = 0, reqs = 0;
+       int done = 0, reqs = 0;
        unsigned long dim = from->nr_segs;
  
-       if (!cq || !pq) {
-               ret = -EIO;
-               goto done;
-       }
+       if (!cq || !pq)
+               return -EIO;
  
-       if (!iter_is_iovec(from) || !dim) {
-               ret = -EINVAL;
-               goto done;
-       }
+       if (!iter_is_iovec(from) || !dim)
+               return -EINVAL;
  
        hfi1_cdbg(SDMA, "SDMA request from %u:%u (%lu)",
                  fd->uctxt->ctxt, fd->subctxt, dim);
  
-       if (atomic_read(&pq->n_reqs) == pq->n_max_reqs) {
-               ret = -ENOSPC;
-               goto done;
-       }
+       if (atomic_read(&pq->n_reqs) == pq->n_max_reqs)
+               return -ENOSPC;
  
        while (dim) {
+               int ret;
                unsigned long count = 0;
  
                ret = hfi1_user_sdma_process_request(
                        kiocb->ki_filp, (struct iovec *)(from->iov + done),
                        dim, &count);
-               if (ret)
-                       goto done;
+               if (ret) {
+                       reqs = ret;
+                       break;
+               }
                dim -= count;
                done += count;
                reqs++;
        }
- done:
-       return ret ? ret : reqs;
+       return reqs;
  }
  
  static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
@@@ -718,7 -723,7 +723,7 @@@ static int hfi1_file_close(struct inod
        hfi1_user_sdma_free_queues(fdata);
  
        /* release the cpu */
-       hfi1_put_proc_affinity(dd, fdata->rec_cpu_num);
+       hfi1_put_proc_affinity(fdata->rec_cpu_num);
  
        /*
         * Clear any left over, unhandled events so the next process that
  
        if (--uctxt->cnt) {
                uctxt->active_slaves &= ~(1 << fdata->subctxt);
-               uctxt->subpid[fdata->subctxt] = 0;
                mutex_unlock(&hfi1_mutex);
                goto done;
        }
        write_kctxt_csr(dd, uctxt->sc->hw_context, SEND_CTXT_CHECK_ENABLE,
                        hfi1_pkt_default_send_ctxt_mask(dd, uctxt->sc->type));
        sc_disable(uctxt->sc);
-       uctxt->pid = 0;
        spin_unlock_irqrestore(&dd->uctxt_lock, flags);
  
        dd->rcd[uctxt->ctxt] = NULL;
@@@ -818,9 -821,10 +821,10 @@@ static int assign_ctxt(struct file *fp
                ret = find_shared_ctxt(fp, uinfo);
                if (ret < 0)
                        goto done_unlock;
-               if (ret)
-                       fd->rec_cpu_num = hfi1_get_proc_affinity(
-                               fd->uctxt->dd, fd->uctxt->numa_id);
+               if (ret) {
+                       fd->rec_cpu_num =
+                               hfi1_get_proc_affinity(fd->uctxt->numa_id);
+               }
        }
  
        /*
@@@ -895,7 -899,6 +899,6 @@@ static int find_shared_ctxt(struct fil
                        }
                        fd->uctxt = uctxt;
                        fd->subctxt  = uctxt->cnt++;
-                       uctxt->subpid[fd->subctxt] = current->pid;
                        uctxt->active_slaves |= 1 << fd->subctxt;
                        ret = 1;
                        goto done;
@@@ -932,7 -935,11 +935,11 @@@ static int allocate_ctxt(struct file *f
        if (ctxt == dd->num_rcv_contexts)
                return -EBUSY;
  
-       fd->rec_cpu_num = hfi1_get_proc_affinity(dd, -1);
+       /*
+        * If we don't have a NUMA node requested, preference is towards
+        * device NUMA node.
+        */
+       fd->rec_cpu_num = hfi1_get_proc_affinity(dd->node);
        if (fd->rec_cpu_num != -1)
                numa = cpu_to_node(fd->rec_cpu_num);
        else
                        return ret;
        }
        uctxt->userversion = uinfo->userversion;
-       uctxt->pid = current->pid;
-       uctxt->flags = HFI1_CAP_UGET(MASK);
+       uctxt->flags = hfi1_cap_mask; /* save current flag state */
        init_waitqueue_head(&uctxt->wait);
        strlcpy(uctxt->comm, current->comm, sizeof(uctxt->comm));
        memcpy(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid));
@@@ -1080,18 -1086,18 +1086,18 @@@ static int user_init(struct file *fp
        hfi1_set_ctxt_jkey(uctxt->dd, uctxt->ctxt, uctxt->jkey);
  
        rcvctrl_ops = HFI1_RCVCTRL_CTXT_ENB;
-       if (HFI1_CAP_KGET_MASK(uctxt->flags, HDRSUPP))
+       if (HFI1_CAP_UGET_MASK(uctxt->flags, HDRSUPP))
                rcvctrl_ops |= HFI1_RCVCTRL_TIDFLOW_ENB;
        /*
         * Ignore the bit in the flags for now until proper
         * support for multiple packet per rcv array entry is
         * added.
         */
-       if (!HFI1_CAP_KGET_MASK(uctxt->flags, MULTI_PKT_EGR))
+       if (!HFI1_CAP_UGET_MASK(uctxt->flags, MULTI_PKT_EGR))
                rcvctrl_ops |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
-       if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_EGR_FULL))
+       if (HFI1_CAP_UGET_MASK(uctxt->flags, NODROP_EGR_FULL))
                rcvctrl_ops |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
-       if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
+       if (HFI1_CAP_UGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
                rcvctrl_ops |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
        /*
         * The RcvCtxtCtrl.TailUpd bit has to be explicitly written.
         * uses of the chip or ctxt. Therefore, add the rcvctrl op
         * for both cases.
         */
-       if (HFI1_CAP_KGET_MASK(uctxt->flags, DMA_RTAIL))
+       if (HFI1_CAP_UGET_MASK(uctxt->flags, DMA_RTAIL))
                rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;
        else
                rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_DIS;
@@@ -1122,9 -1128,14 +1128,14 @@@ static int get_ctxt_info(struct file *f
        int ret = 0;
  
        memset(&cinfo, 0, sizeof(cinfo));
-       ret = hfi1_get_base_kinfo(uctxt, &cinfo);
-       if (ret < 0)
-               goto done;
+       cinfo.runtime_flags = (((uctxt->flags >> HFI1_CAP_MISC_SHIFT) &
+                               HFI1_CAP_MISC_MASK) << HFI1_CAP_USER_SHIFT) |
+                       HFI1_CAP_UGET_MASK(uctxt->flags, MASK) |
+                       HFI1_CAP_KGET_MASK(uctxt->flags, K2U);
+       /* adjust flag if this fd is not able to cache */
+       if (!fd->handler)
+               cinfo.runtime_flags |= HFI1_CAP_TID_UNMAP; /* no caching */
        cinfo.num_active = hfi1_count_active_units();
        cinfo.unit = uctxt->dd->unit;
        cinfo.ctxt = uctxt->ctxt;
        trace_hfi1_ctxt_info(uctxt->dd, uctxt->ctxt, fd->subctxt, cinfo);
        if (copy_to_user(ubase, &cinfo, sizeof(cinfo)))
                ret = -EFAULT;
- done:
        return ret;
  }
  
index 49a71e24a8f00922298b5405e246117781ff1fc7,ba9083602cbd25d2dfb8a15892b4edc9527202b0..1000e0fd96d9b4972cec3327739f5ca7ff59b7b3
@@@ -62,6 -62,8 +62,8 @@@
  #include <linux/cdev.h>
  #include <linux/delay.h>
  #include <linux/kthread.h>
+ #include <linux/i2c.h>
+ #include <linux/i2c-algo-bit.h>
  #include <rdma/rdma_vt.h>
  
  #include "chip_registers.h"
@@@ -253,7 -255,7 +255,7 @@@ struct hfi1_ctxtdata 
        /* chip offset of PIO buffers for this ctxt */
        u32 piobufs;
        /* per-context configuration flags */
-       u32 flags;
+       unsigned long flags;
        /* per-context event flags for fileops/intr communication */
        unsigned long event_flags;
        /* WAIT_RCV that timed out, no interrupt */
        u32 urgent;
        /* saved total number of polled urgent packets for poll edge trigger */
        u32 urgent_poll;
-       /* pid of process using this ctxt */
-       pid_t pid;
-       pid_t subpid[HFI1_MAX_SHARED_CTXTS];
        /* same size as task_struct .comm[], command that opened context */
        char comm[TASK_COMM_LEN];
        /* so file ops can get at unit */
@@@ -366,11 -365,6 +365,6 @@@ struct hfi1_packet 
        u8 etype;
  };
  
- static inline bool has_sc4_bit(struct hfi1_packet *p)
- {
-       return !!rhf_dc_info(p->rhf);
- }
  /*
   * Private data for snoop/capture support.
   */
@@@ -805,10 -799,19 +799,19 @@@ struct hfi1_temp 
        u8 triggers;      /* temperature triggers */
  };
  
+ struct hfi1_i2c_bus {
+       struct hfi1_devdata *controlling_dd; /* current controlling device */
+       struct i2c_adapter adapter;     /* bus details */
+       struct i2c_algo_bit_data algo;  /* bus algorithm details */
+       int num;                        /* bus number, 0 or 1 */
+ };
  /* common data between shared ASIC HFIs */
  struct hfi1_asic_data {
        struct hfi1_devdata *dds[2];    /* back pointers */
        struct mutex asic_resource_mutex;
+       struct hfi1_i2c_bus *i2c_bus0;
+       struct hfi1_i2c_bus *i2c_bus1;
  };
  
  /* device data struct now contains only "general per-device" info.
@@@ -1128,7 -1131,8 +1131,8 @@@ struct hfi1_devdata 
                NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS];
        /* Software counter that aggregates all cce_err_status errors */
        u64 sw_cce_err_status_aggregate;
+       /* Software counter that aggregates all bypass packet rcv errors */
+       u64 sw_rcv_bypass_packet_errors;
        /* receive interrupt functions */
        rhf_rcv_function_ptr *rhf_rcv_function_map;
        rhf_rcv_function_ptr normal_rhf_rcv_functions[8];
  
  /* 8051 firmware version helper */
  #define dc8051_ver(a, b) ((a) << 8 | (b))
 +#define dc8051_ver_maj(a) ((a & 0xff00) >> 8)
 +#define dc8051_ver_min(a)  (a & 0x00ff)
  
  /* f_put_tid types */
  #define PT_EXPECTED 0
  
  struct tid_rb_node;
  struct mmu_rb_node;
+ struct mmu_rb_handler;
  
  /* Private data for file operations */
  struct hfi1_filedata {
        /* for cpu affinity; -1 if none */
        int rec_cpu_num;
        u32 tid_n_pinned;
-       struct rb_root tid_rb_root;
+       struct mmu_rb_handler *handler;
        struct tid_rb_node **entry_to_rb;
        spinlock_t tid_lock; /* protect tid_[limit,used] counters */
        u32 tid_limit;
        u32 invalid_tid_idx;
        /* protect invalid_tids array and invalid_tid_idx */
        spinlock_t invalid_lock;
+       struct mm_struct *mm;
  };
  
  extern struct list_head hfi1_dev_list;
@@@ -1236,6 -1240,8 +1242,8 @@@ int handle_receive_interrupt_nodma_rtai
  int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *, int);
  void set_all_slowpath(struct hfi1_devdata *dd);
  
+ extern const struct pci_device_id hfi1_pci_tbl[];
  /* receive packet handler dispositions */
  #define RCV_PKT_OK      0x0 /* keep going */
  #define RCV_PKT_LIMIT   0x1 /* stop, hit limit, start thread */
@@@ -1261,7 -1267,7 +1269,7 @@@ void receive_interrupt_work(struct work
  static inline int hdr2sc(struct hfi1_message_header *hdr, u64 rhf)
  {
        return ((be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf) |
-              ((!!(rhf & RHF_DC_INFO_SMASK)) << 4);
+              ((!!(rhf_dc_info(rhf))) << 4);
  }
  
  static inline u16 generate_jkey(kuid_t uid)
@@@ -1571,6 -1577,22 +1579,22 @@@ static inline struct hfi1_ibport *to_ip
        return &dd->pport[pidx].ibport_data;
  }
  
+ void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
+                              bool do_cnp);
+ static inline bool process_ecn(struct rvt_qp *qp, struct hfi1_packet *pkt,
+                              bool do_cnp)
+ {
+       struct hfi1_other_headers *ohdr = pkt->ohdr;
+       u32 bth1;
+       bth1 = be32_to_cpu(ohdr->bth[1]);
+       if (unlikely(bth1 & (HFI1_BECN_SMASK | HFI1_FECN_SMASK))) {
+               hfi1_process_ecn_slowpath(qp, pkt, do_cnp);
+               return bth1 & HFI1_FECN_SMASK;
+       }
+       return false;
+ }
  /*
   * Return the indexed PKEY from the port PKEY table.
   */
@@@ -1588,14 -1610,23 +1612,23 @@@ static inline u16 hfi1_get_pkey(struct 
  }
  
  /*
-  * Readers of cc_state must call get_cc_state() under rcu_read_lock().
-  * Writers of cc_state must call get_cc_state() under cc_state_lock.
+  * Called by readers of cc_state only, must call under rcu_read_lock().
   */
  static inline struct cc_state *get_cc_state(struct hfi1_pportdata *ppd)
  {
        return rcu_dereference(ppd->cc_state);
  }
  
+ /*
+  * Called by writers of cc_state only,  must call under cc_state_lock.
+  */
+ static inline
+ struct cc_state *get_cc_state_protected(struct hfi1_pportdata *ppd)
+ {
+       return rcu_dereference_protected(ppd->cc_state,
+                                        lockdep_is_held(&ppd->cc_state_lock));
+ }
  /*
   * values for dd->flags (_device_ related flags)
   */
@@@ -1671,9 -1702,12 +1704,12 @@@ void shutdown_led_override(struct hfi1_
   */
  #define DEFAULT_RCVHDR_ENTSIZE 32
  
- bool hfi1_can_pin_pages(struct hfi1_devdata *, u32, u32);
- int hfi1_acquire_user_pages(unsigned long, size_t, bool, struct page **);
- void hfi1_release_user_pages(struct mm_struct *, struct page **, size_t, bool);
+ bool hfi1_can_pin_pages(struct hfi1_devdata *dd, struct mm_struct *mm,
+                       u32 nlocked, u32 npages);
+ int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr,
+                           size_t npages, bool writable, struct page **pages);
+ void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
+                            size_t npages, bool dirty);
  
  static inline void clear_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
  {
@@@ -1949,4 -1983,55 +1985,55 @@@ static inline u32 qsfp_resource(struct 
  
  int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp);
  
+ #define DD_DEV_ENTRY(dd)       __string(dev, dev_name(&(dd)->pcidev->dev))
+ #define DD_DEV_ASSIGN(dd)      __assign_str(dev, dev_name(&(dd)->pcidev->dev))
+ #define packettype_name(etype) { RHF_RCV_TYPE_##etype, #etype }
+ #define show_packettype(etype)                  \
+ __print_symbolic(etype,                         \
+       packettype_name(EXPECTED),              \
+       packettype_name(EAGER),                 \
+       packettype_name(IB),                    \
+       packettype_name(ERROR),                 \
+       packettype_name(BYPASS))
+ #define ib_opcode_name(opcode) { IB_OPCODE_##opcode, #opcode  }
+ #define show_ib_opcode(opcode)                             \
+ __print_symbolic(opcode,                                   \
+       ib_opcode_name(RC_SEND_FIRST),                     \
+       ib_opcode_name(RC_SEND_MIDDLE),                    \
+       ib_opcode_name(RC_SEND_LAST),                      \
+       ib_opcode_name(RC_SEND_LAST_WITH_IMMEDIATE),       \
+       ib_opcode_name(RC_SEND_ONLY),                      \
+       ib_opcode_name(RC_SEND_ONLY_WITH_IMMEDIATE),       \
+       ib_opcode_name(RC_RDMA_WRITE_FIRST),               \
+       ib_opcode_name(RC_RDMA_WRITE_MIDDLE),              \
+       ib_opcode_name(RC_RDMA_WRITE_LAST),                \
+       ib_opcode_name(RC_RDMA_WRITE_LAST_WITH_IMMEDIATE), \
+       ib_opcode_name(RC_RDMA_WRITE_ONLY),                \
+       ib_opcode_name(RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE), \
+       ib_opcode_name(RC_RDMA_READ_REQUEST),              \
+       ib_opcode_name(RC_RDMA_READ_RESPONSE_FIRST),       \
+       ib_opcode_name(RC_RDMA_READ_RESPONSE_MIDDLE),      \
+       ib_opcode_name(RC_RDMA_READ_RESPONSE_LAST),        \
+       ib_opcode_name(RC_RDMA_READ_RESPONSE_ONLY),        \
+       ib_opcode_name(RC_ACKNOWLEDGE),                    \
+       ib_opcode_name(RC_ATOMIC_ACKNOWLEDGE),             \
+       ib_opcode_name(RC_COMPARE_SWAP),                   \
+       ib_opcode_name(RC_FETCH_ADD),                      \
+       ib_opcode_name(UC_SEND_FIRST),                     \
+       ib_opcode_name(UC_SEND_MIDDLE),                    \
+       ib_opcode_name(UC_SEND_LAST),                      \
+       ib_opcode_name(UC_SEND_LAST_WITH_IMMEDIATE),       \
+       ib_opcode_name(UC_SEND_ONLY),                      \
+       ib_opcode_name(UC_SEND_ONLY_WITH_IMMEDIATE),       \
+       ib_opcode_name(UC_RDMA_WRITE_FIRST),               \
+       ib_opcode_name(UC_RDMA_WRITE_MIDDLE),              \
+       ib_opcode_name(UC_RDMA_WRITE_LAST),                \
+       ib_opcode_name(UC_RDMA_WRITE_LAST_WITH_IMMEDIATE), \
+       ib_opcode_name(UC_RDMA_WRITE_ONLY),                \
+       ib_opcode_name(UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE), \
+       ib_opcode_name(UD_SEND_ONLY),                      \
+       ib_opcode_name(UD_SEND_ONLY_WITH_IMMEDIATE),       \
+       ib_opcode_name(CNP))
  #endif                          /* _HFI1_KERNEL_H */
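
The cc_state accessors in the hunk above split the old get_cc_state() into a
reader variant (rcu_dereference() under rcu_read_lock()) and a writer variant
(rcu_dereference_protected() with cc_state_lock held). A generic, self-contained
illustration of that accessor split, using made-up types rather than the
driver's structures:

#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct demo_state { int val; };

struct demo_port {
        struct demo_state __rcu *state;
        spinlock_t state_lock;
};

/* reader side: call under rcu_read_lock() */
static struct demo_state *demo_get_state(struct demo_port *p)
{
        return rcu_dereference(p->state);
}

/* writer side: call with p->state_lock held */
static struct demo_state *demo_get_state_protected(struct demo_port *p)
{
        return rcu_dereference_protected(p->state,
                                         lockdep_is_held(&p->state_lock));
}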
index dd4be3c2b2254d11a0045ac6adf6739ba658ea3c,5265d160fa63521a7590240d3d78c96095c0ab6a..2b359540901db3dd4c42cad718cc938ff7139d6a
@@@ -306,7 -306,10 +306,10 @@@ const enum ib_wc_opcode ib_hfi1_wc_opco
        [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
        [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
        [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
-       [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
+       [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD,
+       [IB_WR_SEND_WITH_INV] = IB_WC_SEND,
+       [IB_WR_LOCAL_INV] = IB_WC_LOCAL_INV,
+       [IB_WR_REG_MR] = IB_WC_REG_MR
  };
  
  /*
@@@ -378,6 -381,8 +381,8 @@@ static const opcode_handler opcode_hand
        [IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE]             = &hfi1_rc_rcv,
        [IB_OPCODE_RC_COMPARE_SWAP]                   = &hfi1_rc_rcv,
        [IB_OPCODE_RC_FETCH_ADD]                      = &hfi1_rc_rcv,
+       [IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE]      = &hfi1_rc_rcv,
+       [IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE]      = &hfi1_rc_rcv,
        /* UC */
        [IB_OPCODE_UC_SEND_FIRST]                     = &hfi1_uc_rcv,
        [IB_OPCODE_UC_SEND_MIDDLE]                    = &hfi1_uc_rcv,
@@@ -540,19 -545,15 +545,15 @@@ void hfi1_skip_sge(struct rvt_sge_stat
  /*
   * Make sure the QP is ready and able to accept the given opcode.
   */
- static inline int qp_ok(int opcode, struct hfi1_packet *packet)
+ static inline opcode_handler qp_ok(int opcode, struct hfi1_packet *packet)
  {
-       struct hfi1_ibport *ibp;
        if (!(ib_rvt_state_ops[packet->qp->state] & RVT_PROCESS_RECV_OK))
-               goto dropit;
+               return NULL;
        if (((opcode & RVT_OPCODE_QP_MASK) == packet->qp->allowed_ops) ||
            (opcode == IB_OPCODE_CNP))
-               return 1;
- dropit:
-       ibp = &packet->rcd->ppd->ibport_data;
-       ibp->rvp.n_pkt_drops++;
-       return 0;
+               return opcode_handler_tbl[opcode];
+       return NULL;
  }
  
  /**
@@@ -571,6 -572,7 +572,7 @@@ void hfi1_ib_rcv(struct hfi1_packet *pa
        struct hfi1_pportdata *ppd = rcd->ppd;
        struct hfi1_ibport *ibp = &ppd->ibport_data;
        struct rvt_dev_info *rdi = &ppd->dd->verbs_dev.rdi;
+       opcode_handler packet_handler;
        unsigned long flags;
        u32 qp_num;
        int lnh;
                list_for_each_entry_rcu(p, &mcast->qp_list, list) {
                        packet->qp = p->qp;
                        spin_lock_irqsave(&packet->qp->r_lock, flags);
-                       if (likely((qp_ok(opcode, packet))))
-                               opcode_handler_tbl[opcode](packet);
+                       packet_handler = qp_ok(opcode, packet);
+                       if (likely(packet_handler))
+                               packet_handler(packet);
+                       else
+                               ibp->rvp.n_pkt_drops++;
                        spin_unlock_irqrestore(&packet->qp->r_lock, flags);
                }
                /*
                        goto drop;
                }
                spin_lock_irqsave(&packet->qp->r_lock, flags);
-               if (likely((qp_ok(opcode, packet))))
-                       opcode_handler_tbl[opcode](packet);
+               packet_handler = qp_ok(opcode, packet);
+               if (likely(packet_handler))
+                       packet_handler(packet);
+               else
+                       ibp->rvp.n_pkt_drops++;
                spin_unlock_irqrestore(&packet->qp->r_lock, flags);
                rcu_read_unlock();
        }
@@@ -808,19 -816,19 +816,19 @@@ static int build_verbs_tx_desc
        struct rvt_sge_state *ss,
        u32 length,
        struct verbs_txreq *tx,
-       struct ahg_ib_header *ahdr,
+       struct hfi1_ahg_info *ahg_info,
        u64 pbc)
  {
        int ret = 0;
-       struct hfi1_pio_header *phdr = &tx->phdr;
+       struct hfi1_sdma_header *phdr = &tx->phdr;
        u16 hdrbytes = tx->hdr_dwords << 2;
  
-       if (!ahdr->ahgcount) {
+       if (!ahg_info->ahgcount) {
                ret = sdma_txinit_ahg(
                        &tx->txreq,
-                       ahdr->tx_flags,
+                       ahg_info->tx_flags,
                        hdrbytes + length,
-                       ahdr->ahgidx,
+                       ahg_info->ahgidx,
                        0,
                        NULL,
                        0,
        } else {
                ret = sdma_txinit_ahg(
                        &tx->txreq,
-                       ahdr->tx_flags,
+                       ahg_info->tx_flags,
                        length,
-                       ahdr->ahgidx,
-                       ahdr->ahgcount,
-                       ahdr->ahgdesc,
+                       ahg_info->ahgidx,
+                       ahg_info->ahgcount,
+                       ahg_info->ahgdesc,
                        hdrbytes,
                        verbs_sdma_complete);
                if (ret)
@@@ -860,7 -868,7 +868,7 @@@ int hfi1_verbs_send_dma(struct rvt_qp *
                        u64 pbc)
  {
        struct hfi1_qp_priv *priv = qp->priv;
-       struct ahg_ib_header *ahdr = priv->s_hdr;
+       struct hfi1_ahg_info *ahg_info = priv->s_ahg;
        u32 hdrwords = qp->s_hdrwords;
        struct rvt_sge_state *ss = qp->s_cur_sge;
        u32 len = qp->s_cur_size;
                                         plen);
                }
                tx->wqe = qp->s_wqe;
-               ret = build_verbs_tx_desc(tx->sde, ss, len, tx, ahdr, pbc);
+               ret = build_verbs_tx_desc(tx->sde, ss, len, tx, ahg_info, pbc);
                if (unlikely(ret))
                        goto bail_build;
        }
@@@ -1291,22 -1299,21 +1299,24 @@@ int hfi1_verbs_send(struct rvt_qp *qp, 
  static void hfi1_fill_device_attr(struct hfi1_devdata *dd)
  {
        struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
 +      u16 ver = dd->dc8051_ver;
  
        memset(&rdi->dparms.props, 0, sizeof(rdi->dparms.props));
  
 +      rdi->dparms.props.fw_ver = ((u64)(dc8051_ver_maj(ver)) << 16) |
 +                                  (u64)dc8051_ver_min(ver);
        rdi->dparms.props.device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
                        IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
                        IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
-                       IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
+                       IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE |
+                       IB_DEVICE_MEM_MGT_EXTENSIONS;
        rdi->dparms.props.page_size_cap = PAGE_SIZE;
        rdi->dparms.props.vendor_id = dd->oui1 << 16 | dd->oui2 << 8 | dd->oui3;
        rdi->dparms.props.vendor_part_id = dd->pcidev->device;
        rdi->dparms.props.hw_ver = dd->minrev;
        rdi->dparms.props.sys_image_guid = ib_hfi1_sys_image_guid;
-       rdi->dparms.props.max_mr_size = ~0ULL;
+       rdi->dparms.props.max_mr_size = U64_MAX;
+       rdi->dparms.props.max_fast_reg_page_list_len = UINT_MAX;
        rdi->dparms.props.max_qp = hfi1_max_qps;
        rdi->dparms.props.max_qp_wr = hfi1_max_qp_wrs;
        rdi->dparms.props.max_sge = hfi1_max_sges;
@@@ -1570,17 -1577,6 +1580,17 @@@ static void init_ibport(struct hfi1_ppo
        RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
  }
  
 +static void hfi1_get_dev_fw_str(struct ib_device *ibdev, char *str,
 +                              size_t str_len)
 +{
 +      struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
 +      struct hfi1_ibdev *dev = dev_from_rdi(rdi);
 +      u16 ver = dd_from_dev(dev)->dc8051_ver;
 +
 +      snprintf(str, str_len, "%u.%u", dc8051_ver_maj(ver),
 +               dc8051_ver_min(ver));
 +}
 +
  /**
   * hfi1_register_ib_device - register our device with the infiniband core
   * @dd: the device data structure
@@@ -1627,7 -1623,6 +1637,7 @@@ int hfi1_register_ib_device(struct hfi1
  
        /* keep process mad in the driver */
        ibdev->process_mad = hfi1_process_mad;
 +      ibdev->get_dev_fw_str = hfi1_get_dev_fw_str;
  
        strncpy(ibdev->node_desc, init_utsname()->nodename,
                sizeof(ibdev->node_desc));
        dd->verbs_dev.rdi.dparms.nports = dd->num_pports;
        dd->verbs_dev.rdi.dparms.npkeys = hfi1_get_npkeys(dd);
  
+       /* post send table */
+       dd->verbs_dev.rdi.post_parms = hfi1_post_parms;
        ppd = dd->pport;
        for (i = 0; i < dd->num_pports; i++, ppd++)
                rvt_init_port(&dd->verbs_dev.rdi,
@@@ -1745,8 -1743,7 +1758,7 @@@ void hfi1_cnp_rcv(struct hfi1_packet *p
        struct rvt_qp *qp = packet->qp;
        u32 lqpn, rqpn = 0;
        u16 rlid = 0;
-       u8 sl, sc5, sc4_bit, svc_type;
-       bool sc4_set = has_sc4_bit(packet);
+       u8 sl, sc5, svc_type;
  
        switch (packet->qp->ibqp.qp_type) {
        case IB_QPT_UC:
                return;
        }
  
-       sc4_bit = sc4_set << 4;
-       sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
-       sc5 |= sc4_bit;
+       sc5 = hdr2sc((struct hfi1_message_header *)hdr, packet->rhf);
        sl = ibp->sc_to_sl[sc5];
        lqpn = qp->ibqp.qp_num;
  
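The qp_ok() change in the hunk above replaces a boolean check with a lookup
that returns the opcode handler itself (or NULL to drop), so hfi1_ib_rcv()
does a single table access and owns the drop counter. A minimal, generic
illustration of that shape (not driver code; all names are invented):

typedef void (*pkt_handler_t)(void *packet);

static pkt_handler_t pkt_ok(const pkt_handler_t *tbl, unsigned int opcode,
                            bool qp_can_recv)
{
        return qp_can_recv ? tbl[opcode] : NULL;        /* NULL means "drop" */
}

/* caller:
 *      handler = pkt_ok(handler_tbl, opcode, ready);
 *      if (likely(handler))
 *              handler(packet);
 *      else
 *              n_pkt_drops++;
 */
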
diff --combined include/rdma/ib_verbs.h
index 94a0bc5b5bdd47b16678148a04690793321b85c2,e694f02d42e3dc70ca0b20747404da15a8ed3229..8e90dd28bb7536d16058d711b096f8125bd42874
@@@ -562,7 -562,6 +562,7 @@@ enum ib_event_type 
        IB_EVENT_QP_LAST_WQE_REACHED,
        IB_EVENT_CLIENT_REREGISTER,
        IB_EVENT_GID_CHANGE,
 +      IB_EVENT_WQ_FATAL,
  };
  
  const char *__attribute_const__ ib_event_msg(enum ib_event_type event);
@@@ -573,7 -572,6 +573,7 @@@ struct ib_event 
                struct ib_cq    *cq;
                struct ib_qp    *qp;
                struct ib_srq   *srq;
 +              struct ib_wq    *wq;
                u8              port_num;
        } element;
        enum ib_event_type      event;
@@@ -1017,7 -1015,6 +1017,7 @@@ struct ib_qp_init_attr 
         * Only needed for special QP types, or when using the RW API.
         */
        u8                      port_num;
 +      struct ib_rwq_ind_table *rwq_ind_tbl;
  };
  
  struct ib_qp_open_attr {
@@@ -1326,8 -1323,6 +1326,8 @@@ struct ib_ucontext 
        struct list_head        ah_list;
        struct list_head        xrcd_list;
        struct list_head        rule_list;
 +      struct list_head        wq_list;
 +      struct list_head        rwq_ind_tbl_list;
        int                     closing;
  
        struct pid             *tgid;
@@@ -1433,63 -1428,10 +1433,67 @@@ struct ib_srq 
        } ext;
  };
  
 +enum ib_wq_type {
 +      IB_WQT_RQ
 +};
 +
 +enum ib_wq_state {
 +      IB_WQS_RESET,
 +      IB_WQS_RDY,
 +      IB_WQS_ERR
 +};
 +
 +struct ib_wq {
 +      struct ib_device       *device;
 +      struct ib_uobject      *uobject;
 +      void                *wq_context;
 +      void                (*event_handler)(struct ib_event *, void *);
 +      struct ib_pd           *pd;
 +      struct ib_cq           *cq;
 +      u32             wq_num;
 +      enum ib_wq_state       state;
 +      enum ib_wq_type wq_type;
 +      atomic_t                usecnt;
 +};
 +
 +struct ib_wq_init_attr {
 +      void                   *wq_context;
 +      enum ib_wq_type wq_type;
 +      u32             max_wr;
 +      u32             max_sge;
 +      struct  ib_cq          *cq;
 +      void                (*event_handler)(struct ib_event *, void *);
 +};
 +
 +enum ib_wq_attr_mask {
 +      IB_WQ_STATE     = 1 << 0,
 +      IB_WQ_CUR_STATE = 1 << 1,
 +};
 +
 +struct ib_wq_attr {
 +      enum    ib_wq_state     wq_state;
 +      enum    ib_wq_state     curr_wq_state;
 +};
 +
 +struct ib_rwq_ind_table {
 +      struct ib_device        *device;
 +      struct ib_uobject      *uobject;
 +      atomic_t                usecnt;
 +      u32             ind_tbl_num;
 +      u32             log_ind_tbl_size;
 +      struct ib_wq    **ind_tbl;
 +};
 +
 +struct ib_rwq_ind_table_init_attr {
 +      u32             log_ind_tbl_size;
 +      /* Each entry is a pointer to Receive Work Queue */
 +      struct ib_wq    **ind_tbl;
 +};
 +
+ /*
+  * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
+  * @max_read_sge:  Maximum SGE elements per RDMA READ request.
+  */
  struct ib_qp {
        struct ib_device       *device;
        struct ib_pd           *pd;
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *qp_context;
        u32                     qp_num;
+       u32                     max_write_sge;
+       u32                     max_read_sge;
        enum ib_qp_type         qp_type;
 +      struct ib_rwq_ind_table *rwq_ind_tbl;
  };
  
  struct ib_mr {
@@@ -1569,7 -1512,6 +1575,7 @@@ enum ib_flow_spec_type 
        IB_FLOW_SPEC_IB         = 0x22,
        /* L3 header*/
        IB_FLOW_SPEC_IPV4       = 0x30,
 +      IB_FLOW_SPEC_IPV6       = 0x31,
        /* L4 headers*/
        IB_FLOW_SPEC_TCP        = 0x40,
        IB_FLOW_SPEC_UDP        = 0x41
@@@ -1631,18 -1573,6 +1637,18 @@@ struct ib_flow_spec_ipv4 
        struct ib_flow_ipv4_filter mask;
  };
  
 +struct ib_flow_ipv6_filter {
 +      u8      src_ip[16];
 +      u8      dst_ip[16];
 +};
 +
 +struct ib_flow_spec_ipv6 {
 +      enum ib_flow_spec_type     type;
 +      u16                        size;
 +      struct ib_flow_ipv6_filter val;
 +      struct ib_flow_ipv6_filter mask;
 +};
 +
  struct ib_flow_tcp_udp_filter {
        __be16  dst_port;
        __be16  src_port;
@@@ -1664,7 -1594,6 +1670,7 @@@ union ib_flow_spec 
        struct ib_flow_spec_ib          ib;
        struct ib_flow_spec_ipv4        ipv4;
        struct ib_flow_spec_tcp_udp     tcp_udp;
 +      struct ib_flow_spec_ipv6        ipv6;
  };
  
  struct ib_flow_attr {
@@@ -1998,18 -1927,7 +2004,18 @@@ struct ib_device 
                                                   struct ifla_vf_stats *stats);
        int                        (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
                                                  int type);
 -
 +      struct ib_wq *             (*create_wq)(struct ib_pd *pd,
 +                                              struct ib_wq_init_attr *init_attr,
 +                                              struct ib_udata *udata);
 +      int                        (*destroy_wq)(struct ib_wq *wq);
 +      int                        (*modify_wq)(struct ib_wq *wq,
 +                                              struct ib_wq_attr *attr,
 +                                              u32 wq_attr_mask,
 +                                              struct ib_udata *udata);
 +      struct ib_rwq_ind_table *  (*create_rwq_ind_table)(struct ib_device *device,
 +                                                         struct ib_rwq_ind_table_init_attr *init_attr,
 +                                                         struct ib_udata *udata);
 +      int                        (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
        struct ib_dma_mapping_ops   *dma_ops;
  
        struct module               *owner;
         * in fast paths.
         */
        int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *);
 +      void (*get_dev_fw_str)(struct ib_device *, char *str, size_t str_len);
  };
  
  struct ib_client {
  struct ib_device *ib_alloc_device(size_t size);
  void ib_dealloc_device(struct ib_device *device);
  
 +void ib_get_device_fw_str(struct ib_device *device, char *str, size_t str_len);
 +
  int ib_register_device(struct ib_device *device,
                       int (*port_callback)(struct ib_device *,
                                            u8, struct kobject *));
@@@ -2910,19 -2825,19 +2916,19 @@@ static inline void ib_dma_unmap_single(
  static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
                                          void *cpu_addr, size_t size,
                                          enum dma_data_direction direction,
 -                                        struct dma_attrs *attrs)
 +                                        unsigned long dma_attrs)
  {
        return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
 -                                  direction, attrs);
 +                                  direction, dma_attrs);
  }
  
  static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
                                             u64 addr, size_t size,
                                             enum dma_data_direction direction,
 -                                           struct dma_attrs *attrs)
 +                                           unsigned long dma_attrs)
  {
        return dma_unmap_single_attrs(dev->dma_device, addr, size,
 -                                    direction, attrs);
 +                                    direction, dma_attrs);
  }
  
  /**
@@@ -2997,18 -2912,17 +3003,18 @@@ static inline void ib_dma_unmap_sg(stru
  static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
                                      struct scatterlist *sg, int nents,
                                      enum dma_data_direction direction,
 -                                    struct dma_attrs *attrs)
 +                                    unsigned long dma_attrs)
  {
 -      return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
 +      return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
 +                              dma_attrs);
  }
  
  static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
                                         struct scatterlist *sg, int nents,
                                         enum dma_data_direction direction,
 -                                       struct dma_attrs *attrs)
 +                                       unsigned long dma_attrs)
  {
 -      dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
 +      dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
  }
  /**
   * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
@@@ -3259,15 -3173,6 +3265,15 @@@ int ib_check_mr_status(struct ib_mr *mr
  struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
                                            u16 pkey, const union ib_gid *gid,
                                            const struct sockaddr *addr);
 +struct ib_wq *ib_create_wq(struct ib_pd *pd,
 +                         struct ib_wq_init_attr *init_attr);
 +int ib_destroy_wq(struct ib_wq *wq);
 +int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
 +               u32 wq_attr_mask);
 +struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
 +                                               struct ib_rwq_ind_table_init_attr*
 +                                               wq_ind_table_init_attr);
 +int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
  
  int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
                 unsigned int *sg_offset, unsigned int page_size);
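
One tree-wide change also visible in the ib_dma_*_attrs wrappers above: DMA
attributes are now passed as a plain unsigned long bitmask instead of a
struct dma_attrs pointer. A hedged usage sketch (the caller is invented; pass
0 for no special attributes, or OR in DMA_ATTR_* flags):

#include <linux/dma-mapping.h>
#include <rdma/ib_verbs.h>

static int example_map_sg(struct ib_device *dev, struct scatterlist *sg,
                          int nents)
{
        return ib_dma_map_sg_attrs(dev, sg, nents, DMA_TO_DEVICE, 0);
}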