RDMA/cma: Add ability to specify type of service
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index d026764c7e9c57e78429f0e315e48f25fc08e584..19c9172f0cdc8b2fd442d086c40a11aba7ab0e59 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -138,6 +138,7 @@ struct rdma_id_private {
        u32                     qkey;
        u32                     qp_num;
        u8                      srq;
+       u8                      tos;
 };
 
 struct cma_multicast {
@@ -368,6 +369,11 @@ static void cma_enable_remove(struct rdma_id_private *id_priv)
                wake_up(&id_priv->wait_remove);
 }
 
+static int cma_has_cm_dev(struct rdma_id_private *id_priv)
+{
+       return (id_priv->id.device && id_priv->cm_id.ib);
+}
+
 struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
                                  void *context, enum rdma_port_space ps)
 {
@@ -568,7 +574,7 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
                break;
        case RDMA_TRANSPORT_IWARP:
                if (!id_priv->cm_id.iw) {
-                       qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE;
+                       qp_attr->qp_access_flags = 0;
                        *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
                } else
                        ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
@@ -1183,9 +1189,10 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
        struct sockaddr_in *sin;
        int ret = 0;
 
-       memset(&event, 0, sizeof event);
-       atomic_inc(&id_priv->dev_remove);
+       if (cma_disable_remove(id_priv, CMA_CONNECT))
+               return 0;
 
+       memset(&event, 0, sizeof event);
        switch (iw_event->event) {
        case IW_CM_EVENT_CLOSE:
                event.event = RDMA_CM_EVENT_DISCONNECTED;
@@ -1468,6 +1475,15 @@ err:
 }
 EXPORT_SYMBOL(rdma_listen);
 
+void rdma_set_service_type(struct rdma_cm_id *id, int tos)
+{
+       struct rdma_id_private *id_priv;
+
+       id_priv = container_of(id, struct rdma_id_private, id);
+       id_priv->tos = (u8) tos;
+}
+EXPORT_SYMBOL(rdma_set_service_type);
+
 static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
                              void *context)
 {
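
A minimal usage sketch of the new export (not part of the diff above), assuming a hypothetical kernel ULP event handler named my_cm_handler: the TOS has to be set before rdma_resolve_route() so that the SA path record query (see cma_query_ib_route below) can carry it. The 0x28 TOS value and the 2000 ms timeout are arbitrary example values.

    #include <rdma/rdma_cm.h>

    static int my_cm_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
    {
            if (event->event == RDMA_CM_EVENT_ADDR_RESOLVED) {
                    /* Example only: set an arbitrary TOS before resolving
                     * the route so the path record query can map it to a
                     * QoS class / traffic class. */
                    rdma_set_service_type(id, 0x28);
                    return rdma_resolve_route(id, 2000);
            }
            return 0;
    }
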
@@ -1492,23 +1508,37 @@ static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
 static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
                              struct cma_work *work)
 {
-       struct rdma_dev_addr *addr = &id_priv->id.route.addr.dev_addr;
+       struct rdma_addr *addr = &id_priv->id.route.addr;
        struct ib_sa_path_rec path_rec;
+       ib_sa_comp_mask comp_mask;
+       struct sockaddr_in6 *sin6;
 
        memset(&path_rec, 0, sizeof path_rec);
-       ib_addr_get_sgid(addr, &path_rec.sgid);
-       ib_addr_get_dgid(addr, &path_rec.dgid);
-       path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(addr));
+       ib_addr_get_sgid(&addr->dev_addr, &path_rec.sgid);
+       ib_addr_get_dgid(&addr->dev_addr, &path_rec.dgid);
+       path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(&addr->dev_addr));
        path_rec.numb_path = 1;
        path_rec.reversible = 1;
+       path_rec.service_id = cma_get_service_id(id_priv->id.ps, &addr->dst_addr);
+
+       comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
+                   IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
+                   IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;
+
+       if (addr->src_addr.sa_family == AF_INET) {
+               path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
+               comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
+       } else {
+               sin6 = (struct sockaddr_in6 *) &addr->src_addr;
+               path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
+               comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
+       }
 
        id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
-                               id_priv->id.port_num, &path_rec,
-                               IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
-                               IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
-                               IB_SA_PATH_REC_REVERSIBLE,
-                               timeout_ms, GFP_KERNEL,
-                               cma_query_handler, work, &id_priv->query);
+                                              id_priv->id.port_num, &path_rec,
+                                              comp_mask, timeout_ms,
+                                              GFP_KERNEL, cma_query_handler,
+                                              work, &id_priv->query);
 
        return (id_priv->query_id < 0) ? id_priv->query_id : 0;
 }
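
A hypothetical userspace illustration (not kernel code) of the IPv6 branch above: sin6_flowinfo carries the flow label in bits 0-19 and the traffic class in bits 20-27, so converting to host order, shifting right by 20, and truncating to 8 bits recovers the traffic class that is fed into the path record.

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    int main(void)
    {
            /* traffic class 0x28, flow label 0x12345, stored in network order */
            uint32_t flowinfo = htonl((0x28u << 20) | 0x12345);
            uint8_t tclass = (uint8_t)(ntohl(flowinfo) >> 20);

            printf("traffic class: 0x%02x\n", tclass);   /* prints 0x28 */
            return 0;
    }
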
@@ -2320,7 +2350,6 @@ static int cma_accept_ib(struct rdma_id_private *id_priv,
        rep.private_data_len = conn_param->private_data_len;
        rep.responder_resources = conn_param->responder_resources;
        rep.initiator_depth = conn_param->initiator_depth;
-       rep.target_ack_delay = CMA_CM_RESPONSE_TIMEOUT;
        rep.failover_accepted = 0;
        rep.flow_control = conn_param->flow_control;
        rep.rnr_retry_count = conn_param->rnr_retry_count;
@@ -2421,7 +2450,7 @@ int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
        int ret;
 
        id_priv = container_of(id, struct rdma_id_private, id);
-       if (!cma_comp(id_priv, CMA_CONNECT))
+       if (!cma_has_cm_dev(id_priv))
                return -EINVAL;
 
        switch (id->device->node_type) {
@@ -2443,7 +2472,7 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
        int ret;
 
        id_priv = container_of(id, struct rdma_id_private, id);
-       if (!cma_comp(id_priv, CMA_CONNECT))
+       if (!cma_has_cm_dev(id_priv))
                return -EINVAL;
 
        switch (rdma_node_get_transport(id->device->node_type)) {
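
A minimal sketch of an rdma_reject() caller, for context: after this hunk the only precondition checked is cma_has_cm_dev() (a bound device and CM id) rather than the CMA_CONNECT state. The handler name is hypothetical; the call simply rejects an incoming request with no private data.

    #include <rdma/rdma_cm.h>

    static int my_listen_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
    {
            if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST)
                    return rdma_reject(id, NULL, 0);   /* no private reject data */
            return 0;
    }
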
@@ -2474,8 +2503,7 @@ int rdma_disconnect(struct rdma_cm_id *id)
        int ret;
 
        id_priv = container_of(id, struct rdma_id_private, id);
-       if (!cma_comp(id_priv, CMA_CONNECT) &&
-           !cma_comp(id_priv, CMA_DISCONNECT))
+       if (!cma_has_cm_dev(id_priv))
                return -EINVAL;
 
        switch (rdma_node_get_transport(id->device->node_type)) {
@@ -2768,8 +2796,8 @@ static int cma_init(void)
        int ret;
 
        get_random_bytes(&next_port, sizeof next_port);
-       next_port = (next_port % (sysctl_local_port_range[1] -
-                                 sysctl_local_port_range[0])) +
+       next_port = ((unsigned int) next_port %
+                   (sysctl_local_port_range[1] - sysctl_local_port_range[0])) +
                    sysctl_local_port_range[0];
        cma_wq = create_singlethread_workqueue("rdma_cm");
        if (!cma_wq)
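
A hypothetical userspace demonstration (not kernel code) of why the cast in the hunk above matters: get_random_bytes() can leave the sign bit of next_port set, and a signed '%' then yields a negative remainder, which would start the port search below the low end of the local port range.

    #include <stdio.h>

    int main(void)
    {
            int next_port = -1234567;           /* pretend random value with the sign bit set */
            int range[2] = { 32768, 61000 };    /* stand-in for sysctl_local_port_range */

            int biased = (next_port % (range[1] - range[0])) + range[0];
            int fixed  = ((unsigned int) next_port % (range[1] - range[0])) + range[0];

            printf("signed modulo:   %d (below range)\n", biased);
            printf("unsigned modulo: %d (within range)\n", fixed);
            return 0;
    }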