/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_provider.c 1397 2004-12-28 05:09:00Z roland $
 */
40 #include "mthca_dev.h"
41 #include "mthca_cmd.h"
42 #include "mthca_user.h"
43 #include "mthca_memfree.h"
45 static int mthca_query_device(struct ib_device
*ibdev
,
46 struct ib_device_attr
*props
)
48 struct ib_smp
*in_mad
= NULL
;
49 struct ib_smp
*out_mad
= NULL
;
51 struct mthca_dev
* mdev
= to_mdev(ibdev
);
55 in_mad
= kmalloc(sizeof *in_mad
, GFP_KERNEL
);
56 out_mad
= kmalloc(sizeof *out_mad
, GFP_KERNEL
);
57 if (!in_mad
|| !out_mad
)
60 memset(props
, 0, sizeof *props
);
62 props
->fw_ver
= mdev
->fw_ver
;
64 memset(in_mad
, 0, sizeof *in_mad
);
65 in_mad
->base_version
= 1;
66 in_mad
->mgmt_class
= IB_MGMT_CLASS_SUBN_LID_ROUTED
;
67 in_mad
->class_version
= 1;
68 in_mad
->method
= IB_MGMT_METHOD_GET
;
69 in_mad
->attr_id
= IB_SMP_ATTR_NODE_INFO
;
71 err
= mthca_MAD_IFC(mdev
, 1, 1,
72 1, NULL
, NULL
, in_mad
, out_mad
,
81 props
->device_cap_flags
= mdev
->device_cap_flags
;
82 props
->vendor_id
= be32_to_cpup((u32
*) (out_mad
->data
+ 36)) &
84 props
->vendor_part_id
= be16_to_cpup((u16
*) (out_mad
->data
+ 30));
85 props
->hw_ver
= be16_to_cpup((u16
*) (out_mad
->data
+ 32));
86 memcpy(&props
->sys_image_guid
, out_mad
->data
+ 4, 8);
87 memcpy(&props
->node_guid
, out_mad
->data
+ 12, 8);
89 props
->max_mr_size
= ~0ull;
90 props
->max_qp
= mdev
->limits
.num_qps
- mdev
->limits
.reserved_qps
;
91 props
->max_qp_wr
= 0xffff;
92 props
->max_sge
= mdev
->limits
.max_sg
;
93 props
->max_cq
= mdev
->limits
.num_cqs
- mdev
->limits
.reserved_cqs
;
94 props
->max_cqe
= 0xffff;
95 props
->max_mr
= mdev
->limits
.num_mpts
- mdev
->limits
.reserved_mrws
;
96 props
->max_pd
= mdev
->limits
.num_pds
- mdev
->limits
.reserved_pds
;
97 props
->max_qp_rd_atom
= 1 << mdev
->qp_table
.rdb_shift
;
98 props
->max_qp_init_rd_atom
= 1 << mdev
->qp_table
.rdb_shift
;
99 props
->local_ca_ack_delay
= mdev
->limits
.local_ca_ack_delay
;
108 static int mthca_query_port(struct ib_device
*ibdev
,
109 u8 port
, struct ib_port_attr
*props
)
111 struct ib_smp
*in_mad
= NULL
;
112 struct ib_smp
*out_mad
= NULL
;
116 in_mad
= kmalloc(sizeof *in_mad
, GFP_KERNEL
);
117 out_mad
= kmalloc(sizeof *out_mad
, GFP_KERNEL
);
118 if (!in_mad
|| !out_mad
)
121 memset(in_mad
, 0, sizeof *in_mad
);
122 in_mad
->base_version
= 1;
123 in_mad
->mgmt_class
= IB_MGMT_CLASS_SUBN_LID_ROUTED
;
124 in_mad
->class_version
= 1;
125 in_mad
->method
= IB_MGMT_METHOD_GET
;
126 in_mad
->attr_id
= IB_SMP_ATTR_PORT_INFO
;
127 in_mad
->attr_mod
= cpu_to_be32(port
);
129 err
= mthca_MAD_IFC(to_mdev(ibdev
), 1, 1,
130 port
, NULL
, NULL
, in_mad
, out_mad
,
139 props
->lid
= be16_to_cpup((u16
*) (out_mad
->data
+ 16));
140 props
->lmc
= out_mad
->data
[34] & 0x7;
141 props
->sm_lid
= be16_to_cpup((u16
*) (out_mad
->data
+ 18));
142 props
->sm_sl
= out_mad
->data
[36] & 0xf;
143 props
->state
= out_mad
->data
[32] & 0xf;
144 props
->phys_state
= out_mad
->data
[33] >> 4;
145 props
->port_cap_flags
= be32_to_cpup((u32
*) (out_mad
->data
+ 20));
146 props
->gid_tbl_len
= to_mdev(ibdev
)->limits
.gid_table_len
;
147 props
->pkey_tbl_len
= to_mdev(ibdev
)->limits
.pkey_table_len
;
148 props
->qkey_viol_cntr
= be16_to_cpup((u16
*) (out_mad
->data
+ 48));
149 props
->active_width
= out_mad
->data
[31] & 0xf;
150 props
->active_speed
= out_mad
->data
[35] >> 4;
158 static int mthca_modify_port(struct ib_device
*ibdev
,
159 u8 port
, int port_modify_mask
,
160 struct ib_port_modify
*props
)
162 struct mthca_set_ib_param set_ib
;
163 struct ib_port_attr attr
;
167 if (down_interruptible(&to_mdev(ibdev
)->cap_mask_mutex
))
170 err
= mthca_query_port(ibdev
, port
, &attr
);
174 set_ib
.set_si_guid
= 0;
175 set_ib
.reset_qkey_viol
= !!(port_modify_mask
& IB_PORT_RESET_QKEY_CNTR
);
177 set_ib
.cap_mask
= (attr
.port_cap_flags
| props
->set_port_cap_mask
) &
178 ~props
->clr_port_cap_mask
;
180 err
= mthca_SET_IB(to_mdev(ibdev
), &set_ib
, port
, &status
);
189 up(&to_mdev(ibdev
)->cap_mask_mutex
);
193 static int mthca_query_pkey(struct ib_device
*ibdev
,
194 u8 port
, u16 index
, u16
*pkey
)
196 struct ib_smp
*in_mad
= NULL
;
197 struct ib_smp
*out_mad
= NULL
;
201 in_mad
= kmalloc(sizeof *in_mad
, GFP_KERNEL
);
202 out_mad
= kmalloc(sizeof *out_mad
, GFP_KERNEL
);
203 if (!in_mad
|| !out_mad
)
206 memset(in_mad
, 0, sizeof *in_mad
);
207 in_mad
->base_version
= 1;
208 in_mad
->mgmt_class
= IB_MGMT_CLASS_SUBN_LID_ROUTED
;
209 in_mad
->class_version
= 1;
210 in_mad
->method
= IB_MGMT_METHOD_GET
;
211 in_mad
->attr_id
= IB_SMP_ATTR_PKEY_TABLE
;
212 in_mad
->attr_mod
= cpu_to_be32(index
/ 32);
214 err
= mthca_MAD_IFC(to_mdev(ibdev
), 1, 1,
215 port
, NULL
, NULL
, in_mad
, out_mad
,
224 *pkey
= be16_to_cpu(((u16
*) out_mad
->data
)[index
% 32]);
232 static int mthca_query_gid(struct ib_device
*ibdev
, u8 port
,
233 int index
, union ib_gid
*gid
)
235 struct ib_smp
*in_mad
= NULL
;
236 struct ib_smp
*out_mad
= NULL
;
240 in_mad
= kmalloc(sizeof *in_mad
, GFP_KERNEL
);
241 out_mad
= kmalloc(sizeof *out_mad
, GFP_KERNEL
);
242 if (!in_mad
|| !out_mad
)
245 memset(in_mad
, 0, sizeof *in_mad
);
246 in_mad
->base_version
= 1;
247 in_mad
->mgmt_class
= IB_MGMT_CLASS_SUBN_LID_ROUTED
;
248 in_mad
->class_version
= 1;
249 in_mad
->method
= IB_MGMT_METHOD_GET
;
250 in_mad
->attr_id
= IB_SMP_ATTR_PORT_INFO
;
251 in_mad
->attr_mod
= cpu_to_be32(port
);
253 err
= mthca_MAD_IFC(to_mdev(ibdev
), 1, 1,
254 port
, NULL
, NULL
, in_mad
, out_mad
,
263 memcpy(gid
->raw
, out_mad
->data
+ 8, 8);
265 memset(in_mad
, 0, sizeof *in_mad
);
266 in_mad
->base_version
= 1;
267 in_mad
->mgmt_class
= IB_MGMT_CLASS_SUBN_LID_ROUTED
;
268 in_mad
->class_version
= 1;
269 in_mad
->method
= IB_MGMT_METHOD_GET
;
270 in_mad
->attr_id
= IB_SMP_ATTR_GUID_INFO
;
271 in_mad
->attr_mod
= cpu_to_be32(index
/ 8);
273 err
= mthca_MAD_IFC(to_mdev(ibdev
), 1, 1,
274 port
, NULL
, NULL
, in_mad
, out_mad
,
283 memcpy(gid
->raw
+ 8, out_mad
->data
+ (index
% 8) * 16, 8);
291 static struct ib_ucontext
*mthca_alloc_ucontext(struct ib_device
*ibdev
,
292 struct ib_udata
*udata
)
294 struct mthca_alloc_ucontext_resp uresp
;
295 struct mthca_ucontext
*context
;
298 memset(&uresp
, 0, sizeof uresp
);
300 uresp
.qp_tab_size
= to_mdev(ibdev
)->limits
.num_qps
;
301 if (mthca_is_memfree(to_mdev(ibdev
)))
302 uresp
.uarc_size
= to_mdev(ibdev
)->uar_table
.uarc_size
;
306 context
= kmalloc(sizeof *context
, GFP_KERNEL
);
308 return ERR_PTR(-ENOMEM
);
310 err
= mthca_uar_alloc(to_mdev(ibdev
), &context
->uar
);
316 context
->db_tab
= mthca_init_user_db_tab(to_mdev(ibdev
));
317 if (IS_ERR(context
->db_tab
)) {
318 err
= PTR_ERR(context
->db_tab
);
319 mthca_uar_free(to_mdev(ibdev
), &context
->uar
);
324 if (ib_copy_to_udata(udata
, &uresp
, sizeof uresp
)) {
325 mthca_cleanup_user_db_tab(to_mdev(ibdev
), &context
->uar
, context
->db_tab
);
326 mthca_uar_free(to_mdev(ibdev
), &context
->uar
);
328 return ERR_PTR(-EFAULT
);
331 return &context
->ibucontext
;
334 static int mthca_dealloc_ucontext(struct ib_ucontext
*context
)
336 mthca_cleanup_user_db_tab(to_mdev(context
->device
), &to_mucontext(context
)->uar
,
337 to_mucontext(context
)->db_tab
);
338 mthca_uar_free(to_mdev(context
->device
), &to_mucontext(context
)->uar
);
339 kfree(to_mucontext(context
));
344 static int mthca_mmap_uar(struct ib_ucontext
*context
,
345 struct vm_area_struct
*vma
)
347 if (vma
->vm_end
- vma
->vm_start
!= PAGE_SIZE
)
350 vma
->vm_page_prot
= pgprot_noncached(vma
->vm_page_prot
);
352 if (remap_pfn_range(vma
, vma
->vm_start
,
353 to_mucontext(context
)->uar
.pfn
,
354 PAGE_SIZE
, vma
->vm_page_prot
))
360 static struct ib_pd
*mthca_alloc_pd(struct ib_device
*ibdev
,
361 struct ib_ucontext
*context
,
362 struct ib_udata
*udata
)
367 pd
= kmalloc(sizeof *pd
, GFP_KERNEL
);
369 return ERR_PTR(-ENOMEM
);
371 err
= mthca_pd_alloc(to_mdev(ibdev
), !context
, pd
);
378 if (ib_copy_to_udata(udata
, &pd
->pd_num
, sizeof (__u32
))) {
379 mthca_pd_free(to_mdev(ibdev
), pd
);
381 return ERR_PTR(-EFAULT
);
388 static int mthca_dealloc_pd(struct ib_pd
*pd
)
390 mthca_pd_free(to_mdev(pd
->device
), to_mpd(pd
));
396 static struct ib_ah
*mthca_ah_create(struct ib_pd
*pd
,
397 struct ib_ah_attr
*ah_attr
)
402 ah
= kmalloc(sizeof *ah
, GFP_ATOMIC
);
404 return ERR_PTR(-ENOMEM
);
406 err
= mthca_create_ah(to_mdev(pd
->device
), to_mpd(pd
), ah_attr
, ah
);
415 static int mthca_ah_destroy(struct ib_ah
*ah
)
417 mthca_destroy_ah(to_mdev(ah
->device
), to_mah(ah
));
423 static struct ib_qp
*mthca_create_qp(struct ib_pd
*pd
,
424 struct ib_qp_init_attr
*init_attr
,
425 struct ib_udata
*udata
)
430 switch (init_attr
->qp_type
) {
435 qp
= kmalloc(sizeof *qp
, GFP_KERNEL
);
437 return ERR_PTR(-ENOMEM
);
439 qp
->sq
.max
= init_attr
->cap
.max_send_wr
;
440 qp
->rq
.max
= init_attr
->cap
.max_recv_wr
;
441 qp
->sq
.max_gs
= init_attr
->cap
.max_send_sge
;
442 qp
->rq
.max_gs
= init_attr
->cap
.max_recv_sge
;
444 err
= mthca_alloc_qp(to_mdev(pd
->device
), to_mpd(pd
),
445 to_mcq(init_attr
->send_cq
),
446 to_mcq(init_attr
->recv_cq
),
447 init_attr
->qp_type
, init_attr
->sq_sig_type
,
449 qp
->ibqp
.qp_num
= qp
->qpn
;
455 qp
= kmalloc(sizeof (struct mthca_sqp
), GFP_KERNEL
);
457 return ERR_PTR(-ENOMEM
);
459 qp
->sq
.max
= init_attr
->cap
.max_send_wr
;
460 qp
->rq
.max
= init_attr
->cap
.max_recv_wr
;
461 qp
->sq
.max_gs
= init_attr
->cap
.max_send_sge
;
462 qp
->rq
.max_gs
= init_attr
->cap
.max_recv_sge
;
464 qp
->ibqp
.qp_num
= init_attr
->qp_type
== IB_QPT_SMI
? 0 : 1;
466 err
= mthca_alloc_sqp(to_mdev(pd
->device
), to_mpd(pd
),
467 to_mcq(init_attr
->send_cq
),
468 to_mcq(init_attr
->recv_cq
),
469 init_attr
->sq_sig_type
,
470 qp
->ibqp
.qp_num
, init_attr
->port_num
,
475 /* Don't support raw QPs */
476 return ERR_PTR(-ENOSYS
);
484 init_attr
->cap
.max_inline_data
= 0;
489 static int mthca_destroy_qp(struct ib_qp
*qp
)
491 mthca_free_qp(to_mdev(qp
->device
), to_mqp(qp
));
496 static struct ib_cq
*mthca_create_cq(struct ib_device
*ibdev
, int entries
,
497 struct ib_ucontext
*context
,
498 struct ib_udata
*udata
)
504 cq
= kmalloc(sizeof *cq
, GFP_KERNEL
);
506 return ERR_PTR(-ENOMEM
);
508 for (nent
= 1; nent
<= entries
; nent
<<= 1)
511 err
= mthca_init_cq(to_mdev(ibdev
), nent
, cq
);
520 static int mthca_destroy_cq(struct ib_cq
*cq
)
522 mthca_free_cq(to_mdev(cq
->device
), to_mcq(cq
));
528 static inline u32
convert_access(int acc
)
530 return (acc
& IB_ACCESS_REMOTE_ATOMIC
? MTHCA_MPT_FLAG_ATOMIC
: 0) |
531 (acc
& IB_ACCESS_REMOTE_WRITE
? MTHCA_MPT_FLAG_REMOTE_WRITE
: 0) |
532 (acc
& IB_ACCESS_REMOTE_READ
? MTHCA_MPT_FLAG_REMOTE_READ
: 0) |
533 (acc
& IB_ACCESS_LOCAL_WRITE
? MTHCA_MPT_FLAG_LOCAL_WRITE
: 0) |
534 MTHCA_MPT_FLAG_LOCAL_READ
;
537 static struct ib_mr
*mthca_get_dma_mr(struct ib_pd
*pd
, int acc
)
542 mr
= kmalloc(sizeof *mr
, GFP_KERNEL
);
544 return ERR_PTR(-ENOMEM
);
546 err
= mthca_mr_alloc_notrans(to_mdev(pd
->device
),
548 convert_access(acc
), mr
);
558 static struct ib_mr
*mthca_reg_phys_mr(struct ib_pd
*pd
,
559 struct ib_phys_buf
*buffer_list
,
573 /* First check that we have enough alignment */
574 if ((*iova_start
& ~PAGE_MASK
) != (buffer_list
[0].addr
& ~PAGE_MASK
))
575 return ERR_PTR(-EINVAL
);
577 if (num_phys_buf
> 1 &&
578 ((buffer_list
[0].addr
+ buffer_list
[0].size
) & ~PAGE_MASK
))
579 return ERR_PTR(-EINVAL
);
583 for (i
= 0; i
< num_phys_buf
; ++i
) {
584 if (i
!= 0 && buffer_list
[i
].addr
& ~PAGE_MASK
)
585 return ERR_PTR(-EINVAL
);
586 if (i
!= 0 && i
!= num_phys_buf
- 1 &&
587 (buffer_list
[i
].size
& ~PAGE_MASK
))
588 return ERR_PTR(-EINVAL
);
590 total_size
+= buffer_list
[i
].size
;
592 mask
|= buffer_list
[i
].addr
;
595 /* Find largest page shift we can use to cover buffers */
596 for (shift
= PAGE_SHIFT
; shift
< 31; ++shift
)
597 if (num_phys_buf
> 1) {
598 if ((1ULL << shift
) & mask
)
602 buffer_list
[0].size
+
603 (buffer_list
[0].addr
& ((1ULL << shift
) - 1)))
607 buffer_list
[0].size
+= buffer_list
[0].addr
& ((1ULL << shift
) - 1);
608 buffer_list
[0].addr
&= ~0ull << shift
;
610 mr
= kmalloc(sizeof *mr
, GFP_KERNEL
);
612 return ERR_PTR(-ENOMEM
);
615 for (i
= 0; i
< num_phys_buf
; ++i
)
616 npages
+= (buffer_list
[i
].size
+ (1ULL << shift
) - 1) >> shift
;
621 page_list
= kmalloc(npages
* sizeof *page_list
, GFP_KERNEL
);
624 return ERR_PTR(-ENOMEM
);
628 for (i
= 0; i
< num_phys_buf
; ++i
)
630 j
< (buffer_list
[i
].size
+ (1ULL << shift
) - 1) >> shift
;
632 page_list
[n
++] = buffer_list
[i
].addr
+ ((u64
) j
<< shift
);
634 mthca_dbg(to_mdev(pd
->device
), "Registering memory at %llx (iova %llx) "
635 "in PD %x; shift %d, npages %d.\n",
636 (unsigned long long) buffer_list
[0].addr
,
637 (unsigned long long) *iova_start
,
641 err
= mthca_mr_alloc_phys(to_mdev(pd
->device
),
643 page_list
, shift
, npages
,
644 *iova_start
, total_size
,
645 convert_access(acc
), mr
);
657 static struct ib_mr
*mthca_reg_user_mr(struct ib_pd
*pd
, struct ib_umem
*region
,
658 int acc
, struct ib_udata
*udata
)
660 struct mthca_dev
*dev
= to_mdev(pd
->device
);
661 struct ib_umem_chunk
*chunk
;
668 shift
= ffs(region
->page_size
) - 1;
670 mr
= kmalloc(sizeof *mr
, GFP_KERNEL
);
672 return ERR_PTR(-ENOMEM
);
675 list_for_each_entry(chunk
, ®ion
->chunk_list
, list
)
678 mr
->mtt
= mthca_alloc_mtt(dev
, n
);
679 if (IS_ERR(mr
->mtt
)) {
680 err
= PTR_ERR(mr
->mtt
);
684 pages
= (u64
*) __get_free_page(GFP_KERNEL
);
692 list_for_each_entry(chunk
, ®ion
->chunk_list
, list
)
693 for (j
= 0; j
< chunk
->nmap
; ++j
) {
694 len
= sg_dma_len(&chunk
->page_list
[j
]) >> shift
;
695 for (k
= 0; k
< len
; ++k
) {
696 pages
[i
++] = sg_dma_address(&chunk
->page_list
[j
]) +
697 region
->page_size
* k
;
699 * Be friendly to WRITE_MTT command
700 * and leave two empty slots for the
701 * index and reserved fields of the
704 if (i
== PAGE_SIZE
/ sizeof (u64
) - 2) {
705 err
= mthca_write_mtt(dev
, mr
->mtt
,
716 err
= mthca_write_mtt(dev
, mr
->mtt
, n
, pages
, i
);
718 free_page((unsigned long) pages
);
722 err
= mthca_mr_alloc(dev
, to_mpd(pd
)->pd_num
, shift
, region
->virt_base
,
723 region
->length
, convert_access(acc
), mr
);
731 mthca_free_mtt(dev
, mr
->mtt
);
738 static int mthca_dereg_mr(struct ib_mr
*mr
)
740 struct mthca_mr
*mmr
= to_mmr(mr
);
741 mthca_free_mr(to_mdev(mr
->device
), mmr
);
746 static struct ib_fmr
*mthca_alloc_fmr(struct ib_pd
*pd
, int mr_access_flags
,
747 struct ib_fmr_attr
*fmr_attr
)
749 struct mthca_fmr
*fmr
;
752 fmr
= kmalloc(sizeof *fmr
, GFP_KERNEL
);
754 return ERR_PTR(-ENOMEM
);
756 memcpy(&fmr
->attr
, fmr_attr
, sizeof *fmr_attr
);
757 err
= mthca_fmr_alloc(to_mdev(pd
->device
), to_mpd(pd
)->pd_num
,
758 convert_access(mr_access_flags
), fmr
);
768 static int mthca_dealloc_fmr(struct ib_fmr
*fmr
)
770 struct mthca_fmr
*mfmr
= to_mfmr(fmr
);
773 err
= mthca_free_fmr(to_mdev(fmr
->device
), mfmr
);
781 static int mthca_unmap_fmr(struct list_head
*fmr_list
)
786 struct mthca_dev
*mdev
= NULL
;
788 list_for_each_entry(fmr
, fmr_list
, list
) {
789 if (mdev
&& to_mdev(fmr
->device
) != mdev
)
791 mdev
= to_mdev(fmr
->device
);
797 if (mthca_is_memfree(mdev
)) {
798 list_for_each_entry(fmr
, fmr_list
, list
)
799 mthca_arbel_fmr_unmap(mdev
, to_mfmr(fmr
));
803 list_for_each_entry(fmr
, fmr_list
, list
)
804 mthca_tavor_fmr_unmap(mdev
, to_mfmr(fmr
));
806 err
= mthca_SYNC_TPT(mdev
, &status
);
814 static ssize_t
show_rev(struct class_device
*cdev
, char *buf
)
816 struct mthca_dev
*dev
= container_of(cdev
, struct mthca_dev
, ib_dev
.class_dev
);
817 return sprintf(buf
, "%x\n", dev
->rev_id
);
820 static ssize_t
show_fw_ver(struct class_device
*cdev
, char *buf
)
822 struct mthca_dev
*dev
= container_of(cdev
, struct mthca_dev
, ib_dev
.class_dev
);
823 return sprintf(buf
, "%x.%x.%x\n", (int) (dev
->fw_ver
>> 32),
824 (int) (dev
->fw_ver
>> 16) & 0xffff,
825 (int) dev
->fw_ver
& 0xffff);
828 static ssize_t
show_hca(struct class_device
*cdev
, char *buf
)
830 struct mthca_dev
*dev
= container_of(cdev
, struct mthca_dev
, ib_dev
.class_dev
);
831 switch (dev
->pdev
->device
) {
832 case PCI_DEVICE_ID_MELLANOX_TAVOR
:
833 return sprintf(buf
, "MT23108\n");
834 case PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT
:
835 return sprintf(buf
, "MT25208 (MT23108 compat mode)\n");
836 case PCI_DEVICE_ID_MELLANOX_ARBEL
:
837 return sprintf(buf
, "MT25208\n");
838 case PCI_DEVICE_ID_MELLANOX_SINAI
:
839 case PCI_DEVICE_ID_MELLANOX_SINAI_OLD
:
840 return sprintf(buf
, "MT25204\n");
842 return sprintf(buf
, "unknown\n");
846 static CLASS_DEVICE_ATTR(hw_rev
, S_IRUGO
, show_rev
, NULL
);
847 static CLASS_DEVICE_ATTR(fw_ver
, S_IRUGO
, show_fw_ver
, NULL
);
848 static CLASS_DEVICE_ATTR(hca_type
, S_IRUGO
, show_hca
, NULL
);
850 static struct class_device_attribute
*mthca_class_attributes
[] = {
851 &class_device_attr_hw_rev
,
852 &class_device_attr_fw_ver
,
853 &class_device_attr_hca_type
856 int mthca_register_device(struct mthca_dev
*dev
)
861 strlcpy(dev
->ib_dev
.name
, "mthca%d", IB_DEVICE_NAME_MAX
);
862 dev
->ib_dev
.owner
= THIS_MODULE
;
864 dev
->ib_dev
.node_type
= IB_NODE_CA
;
865 dev
->ib_dev
.phys_port_cnt
= dev
->limits
.num_ports
;
866 dev
->ib_dev
.dma_device
= &dev
->pdev
->dev
;
867 dev
->ib_dev
.class_dev
.dev
= &dev
->pdev
->dev
;
868 dev
->ib_dev
.query_device
= mthca_query_device
;
869 dev
->ib_dev
.query_port
= mthca_query_port
;
870 dev
->ib_dev
.modify_port
= mthca_modify_port
;
871 dev
->ib_dev
.query_pkey
= mthca_query_pkey
;
872 dev
->ib_dev
.query_gid
= mthca_query_gid
;
873 dev
->ib_dev
.alloc_ucontext
= mthca_alloc_ucontext
;
874 dev
->ib_dev
.dealloc_ucontext
= mthca_dealloc_ucontext
;
875 dev
->ib_dev
.mmap
= mthca_mmap_uar
;
876 dev
->ib_dev
.alloc_pd
= mthca_alloc_pd
;
877 dev
->ib_dev
.dealloc_pd
= mthca_dealloc_pd
;
878 dev
->ib_dev
.create_ah
= mthca_ah_create
;
879 dev
->ib_dev
.destroy_ah
= mthca_ah_destroy
;
880 dev
->ib_dev
.create_qp
= mthca_create_qp
;
881 dev
->ib_dev
.modify_qp
= mthca_modify_qp
;
882 dev
->ib_dev
.destroy_qp
= mthca_destroy_qp
;
883 dev
->ib_dev
.create_cq
= mthca_create_cq
;
884 dev
->ib_dev
.destroy_cq
= mthca_destroy_cq
;
885 dev
->ib_dev
.poll_cq
= mthca_poll_cq
;
886 dev
->ib_dev
.get_dma_mr
= mthca_get_dma_mr
;
887 dev
->ib_dev
.reg_phys_mr
= mthca_reg_phys_mr
;
888 dev
->ib_dev
.reg_user_mr
= mthca_reg_user_mr
;
889 dev
->ib_dev
.dereg_mr
= mthca_dereg_mr
;
891 if (dev
->mthca_flags
& MTHCA_FLAG_FMR
) {
892 dev
->ib_dev
.alloc_fmr
= mthca_alloc_fmr
;
893 dev
->ib_dev
.unmap_fmr
= mthca_unmap_fmr
;
894 dev
->ib_dev
.dealloc_fmr
= mthca_dealloc_fmr
;
895 if (mthca_is_memfree(dev
))
896 dev
->ib_dev
.map_phys_fmr
= mthca_arbel_map_phys_fmr
;
898 dev
->ib_dev
.map_phys_fmr
= mthca_tavor_map_phys_fmr
;
901 dev
->ib_dev
.attach_mcast
= mthca_multicast_attach
;
902 dev
->ib_dev
.detach_mcast
= mthca_multicast_detach
;
903 dev
->ib_dev
.process_mad
= mthca_process_mad
;
905 if (mthca_is_memfree(dev
)) {
906 dev
->ib_dev
.req_notify_cq
= mthca_arbel_arm_cq
;
907 dev
->ib_dev
.post_send
= mthca_arbel_post_send
;
908 dev
->ib_dev
.post_recv
= mthca_arbel_post_receive
;
910 dev
->ib_dev
.req_notify_cq
= mthca_tavor_arm_cq
;
911 dev
->ib_dev
.post_send
= mthca_tavor_post_send
;
912 dev
->ib_dev
.post_recv
= mthca_tavor_post_receive
;
915 init_MUTEX(&dev
->cap_mask_mutex
);
917 ret
= ib_register_device(&dev
->ib_dev
);
921 for (i
= 0; i
< ARRAY_SIZE(mthca_class_attributes
); ++i
) {
922 ret
= class_device_create_file(&dev
->ib_dev
.class_dev
,
923 mthca_class_attributes
[i
]);
925 ib_unregister_device(&dev
->ib_dev
);
933 void mthca_unregister_device(struct mthca_dev
*dev
)
935 ib_unregister_device(&dev
->ib_dev
);