/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: user_mad.c 1389 2004-12-27 22:56:47Z roland $
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/poll.h>
#include <linux/rwsem.h>
#include <linux/kref.h>

#include <asm/uaccess.h>
#include <asm/semaphore.h>

#include <ib_mad.h>
#include <ib_user_mad.h>
53 MODULE_AUTHOR("Roland Dreier");
54 MODULE_DESCRIPTION("InfiniBand userspace MAD packet access");
55 MODULE_LICENSE("Dual BSD/GPL");
enum {
	/* Upper bound on ports served by this driver; one umad and one
	 * issm device node are created per port, so the dev_map bitmap
	 * below is sized IB_UMAD_MAX_PORTS * 2. */
	IB_UMAD_MAX_PORTS  = 64,
	/* Per-open-file limit on concurrently registered MAD agents. */
	IB_UMAD_MAX_AGENTS = 32,

	/* Character device major/minor base used by MKDEV() below.
	 * NOTE(review): 231 is the historical ib_umad major — confirm
	 * against the original tree. */
	IB_UMAD_MAJOR      = 231,
	IB_UMAD_MINOR_BASE = 0
};
68 struct class_device class_dev
;
72 struct class_device sm_class_dev
;
73 struct semaphore sm_sem
;
75 struct ib_device
*ib_dev
;
76 struct ib_umad_device
*umad_dev
;
80 struct ib_umad_device
{
81 int start_port
, end_port
;
83 struct ib_umad_port port
[0];
87 struct ib_umad_port
*port
;
89 struct list_head recv_list
;
90 wait_queue_head_t recv_wait
;
91 struct rw_semaphore agent_mutex
;
92 struct ib_mad_agent
*agent
[IB_UMAD_MAX_AGENTS
];
93 struct ib_mr
*mr
[IB_UMAD_MAX_AGENTS
];
96 struct ib_umad_packet
{
97 struct ib_user_mad mad
;
99 struct list_head list
;
100 DECLARE_PCI_UNMAP_ADDR(mapping
)
103 static const dev_t base_dev
= MKDEV(IB_UMAD_MAJOR
, IB_UMAD_MINOR_BASE
);
104 static spinlock_t map_lock
;
105 static DECLARE_BITMAP(dev_map
, IB_UMAD_MAX_PORTS
* 2);
107 static void ib_umad_add_one(struct ib_device
*device
);
108 static void ib_umad_remove_one(struct ib_device
*device
);
110 static int queue_packet(struct ib_umad_file
*file
,
111 struct ib_mad_agent
*agent
,
112 struct ib_umad_packet
*packet
)
116 down_read(&file
->agent_mutex
);
117 for (packet
->mad
.id
= 0;
118 packet
->mad
.id
< IB_UMAD_MAX_AGENTS
;
120 if (agent
== file
->agent
[packet
->mad
.id
]) {
121 spin_lock_irq(&file
->recv_lock
);
122 list_add_tail(&packet
->list
, &file
->recv_list
);
123 spin_unlock_irq(&file
->recv_lock
);
124 wake_up_interruptible(&file
->recv_wait
);
129 up_read(&file
->agent_mutex
);
134 static void send_handler(struct ib_mad_agent
*agent
,
135 struct ib_mad_send_wc
*send_wc
)
137 struct ib_umad_file
*file
= agent
->context
;
138 struct ib_umad_packet
*packet
=
139 (void *) (unsigned long) send_wc
->wr_id
;
141 dma_unmap_single(agent
->device
->dma_device
,
142 pci_unmap_addr(packet
, mapping
),
143 sizeof packet
->mad
.data
,
145 ib_destroy_ah(packet
->ah
);
147 if (send_wc
->status
== IB_WC_RESP_TIMEOUT_ERR
) {
148 packet
->mad
.status
= ETIMEDOUT
;
150 if (!queue_packet(file
, agent
, packet
))
157 static void recv_handler(struct ib_mad_agent
*agent
,
158 struct ib_mad_recv_wc
*mad_recv_wc
)
160 struct ib_umad_file
*file
= agent
->context
;
161 struct ib_umad_packet
*packet
;
163 if (mad_recv_wc
->wc
->status
!= IB_WC_SUCCESS
)
166 packet
= kmalloc(sizeof *packet
, GFP_KERNEL
);
170 memset(packet
, 0, sizeof *packet
);
172 memcpy(packet
->mad
.data
, mad_recv_wc
->recv_buf
.mad
, sizeof packet
->mad
.data
);
173 packet
->mad
.status
= 0;
174 packet
->mad
.qpn
= cpu_to_be32(mad_recv_wc
->wc
->src_qp
);
175 packet
->mad
.lid
= cpu_to_be16(mad_recv_wc
->wc
->slid
);
176 packet
->mad
.sl
= mad_recv_wc
->wc
->sl
;
177 packet
->mad
.path_bits
= mad_recv_wc
->wc
->dlid_path_bits
;
178 packet
->mad
.grh_present
= !!(mad_recv_wc
->wc
->wc_flags
& IB_WC_GRH
);
179 if (packet
->mad
.grh_present
) {
181 packet
->mad
.gid_index
= 0;
182 packet
->mad
.hop_limit
= 0;
183 packet
->mad
.traffic_class
= 0;
184 memset(packet
->mad
.gid
, 0, 16);
185 packet
->mad
.flow_label
= 0;
188 if (queue_packet(file
, agent
, packet
))
192 ib_free_recv_mad(mad_recv_wc
);
195 static ssize_t
ib_umad_read(struct file
*filp
, char __user
*buf
,
196 size_t count
, loff_t
*pos
)
198 struct ib_umad_file
*file
= filp
->private_data
;
199 struct ib_umad_packet
*packet
;
202 if (count
< sizeof (struct ib_user_mad
))
205 spin_lock_irq(&file
->recv_lock
);
207 while (list_empty(&file
->recv_list
)) {
208 spin_unlock_irq(&file
->recv_lock
);
210 if (filp
->f_flags
& O_NONBLOCK
)
213 if (wait_event_interruptible(file
->recv_wait
,
214 !list_empty(&file
->recv_list
)))
217 spin_lock_irq(&file
->recv_lock
);
220 packet
= list_entry(file
->recv_list
.next
, struct ib_umad_packet
, list
);
221 list_del(&packet
->list
);
223 spin_unlock_irq(&file
->recv_lock
);
225 if (copy_to_user(buf
, &packet
->mad
, sizeof packet
->mad
))
228 ret
= sizeof packet
->mad
;
234 static ssize_t
ib_umad_write(struct file
*filp
, const char __user
*buf
,
235 size_t count
, loff_t
*pos
)
237 struct ib_umad_file
*file
= filp
->private_data
;
238 struct ib_umad_packet
*packet
;
239 struct ib_mad_agent
*agent
;
240 struct ib_ah_attr ah_attr
;
241 struct ib_sge gather_list
;
242 struct ib_send_wr
*bad_wr
, wr
= {
243 .opcode
= IB_WR_SEND
,
244 .sg_list
= &gather_list
,
246 .send_flags
= IB_SEND_SIGNALED
,
252 if (count
< sizeof (struct ib_user_mad
))
255 packet
= kmalloc(sizeof *packet
, GFP_KERNEL
);
259 if (copy_from_user(&packet
->mad
, buf
, sizeof packet
->mad
)) {
264 if (packet
->mad
.id
< 0 || packet
->mad
.id
>= IB_UMAD_MAX_AGENTS
) {
269 down_read(&file
->agent_mutex
);
271 agent
= file
->agent
[packet
->mad
.id
];
278 * If userspace is generating a request that will generate a
279 * response, we need to make sure the high-order part of the
280 * transaction ID matches the agent being used to send the
283 method
= ((struct ib_mad_hdr
*) packet
->mad
.data
)->method
;
285 if (!(method
& IB_MGMT_METHOD_RESP
) &&
286 method
!= IB_MGMT_METHOD_TRAP_REPRESS
&&
287 method
!= IB_MGMT_METHOD_SEND
) {
288 tid
= &((struct ib_mad_hdr
*) packet
->mad
.data
)->tid
;
289 *tid
= cpu_to_be64(((u64
) agent
->hi_tid
) << 32 |
290 (be64_to_cpup(tid
) & 0xffffffff));
293 memset(&ah_attr
, 0, sizeof ah_attr
);
294 ah_attr
.dlid
= be16_to_cpu(packet
->mad
.lid
);
295 ah_attr
.sl
= packet
->mad
.sl
;
296 ah_attr
.src_path_bits
= packet
->mad
.path_bits
;
297 ah_attr
.port_num
= file
->port
->port_num
;
298 if (packet
->mad
.grh_present
) {
299 ah_attr
.ah_flags
= IB_AH_GRH
;
300 memcpy(ah_attr
.grh
.dgid
.raw
, packet
->mad
.gid
, 16);
301 ah_attr
.grh
.flow_label
= packet
->mad
.flow_label
;
302 ah_attr
.grh
.hop_limit
= packet
->mad
.hop_limit
;
303 ah_attr
.grh
.traffic_class
= packet
->mad
.traffic_class
;
306 packet
->ah
= ib_create_ah(agent
->qp
->pd
, &ah_attr
);
307 if (IS_ERR(packet
->ah
)) {
308 ret
= PTR_ERR(packet
->ah
);
312 gather_list
.addr
= dma_map_single(agent
->device
->dma_device
,
314 sizeof packet
->mad
.data
,
316 gather_list
.length
= sizeof packet
->mad
.data
;
317 gather_list
.lkey
= file
->mr
[packet
->mad
.id
]->lkey
;
318 pci_unmap_addr_set(packet
, mapping
, gather_list
.addr
);
320 wr
.wr
.ud
.mad_hdr
= (struct ib_mad_hdr
*) packet
->mad
.data
;
321 wr
.wr
.ud
.ah
= packet
->ah
;
322 wr
.wr
.ud
.remote_qpn
= be32_to_cpu(packet
->mad
.qpn
);
323 wr
.wr
.ud
.remote_qkey
= be32_to_cpu(packet
->mad
.qkey
);
324 wr
.wr
.ud
.timeout_ms
= packet
->mad
.timeout_ms
;
325 wr
.wr
.ud
.retries
= 0;
327 wr
.wr_id
= (unsigned long) packet
;
329 ret
= ib_post_send_mad(agent
, &wr
, &bad_wr
);
331 dma_unmap_single(agent
->device
->dma_device
,
332 pci_unmap_addr(packet
, mapping
),
333 sizeof packet
->mad
.data
,
338 up_read(&file
->agent_mutex
);
340 return sizeof packet
->mad
;
343 up_read(&file
->agent_mutex
);
350 static unsigned int ib_umad_poll(struct file
*filp
, struct poll_table_struct
*wait
)
352 struct ib_umad_file
*file
= filp
->private_data
;
354 /* we will always be able to post a MAD send */
355 unsigned int mask
= POLLOUT
| POLLWRNORM
;
357 poll_wait(filp
, &file
->recv_wait
, wait
);
359 if (!list_empty(&file
->recv_list
))
360 mask
|= POLLIN
| POLLRDNORM
;
365 static int ib_umad_reg_agent(struct ib_umad_file
*file
, unsigned long arg
)
367 struct ib_user_mad_reg_req ureq
;
368 struct ib_mad_reg_req req
;
369 struct ib_mad_agent
*agent
;
373 down_write(&file
->agent_mutex
);
375 if (copy_from_user(&ureq
, (void __user
*) arg
, sizeof ureq
)) {
380 if (ureq
.qpn
!= 0 && ureq
.qpn
!= 1) {
385 for (agent_id
= 0; agent_id
< IB_UMAD_MAX_AGENTS
; ++agent_id
)
386 if (!file
->agent
[agent_id
])
393 if (ureq
.mgmt_class
) {
394 req
.mgmt_class
= ureq
.mgmt_class
;
395 req
.mgmt_class_version
= ureq
.mgmt_class_version
;
396 memcpy(req
.method_mask
, ureq
.method_mask
, sizeof req
.method_mask
);
397 memcpy(req
.oui
, ureq
.oui
, sizeof req
.oui
);
400 agent
= ib_register_mad_agent(file
->port
->ib_dev
, file
->port
->port_num
,
401 ureq
.qpn
? IB_QPT_GSI
: IB_QPT_SMI
,
402 ureq
.mgmt_class
? &req
: NULL
,
403 0, send_handler
, recv_handler
, file
);
405 ret
= PTR_ERR(agent
);
409 file
->agent
[agent_id
] = agent
;
411 file
->mr
[agent_id
] = ib_get_dma_mr(agent
->qp
->pd
, IB_ACCESS_LOCAL_WRITE
);
412 if (IS_ERR(file
->mr
[agent_id
])) {
417 if (put_user(agent_id
,
418 (u32 __user
*) (arg
+ offsetof(struct ib_user_mad_reg_req
, id
)))) {
427 ib_dereg_mr(file
->mr
[agent_id
]);
430 file
->agent
[agent_id
] = NULL
;
431 ib_unregister_mad_agent(agent
);
434 up_write(&file
->agent_mutex
);
438 static int ib_umad_unreg_agent(struct ib_umad_file
*file
, unsigned long arg
)
443 down_write(&file
->agent_mutex
);
445 if (get_user(id
, (u32 __user
*) arg
)) {
450 if (id
< 0 || id
>= IB_UMAD_MAX_AGENTS
|| !file
->agent
[id
]) {
455 ib_dereg_mr(file
->mr
[id
]);
456 ib_unregister_mad_agent(file
->agent
[id
]);
457 file
->agent
[id
] = NULL
;
460 up_write(&file
->agent_mutex
);
464 static long ib_umad_ioctl(struct file
*filp
,
465 unsigned int cmd
, unsigned long arg
)
468 case IB_USER_MAD_REGISTER_AGENT
:
469 return ib_umad_reg_agent(filp
->private_data
, arg
);
470 case IB_USER_MAD_UNREGISTER_AGENT
:
471 return ib_umad_unreg_agent(filp
->private_data
, arg
);
477 static int ib_umad_open(struct inode
*inode
, struct file
*filp
)
479 struct ib_umad_port
*port
=
480 container_of(inode
->i_cdev
, struct ib_umad_port
, dev
);
481 struct ib_umad_file
*file
;
483 file
= kmalloc(sizeof *file
, GFP_KERNEL
);
487 memset(file
, 0, sizeof *file
);
489 spin_lock_init(&file
->recv_lock
);
490 init_rwsem(&file
->agent_mutex
);
491 INIT_LIST_HEAD(&file
->recv_list
);
492 init_waitqueue_head(&file
->recv_wait
);
495 filp
->private_data
= file
;
500 static int ib_umad_close(struct inode
*inode
, struct file
*filp
)
502 struct ib_umad_file
*file
= filp
->private_data
;
503 struct ib_umad_packet
*packet
, *tmp
;
506 for (i
= 0; i
< IB_UMAD_MAX_AGENTS
; ++i
)
507 if (file
->agent
[i
]) {
508 ib_dereg_mr(file
->mr
[i
]);
509 ib_unregister_mad_agent(file
->agent
[i
]);
512 list_for_each_entry_safe(packet
, tmp
, &file
->recv_list
, list
)
520 static struct file_operations umad_fops
= {
521 .owner
= THIS_MODULE
,
522 .read
= ib_umad_read
,
523 .write
= ib_umad_write
,
524 .poll
= ib_umad_poll
,
525 .unlocked_ioctl
= ib_umad_ioctl
,
526 .compat_ioctl
= ib_umad_ioctl
,
527 .open
= ib_umad_open
,
528 .release
= ib_umad_close
531 static int ib_umad_sm_open(struct inode
*inode
, struct file
*filp
)
533 struct ib_umad_port
*port
=
534 container_of(inode
->i_cdev
, struct ib_umad_port
, sm_dev
);
535 struct ib_port_modify props
= {
536 .set_port_cap_mask
= IB_PORT_SM
540 if (filp
->f_flags
& O_NONBLOCK
) {
541 if (down_trylock(&port
->sm_sem
))
544 if (down_interruptible(&port
->sm_sem
))
548 ret
= ib_modify_port(port
->ib_dev
, port
->port_num
, 0, &props
);
554 filp
->private_data
= port
;
559 static int ib_umad_sm_close(struct inode
*inode
, struct file
*filp
)
561 struct ib_umad_port
*port
= filp
->private_data
;
562 struct ib_port_modify props
= {
563 .clr_port_cap_mask
= IB_PORT_SM
567 ret
= ib_modify_port(port
->ib_dev
, port
->port_num
, 0, &props
);
573 static struct file_operations umad_sm_fops
= {
574 .owner
= THIS_MODULE
,
575 .open
= ib_umad_sm_open
,
576 .release
= ib_umad_sm_close
579 static struct ib_client umad_client
= {
581 .add
= ib_umad_add_one
,
582 .remove
= ib_umad_remove_one
585 static ssize_t
show_dev(struct class_device
*class_dev
, char *buf
)
587 struct ib_umad_port
*port
= class_get_devdata(class_dev
);
589 if (class_dev
== &port
->class_dev
)
590 return print_dev_t(buf
, port
->dev
.dev
);
592 return print_dev_t(buf
, port
->sm_dev
.dev
);
594 static CLASS_DEVICE_ATTR(dev
, S_IRUGO
, show_dev
, NULL
);
596 static ssize_t
show_ibdev(struct class_device
*class_dev
, char *buf
)
598 struct ib_umad_port
*port
= class_get_devdata(class_dev
);
600 return sprintf(buf
, "%s\n", port
->ib_dev
->name
);
602 static CLASS_DEVICE_ATTR(ibdev
, S_IRUGO
, show_ibdev
, NULL
);
604 static ssize_t
show_port(struct class_device
*class_dev
, char *buf
)
606 struct ib_umad_port
*port
= class_get_devdata(class_dev
);
608 return sprintf(buf
, "%d\n", port
->port_num
);
610 static CLASS_DEVICE_ATTR(port
, S_IRUGO
, show_port
, NULL
);
612 static void ib_umad_release_dev(struct kref
*ref
)
614 struct ib_umad_device
*dev
=
615 container_of(ref
, struct ib_umad_device
, ref
);
620 static void ib_umad_release_port(struct class_device
*class_dev
)
622 struct ib_umad_port
*port
= class_get_devdata(class_dev
);
624 if (class_dev
== &port
->class_dev
) {
625 cdev_del(&port
->dev
);
626 clear_bit(port
->devnum
, dev_map
);
628 cdev_del(&port
->sm_dev
);
629 clear_bit(port
->sm_devnum
, dev_map
);
632 kref_put(&port
->umad_dev
->ref
, ib_umad_release_dev
);
635 static struct class umad_class
= {
636 .name
= "infiniband_mad",
637 .release
= ib_umad_release_port
640 static ssize_t
show_abi_version(struct class *class, char *buf
)
642 return sprintf(buf
, "%d\n", IB_USER_MAD_ABI_VERSION
);
644 static CLASS_ATTR(abi_version
, S_IRUGO
, show_abi_version
, NULL
);
646 static int ib_umad_init_port(struct ib_device
*device
, int port_num
,
647 struct ib_umad_port
*port
)
649 spin_lock(&map_lock
);
650 port
->devnum
= find_first_zero_bit(dev_map
, IB_UMAD_MAX_PORTS
);
651 if (port
->devnum
>= IB_UMAD_MAX_PORTS
) {
652 spin_unlock(&map_lock
);
655 port
->sm_devnum
= find_next_zero_bit(dev_map
, IB_UMAD_MAX_PORTS
* 2, IB_UMAD_MAX_PORTS
);
656 if (port
->sm_devnum
>= IB_UMAD_MAX_PORTS
* 2) {
657 spin_unlock(&map_lock
);
660 set_bit(port
->devnum
, dev_map
);
661 set_bit(port
->sm_devnum
, dev_map
);
662 spin_unlock(&map_lock
);
664 port
->ib_dev
= device
;
665 port
->port_num
= port_num
;
666 init_MUTEX(&port
->sm_sem
);
668 cdev_init(&port
->dev
, &umad_fops
);
669 port
->dev
.owner
= THIS_MODULE
;
670 kobject_set_name(&port
->dev
.kobj
, "umad%d", port
->devnum
);
671 if (cdev_add(&port
->dev
, base_dev
+ port
->devnum
, 1))
674 port
->class_dev
.class = &umad_class
;
675 port
->class_dev
.dev
= device
->dma_device
;
677 snprintf(port
->class_dev
.class_id
, BUS_ID_SIZE
, "umad%d", port
->devnum
);
679 if (class_device_register(&port
->class_dev
))
682 class_set_devdata(&port
->class_dev
, port
);
683 kref_get(&port
->umad_dev
->ref
);
685 if (class_device_create_file(&port
->class_dev
, &class_device_attr_dev
))
687 if (class_device_create_file(&port
->class_dev
, &class_device_attr_ibdev
))
689 if (class_device_create_file(&port
->class_dev
, &class_device_attr_port
))
692 cdev_init(&port
->sm_dev
, &umad_sm_fops
);
693 port
->sm_dev
.owner
= THIS_MODULE
;
694 kobject_set_name(&port
->dev
.kobj
, "issm%d", port
->sm_devnum
- IB_UMAD_MAX_PORTS
);
695 if (cdev_add(&port
->sm_dev
, base_dev
+ port
->sm_devnum
, 1))
698 port
->sm_class_dev
.class = &umad_class
;
699 port
->sm_class_dev
.dev
= device
->dma_device
;
701 snprintf(port
->sm_class_dev
.class_id
, BUS_ID_SIZE
, "issm%d", port
->sm_devnum
- IB_UMAD_MAX_PORTS
);
703 if (class_device_register(&port
->sm_class_dev
))
706 class_set_devdata(&port
->sm_class_dev
, port
);
707 kref_get(&port
->umad_dev
->ref
);
709 if (class_device_create_file(&port
->sm_class_dev
, &class_device_attr_dev
))
711 if (class_device_create_file(&port
->sm_class_dev
, &class_device_attr_ibdev
))
713 if (class_device_create_file(&port
->sm_class_dev
, &class_device_attr_port
))
719 class_device_unregister(&port
->sm_class_dev
);
722 cdev_del(&port
->sm_dev
);
725 class_device_unregister(&port
->class_dev
);
728 cdev_del(&port
->dev
);
729 clear_bit(port
->devnum
, dev_map
);
734 static void ib_umad_add_one(struct ib_device
*device
)
736 struct ib_umad_device
*umad_dev
;
739 if (device
->node_type
== IB_NODE_SWITCH
)
743 e
= device
->phys_port_cnt
;
746 umad_dev
= kmalloc(sizeof *umad_dev
+
747 (e
- s
+ 1) * sizeof (struct ib_umad_port
),
752 memset(umad_dev
, 0, sizeof *umad_dev
+
753 (e
- s
+ 1) * sizeof (struct ib_umad_port
));
755 kref_init(&umad_dev
->ref
);
757 umad_dev
->start_port
= s
;
758 umad_dev
->end_port
= e
;
760 for (i
= s
; i
<= e
; ++i
) {
761 umad_dev
->port
[i
- s
].umad_dev
= umad_dev
;
763 if (ib_umad_init_port(device
, i
, &umad_dev
->port
[i
- s
]))
767 ib_set_client_data(device
, &umad_client
, umad_dev
);
773 class_device_unregister(&umad_dev
->port
[i
- s
].class_dev
);
774 class_device_unregister(&umad_dev
->port
[i
- s
].sm_class_dev
);
777 kref_put(&umad_dev
->ref
, ib_umad_release_dev
);
780 static void ib_umad_remove_one(struct ib_device
*device
)
782 struct ib_umad_device
*umad_dev
= ib_get_client_data(device
, &umad_client
);
788 for (i
= 0; i
<= umad_dev
->end_port
- umad_dev
->start_port
; ++i
) {
789 class_device_unregister(&umad_dev
->port
[i
].class_dev
);
790 class_device_unregister(&umad_dev
->port
[i
].sm_class_dev
);
793 kref_put(&umad_dev
->ref
, ib_umad_release_dev
);
796 static int __init
ib_umad_init(void)
800 spin_lock_init(&map_lock
);
802 ret
= register_chrdev_region(base_dev
, IB_UMAD_MAX_PORTS
* 2,
805 printk(KERN_ERR
"user_mad: couldn't register device number\n");
809 ret
= class_register(&umad_class
);
811 printk(KERN_ERR
"user_mad: couldn't create class infiniband_mad\n");
815 ret
= class_create_file(&umad_class
, &class_attr_abi_version
);
817 printk(KERN_ERR
"user_mad: couldn't create abi_version attribute\n");
821 ret
= ib_register_client(&umad_client
);
823 printk(KERN_ERR
"user_mad: couldn't register ib_umad client\n");
830 class_unregister(&umad_class
);
833 unregister_chrdev_region(base_dev
, IB_UMAD_MAX_PORTS
* 2);
839 static void __exit
ib_umad_cleanup(void)
841 ib_unregister_client(&umad_client
);
842 class_unregister(&umad_class
);
843 unregister_chrdev_region(base_dev
, IB_UMAD_MAX_PORTS
* 2);
846 module_init(ib_umad_init
);
847 module_exit(ib_umad_cleanup
);