drivers/infiniband/hw/ipath/ipath_cq.c
/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>

#include "ipath_verbs.h"

/**
 * ipath_cq_enter - add a new entry to the completion queue
 * @cq: completion queue
 * @entry: work completion entry to add
 * @solicited: true if @entry is a solicited entry
 *
 * This may be called with either the qp->s_lock or the qp->r_rq.lock held.
 */
void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
{
	unsigned long flags;
	u32 next;

	spin_lock_irqsave(&cq->lock, flags);

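	/*
	 * Advance head with wraparound at ibcq.cqe; if the new head would
	 * meet the tail the ring is full, so drop the entry and report a
	 * CQ error event to the consumer.
	 */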
	if (cq->head == cq->ibcq.cqe)
		next = 0;
	else
		next = cq->head + 1;
	if (unlikely(next == cq->tail)) {
		spin_unlock_irqrestore(&cq->lock, flags);
		if (cq->ibcq.event_handler) {
			struct ib_event ev;

			ev.device = cq->ibcq.device;
			ev.element.cq = &cq->ibcq;
			ev.event = IB_EVENT_CQ_ERR;
			cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
		}
		return;
	}
	cq->queue[cq->head] = *entry;
	cq->head = next;

	if (cq->notify == IB_CQ_NEXT_COMP ||
	    (cq->notify == IB_CQ_SOLICITED && solicited)) {
		cq->notify = IB_CQ_NONE;
		cq->triggered++;
		/*
		 * This will cause send_complete() to be called in
		 * another thread.
		 */
		tasklet_hi_schedule(&cq->comptask);
	}

	spin_unlock_irqrestore(&cq->lock, flags);

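	/* Count completions that carry an error status in the device stats. */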
	if (entry->status != IB_WC_SUCCESS)
		to_idev(cq->ibcq.device)->n_wqe_errs++;
}

/**
 * ipath_poll_cq - poll for work completion entries
 * @ibcq: the completion queue to poll
 * @num_entries: the maximum number of entries to return
 * @entry: pointer to array where work completions are placed
 *
 * Returns the number of completion entries polled.
 *
 * This may be called from interrupt context. Also called by ib_poll_cq()
 * in the generic verbs code.
 */
int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
	struct ipath_cq *cq = to_icq(ibcq);
	unsigned long flags;
	int npolled;

	spin_lock_irqsave(&cq->lock, flags);

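	/*
	 * Copy completed entries out of the ring from the tail, wrapping
	 * the index at ibcq.cqe, until the ring is empty or the caller's
	 * array is full.
	 */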
	for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
		if (cq->tail == cq->head)
			break;
		*entry = cq->queue[cq->tail];
		if (cq->tail == cq->ibcq.cqe)
			cq->tail = 0;
		else
			cq->tail++;
	}

	spin_unlock_irqrestore(&cq->lock, flags);

	return npolled;
}

static void send_complete(unsigned long data)
{
	struct ipath_cq *cq = (struct ipath_cq *)data;

	/*
	 * The completion handler will most likely rearm the notification
	 * and poll for all pending entries. If a new completion entry
	 * is added while we are in this routine, tasklet_hi_schedule()
	 * won't call us again until we return so we check triggered to
	 * see if we need to call the handler again.
	 */
	for (;;) {
		u8 triggered = cq->triggered;

		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);

		if (cq->triggered == triggered)
			return;
	}
}

/**
 * ipath_create_cq - create a completion queue
 * @ibdev: the device this completion queue is attached to
 * @entries: the minimum size of the completion queue
 * @context: unused by the InfiniPath driver
 * @udata: unused by the InfiniPath driver
 *
 * Returns a pointer to the completion queue on success, or an ERR_PTR()
 * encoding a negative errno on failure.
 *
 * Called by ib_create_cq() in the generic verbs code.
 */
struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
			      struct ib_ucontext *context,
			      struct ib_udata *udata)
{
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_cq *cq;
	struct ib_wc *wc;
	struct ib_cq *ret;

	if (entries > ib_ipath_max_cqes) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	if (dev->n_cqs_allocated == ib_ipath_max_cqs) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	/* Allocate the completion queue structure. */
	cq = kmalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	/*
	 * Need to use vmalloc() if we want to support large #s of entries.
	 */
	wc = vmalloc(sizeof(*wc) * (entries + 1));
	if (!wc) {
		kfree(cq);
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}
	/*
	 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe,
	 * which must be set here to at least the number of entries
	 * requested (otherwise an error would have to be returned).
	 */
	cq->ibcq.cqe = entries;
	cq->notify = IB_CQ_NONE;
	cq->triggered = 0;
	spin_lock_init(&cq->lock);
	tasklet_init(&cq->comptask, send_complete, (unsigned long)cq);
	cq->head = 0;
	cq->tail = 0;
	cq->queue = wc;

	ret = &cq->ibcq;

	dev->n_cqs_allocated++;

bail:
	return ret;
}

/**
 * ipath_destroy_cq - destroy a completion queue
 * @ibcq: the completion queue to destroy.
 *
 * Returns 0 for success.
 *
 * Called by ib_destroy_cq() in the generic verbs code.
 */
int ipath_destroy_cq(struct ib_cq *ibcq)
{
	struct ipath_ibdev *dev = to_idev(ibcq->device);
	struct ipath_cq *cq = to_icq(ibcq);

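	/* Wait for any scheduled completion handler to finish before freeing. */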
	tasklet_kill(&cq->comptask);
	dev->n_cqs_allocated--;
	vfree(cq->queue);
	kfree(cq);

	return 0;
}

/**
 * ipath_req_notify_cq - change the notification type for a completion queue
 * @ibcq: the completion queue
 * @notify: the type of notification to request
 *
 * Returns 0 for success.
 *
 * This may be called from interrupt context. Also called by
 * ib_req_notify_cq() in the generic verbs code.
 */
int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
{
	struct ipath_cq *cq = to_icq(ibcq);
	unsigned long flags;

	spin_lock_irqsave(&cq->lock, flags);
	/*
	 * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
	 * any other transitions.
	 */
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = notify;
	spin_unlock_irqrestore(&cq->lock, flags);
	return 0;
}
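
/*
 * A minimal consumer-side sketch (not part of this driver): a verbs
 * consumer would typically arm the CQ and then drain it from its
 * completion handler, e.g.
 *
 *	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 *	while (ib_poll_cq(cq, 1, &wc) > 0)
 *		handle_wc(&wc);
 *
 * where handle_wc() is a hypothetical consumer routine; the calls reach
 * ipath_req_notify_cq() and ipath_poll_cq() above through the generic
 * verbs code.
 */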

int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	struct ipath_cq *cq = to_icq(ibcq);
	struct ib_wc *wc, *old_wc;
	u32 n;
	int ret;

	/*
	 * Need to use vmalloc() if we want to support large #s of entries.
	 */
	wc = vmalloc(sizeof(*wc) * (cqe + 1));
	if (!wc) {
		ret = -ENOMEM;
		goto bail;
	}

	spin_lock_irq(&cq->lock);
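	/*
	 * Count the entries currently queued; head and tail index a
	 * circular buffer of ibcq.cqe + 1 slots.
	 */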
	if (cq->head < cq->tail)
		n = cq->ibcq.cqe + 1 + cq->head - cq->tail;
	else
		n = cq->head - cq->tail;
	if (unlikely((u32)cqe < n)) {
		spin_unlock_irq(&cq->lock);
		vfree(wc);
		ret = -EOVERFLOW;
		goto bail;
	}
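	/* Copy the outstanding completions to the start of the new buffer. */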
	for (n = 0; cq->tail != cq->head; n++) {
		wc[n] = cq->queue[cq->tail];
		if (cq->tail == cq->ibcq.cqe)
			cq->tail = 0;
		else
			cq->tail++;
	}
	cq->ibcq.cqe = cqe;
	cq->head = n;
	cq->tail = 0;
	old_wc = cq->queue;
	cq->queue = wc;
	spin_unlock_irq(&cq->lock);

	vfree(old_wc);

	ret = 0;

bail:
	return ret;
}