/*
 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
 * All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/module.h>

#include "qib.h"
#include "qib_common.h"

/*
 * min buffers we want to have per context, after driver
 */
#define QIB_MIN_USER_CTXT_BUFCNT 7

#define QLOGIC_IB_R_SOFTWARE_MASK 0xFF
#define QLOGIC_IB_R_SOFTWARE_SHIFT 24
#define QLOGIC_IB_R_EMULATOR_MASK (1ULL<<62)
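
/*
 * The chip software version is the 8-bit field in bits 31:24 of
 * dd->revision; loadtime_init() below extracts it with the mask and
 * shift above and refuses the chip unless it matches QIB_CHIP_SWVERSION.
 */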

/*
 * Number of ctxts we are configured to use (to allow for more pio
 * buffers per ctxt, etc.)  Zero means use chip value.
 */
ushort qib_cfgctxts;
module_param_named(cfgctxts, qib_cfgctxts, ushort, S_IRUGO);
MODULE_PARM_DESC(cfgctxts, "Set max number of contexts to use");

/*
 * If set, do not write to any regs if avoidable, hack to allow
 * check for deranged default register values.
 */
ushort qib_mini_init;
module_param_named(mini_init, qib_mini_init, ushort, S_IRUGO);
MODULE_PARM_DESC(mini_init, "If set, do minimal diag init");

unsigned qib_n_krcv_queues;
module_param_named(krcvqs, qib_n_krcv_queues, uint, S_IRUGO);
MODULE_PARM_DESC(krcvqs, "number of kernel receive queues per IB port");

/*
 * qib_wc_pat parameter:
 *      0 is WC via MTRR
 *      1 is WC via PAT
 *      If PAT initialization fails, code reverts back to MTRR
 */
unsigned qib_wc_pat = 1; /* default (1) is to use PAT, not MTRR */
module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO);
MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism");
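
/*
 * For example (assuming the usual ib_qib module name), loading with
 * "modprobe ib_qib wc_pat=0" selects the MTRR path instead of PAT.
 */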

struct workqueue_struct *qib_cq_wq;

static void verify_interrupt(unsigned long);

static struct idr qib_unit_table;
u32 qib_cpulist_count;
unsigned long *qib_cpulist;

/* set number of contexts we'll actually use */
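/*
 * Worked example (hypothetical values): with qib_cfgctxts left at 0,
 * first_user_ctxt = 2, 8 online CPUs, and a chip ctxtcnt of 18, we get
 * cfgctxts = min(2 + 8, 18) = 10 and freectxts = 10 - 2 = 8.
 */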
void qib_set_ctxtcnt(struct qib_devdata *dd)
{
	if (!qib_cfgctxts) {
		dd->cfgctxts = dd->first_user_ctxt + num_online_cpus();
		if (dd->cfgctxts > dd->ctxtcnt)
			dd->cfgctxts = dd->ctxtcnt;
	} else if (qib_cfgctxts < dd->num_pports)
		dd->cfgctxts = dd->ctxtcnt;
	else if (qib_cfgctxts <= dd->ctxtcnt)
		dd->cfgctxts = qib_cfgctxts;
	else
		dd->cfgctxts = dd->ctxtcnt;
	dd->freectxts = (dd->first_user_ctxt > dd->cfgctxts) ? 0 :
		dd->cfgctxts - dd->first_user_ctxt;
}

/*
 * Common code for creating the receive context array.
 */
int qib_create_ctxts(struct qib_devdata *dd)
{
	unsigned i;
	int ret;

	/*
	 * Allocate full ctxtcnt array, rather than just cfgctxts, because
	 * cleanup iterates across all possible ctxts.
	 */
	dd->rcd = kzalloc(sizeof(*dd->rcd) * dd->ctxtcnt, GFP_KERNEL);
	if (!dd->rcd) {
		qib_dev_err(dd, "Unable to allocate ctxtdata array, "
			    "failing\n");
		ret = -ENOMEM;
		goto done;
	}

	/* create (one or more) kctxt */
	for (i = 0; i < dd->first_user_ctxt; ++i) {
		struct qib_pportdata *ppd;
		struct qib_ctxtdata *rcd;

		if (dd->skip_kctxt_mask & (1 << i))
			continue;

		ppd = dd->pport + (i % dd->num_pports);
		rcd = qib_create_ctxtdata(ppd, i);
		if (!rcd) {
			qib_dev_err(dd, "Unable to allocate ctxtdata"
				    " for Kernel ctxt, failing\n");
			ret = -ENOMEM;
			goto done;
		}
		rcd->pkeys[0] = QIB_DEFAULT_P_KEY;
		rcd->seq_cnt = 1;
	}
	ret = 0;
done:
	return ret;
}

/*
 * Common code for user and kernel context setup.
 */
struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt)
{
	struct qib_devdata *dd = ppd->dd;
	struct qib_ctxtdata *rcd;

	rcd = kzalloc(sizeof(*rcd), GFP_KERNEL);
	if (rcd) {
		INIT_LIST_HEAD(&rcd->qp_wait_list);
		rcd->ppd = ppd;
		rcd->dd = dd;
		rcd->cnt = 1;
		rcd->ctxt = ctxt;
		dd->rcd[ctxt] = rcd;

		dd->f_init_ctxt(rcd);

		/*
		 * To avoid wasting a lot of memory, we allocate 32KB chunks
		 * of physically contiguous memory, advance through it until
		 * used up and then allocate more.  Of course, we need
		 * memory to store those extra pointers, now.  32KB seems to
		 * be the most that is "safe" under memory pressure
		 * (creating large files and then copying them over
		 * NFS while doing lots of MPI jobs).  The OOM killer can
		 * get invoked, even though we say we can sleep and this can
		 * cause significant system problems....
		 */
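		/*
		 * Worked example (hypothetical sizes): with
		 * dd->rcvegrbufsize = 2048 and rcd->rcvegrcnt = 64, each
		 * 32KB chunk holds 0x8000 / 2048 = 16 buffers, so we need
		 * (64 + 15) / 16 = 4 chunks and the per-chunk shift is
		 * ilog2(16) = 4.  The BUG_ON below holds because 16 is a
		 * power of 2.
		 */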
		rcd->rcvegrbuf_size = 0x8000;
		rcd->rcvegrbufs_perchunk =
			rcd->rcvegrbuf_size / dd->rcvegrbufsize;
		rcd->rcvegrbuf_chunks = (rcd->rcvegrcnt +
			rcd->rcvegrbufs_perchunk - 1) /
			rcd->rcvegrbufs_perchunk;
		BUG_ON(!is_power_of_2(rcd->rcvegrbufs_perchunk));
		rcd->rcvegrbufs_perchunk_shift =
			ilog2(rcd->rcvegrbufs_perchunk);
	}
	return rcd;
}

/*
 * Common code for initializing the physical port structure.
 */
void qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
			u8 hw_pidx, u8 port)
{
	ppd->dd = dd;
	ppd->hw_pidx = hw_pidx;
	ppd->port = port; /* IB port number, not index */

	spin_lock_init(&ppd->sdma_lock);
	spin_lock_init(&ppd->lflags_lock);
	init_waitqueue_head(&ppd->state_wait);

	init_timer(&ppd->symerr_clear_timer);
	ppd->symerr_clear_timer.function = qib_clear_symerror_on_linkup;
	ppd->symerr_clear_timer.data = (unsigned long)ppd;
}

static int init_pioavailregs(struct qib_devdata *dd)
{
	int ret, pidx;
	u64 *status_page;

	dd->pioavailregs_dma = dma_alloc_coherent(
		&dd->pcidev->dev, PAGE_SIZE, &dd->pioavailregs_phys,
		GFP_KERNEL);
	if (!dd->pioavailregs_dma) {
		qib_dev_err(dd, "failed to allocate PIOavail reg area "
			    "in memory\n");
		ret = -ENOMEM;
		goto done;
	}

	/*
	 * We really want L2 cache aligned, but for current CPUs of
	 * interest, they are the same.
	 */
	status_page = (u64 *)
		((char *) dd->pioavailregs_dma +
		 ((2 * L1_CACHE_BYTES +
		   dd->pioavregs * sizeof(u64)) & ~L1_CACHE_BYTES));
	/* device status comes first, for backwards compatibility */
	dd->devstatusp = status_page;
	*status_page++ = 0;
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		dd->pport[pidx].statusp = status_page;
		*status_page++ = 0;
	}
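	/*
	 * Layout of the DMA page at this point: the pioavail registers
	 * DMA'ed by the chip sit at the start, followed (at the
	 * cache-line-aligned offset computed above) by the device status
	 * word, then one status word per port, and finally the
	 * freeze-message buffer filling whatever remains of the page.
	 */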

	/*
	 * Setup buffer to hold freeze and other messages, accessible to
	 * apps, following statusp.  This is per-unit, not per port.
	 */
	dd->freezemsg = (char *) status_page;
	*dd->freezemsg = 0;
	/* length of msg buffer is "whatever is left" */
	ret = (char *) status_page - (char *) dd->pioavailregs_dma;
	dd->freezelen = PAGE_SIZE - ret;

	ret = 0;

done:
	return ret;
}

/**
 * init_shadow_tids - allocate the shadow TID array
 * @dd: the qlogic_ib device
 *
 * allocate the shadow TID array, so we can qib_munlock previous
 * entries.  It may make more sense to move the pageshadow to the
 * ctxt data structure, so we only allocate memory for ctxts actually
 * in use, since we are at 8k per ctxt now.
 * We don't want failures here to prevent use of the driver/chip,
 * so no return value.
 */
static void init_shadow_tids(struct qib_devdata *dd)
{
	struct page **pages;
	dma_addr_t *addrs;
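
	/*
	 * Size check for the "8k per ctxt" above (hypothetical values,
	 * assuming an 8-byte dma_addr_t on a 64-bit kernel): 512 TIDs per
	 * ctxt costs 512 * 8 bytes for the page pointers plus 512 * 8
	 * bytes for the DMA handles, i.e. 8KB per configured context
	 * across the two vzalloc()s below.
	 */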
	pages = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
	if (!pages) {
		qib_dev_err(dd, "failed to allocate shadow page * "
			    "array, no expected sends!\n");
		goto bail;
	}

	addrs = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
	if (!addrs) {
		qib_dev_err(dd, "failed to allocate shadow dma handle "
			    "array, no expected sends!\n");
		goto bail_free;
	}

	dd->pageshadow = pages;
	dd->physshadow = addrs;
	return;

bail_free:
	vfree(pages);
bail:
	dd->pageshadow = NULL;
}

/*
 * Do initialization for device that is only needed on
 * first detect, not on resets.
 */
static int loadtime_init(struct qib_devdata *dd)
{
	int ret = 0;

	if (((dd->revision >> QLOGIC_IB_R_SOFTWARE_SHIFT) &
	     QLOGIC_IB_R_SOFTWARE_MASK) != QIB_CHIP_SWVERSION) {
		qib_dev_err(dd, "Driver only handles version %d, "
			    "chip swversion is %d (%llx), failing\n",
			    QIB_CHIP_SWVERSION,
			    (int)(dd->revision >>
				QLOGIC_IB_R_SOFTWARE_SHIFT) &
				QLOGIC_IB_R_SOFTWARE_MASK,
			    (unsigned long long) dd->revision);
		ret = -ENOSYS;
		goto done;
	}

	if (dd->revision & QLOGIC_IB_R_EMULATOR_MASK)
		qib_devinfo(dd->pcidev, "%s", dd->boardversion);

	spin_lock_init(&dd->pioavail_lock);
	spin_lock_init(&dd->sendctrl_lock);
	spin_lock_init(&dd->uctxt_lock);
	spin_lock_init(&dd->qib_diag_trans_lock);
	spin_lock_init(&dd->eep_st_lock);
	mutex_init(&dd->eep_lock);

	if (qib_mini_init)
		goto done;

	ret = init_pioavailregs(dd);
	init_shadow_tids(dd);

	qib_get_eeprom_info(dd);

	/* setup time (don't start yet) to verify we got interrupt */
	init_timer(&dd->intrchk_timer);
	dd->intrchk_timer.function = verify_interrupt;
	dd->intrchk_timer.data = (unsigned long) dd;

done:
	return ret;
}

/**
 * init_after_reset - re-initialize after a reset
 * @dd: the qlogic_ib device
 *
 * sanity check at least some of the values after reset, and
 * ensure no receive or transmit (explicitly, in case reset
 * failed)
 */
static int init_after_reset(struct qib_devdata *dd)
{
	int i;

	/*
	 * Ensure chip does no sends or receives, tail updates, or
	 * pioavail updates while we re-initialize.  This is mostly
	 * for the driver data structures, not chip registers.
	 */
	for (i = 0; i < dd->num_pports; ++i) {
		/*
		 * ctxt == -1 means "all contexts". Only really safe for
		 * _dis_abling things, as here.
		 */
		dd->f_rcvctrl(dd->pport + i, QIB_RCVCTRL_CTXT_DIS |
			      QIB_RCVCTRL_INTRAVAIL_DIS |
			      QIB_RCVCTRL_TAILUPD_DIS, -1);
		/* Redundant across ports for some, but no big deal.  */
		dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_DIS |
			       QIB_SENDCTRL_AVAIL_DIS);
	}

	return 0;
}

static void enable_chip(struct qib_devdata *dd)
{
	u64 rcvmask;
	int i;

	/*
	 * Enable PIO send, and update of PIOavail regs to memory.
	 */
	for (i = 0; i < dd->num_pports; ++i)
		dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_ENB |
			       QIB_SENDCTRL_AVAIL_ENB);
	/*
	 * Enable kernel ctxts' receive and receive interrupt.
	 * Other ctxts done as user opens and inits them.
	 */
	rcvmask = QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_INTRAVAIL_ENB;
	rcvmask |= (dd->flags & QIB_NODMA_RTAIL) ?
		  QIB_RCVCTRL_TAILUPD_DIS : QIB_RCVCTRL_TAILUPD_ENB;
	for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
		struct qib_ctxtdata *rcd = dd->rcd[i];

		if (rcd)
			dd->f_rcvctrl(rcd->ppd, rcvmask, i);
	}
}

static void verify_interrupt(unsigned long opaque)
{
	struct qib_devdata *dd = (struct qib_devdata *) opaque;

	if (!dd)
		return; /* being torn down */

	/*
	 * If we don't have a lid or any interrupts, let the user know and
	 * don't bother checking again.
	 */
	if (dd->int_counter == 0) {
		if (!dd->f_intr_fallback(dd))
			dev_err(&dd->pcidev->dev, "No interrupts detected, "
				"not usable.\n");
		else /* re-arm the timer to see if fallback works */
			mod_timer(&dd->intrchk_timer, jiffies + HZ/2);
	}
}

static void init_piobuf_state(struct qib_devdata *dd)
{
	int i, pidx;
	u32 uctxts;

	/*
	 * Ensure all buffers are free, and fifos empty.  Buffers
	 * are common, so only do once for port 0.
	 *
	 * After enable and qib_chg_pioavailkernel so we can safely
	 * enable pioavail updates and PIOENABLE.  After this, packets
	 * are ready and able to go out.
	 */
	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_ALL);
	for (pidx = 0; pidx < dd->num_pports; ++pidx)
		dd->f_sendctrl(dd->pport + pidx, QIB_SENDCTRL_FLUSH);

	/*
	 * If not all sendbufs are used, add the one to each of the lower
	 * numbered contexts.  pbufsctxt and lastctxt_piobuf are
	 * calculated in chip-specific code because it may cause some
	 * chip-specific adjustments to be made.
	 */
	uctxts = dd->cfgctxts - dd->first_user_ctxt;
	dd->ctxts_extrabuf = dd->pbufsctxt ?
		dd->lastctxt_piobuf - (dd->pbufsctxt * uctxts) : 0;
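	/*
	 * Worked example (hypothetical counts): with 8 user ctxts at
	 * pbufsctxt = 13 buffers each and lastctxt_piobuf = 110,
	 * ctxts_extrabuf = 110 - 104 = 6, so the 6 lowest-numbered user
	 * contexts can each be given one extra send buffer.
	 */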

	/*
	 * Set up the shadow copies of the piobufavail registers,
	 * which we compare against the chip registers for now, and
	 * the in memory DMA'ed copies of the registers.
	 * By now pioavail updates to memory should have occurred, so
	 * copy them into our working/shadow registers; this is in
	 * case something went wrong with abort, but mostly to get the
	 * initial values of the generation bit correct.
	 */
	for (i = 0; i < dd->pioavregs; i++) {
		__le64 tmp;

		tmp = dd->pioavailregs_dma[i];
		/*
		 * Don't need to worry about pioavailkernel here
		 * because we will call qib_chg_pioavailkernel() later
		 * in initialization, to busy out buffers as needed.
		 */
		dd->pioavailshadow[i] = le64_to_cpu(tmp);
	}
	while (i < ARRAY_SIZE(dd->pioavailshadow))
		dd->pioavailshadow[i++] = 0; /* for debugging sanity */

	/* after pioavailshadow is setup */
	qib_chg_pioavailkernel(dd, 0, dd->piobcnt2k + dd->piobcnt4k,
			       TXCHK_CHG_TYPE_KERN, NULL);
	dd->f_initvl15_bufs(dd);
}

/**
 * qib_init - do the actual initialization sequence on the chip
 * @dd: the qlogic_ib device
 * @reinit: reinitializing, so don't allocate new memory
 *
 * Do the actual initialization sequence on the chip.  This is done
 * both from the init routine called from the PCI infrastructure, and
 * when we reset the chip, or detect that it was reset internally,
 * or it's administratively re-enabled.
 *
 * Memory allocation here and in called routines is only done in
 * the first case (reinit == 0).  We have to be careful, because even
 * without memory allocation, we need to re-write all the chip registers
 * TIDs, etc. after the reset or enable has completed.
 */
int qib_init(struct qib_devdata *dd, int reinit)
{
	int ret = 0, pidx, lastfail = 0;
	u32 portok = 0;
	unsigned i;
	struct qib_ctxtdata *rcd;
	struct qib_pportdata *ppd;
	unsigned long flags;

	/* Set linkstate to unknown, so we can watch for a transition. */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~(QIBL_LINKACTIVE | QIBL_LINKARMED |
				 QIBL_LINKDOWN | QIBL_LINKINIT |
				 QIBL_LINKV);
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	}

	if (reinit)
		ret = init_after_reset(dd);
	else
		ret = loadtime_init(dd);
	if (ret)
		goto done;

	/* Bypass most chip-init, to get to device creation */
	if (qib_mini_init)
		return 0;

	ret = dd->f_late_initreg(dd);
	if (ret)
		goto done;

	/* dd->rcd can be NULL if early init failed */
	for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
		/*
		 * Set up the (kernel) rcvhdr queue and egr TIDs.  If doing
		 * re-init, the simplest way to handle this is to free
		 * existing, and re-allocate.
		 * Need to re-create rest of ctxt 0 ctxtdata as well.
		 */
		rcd = dd->rcd[i];
		if (!rcd)
			continue;

		lastfail = qib_create_rcvhdrq(dd, rcd);
		if (!lastfail)
			lastfail = qib_setup_eagerbufs(rcd);
		if (lastfail) {
			qib_dev_err(dd, "failed to allocate kernel ctxt's "
				    "rcvhdrq and/or egr bufs\n");
			continue;
		}
	}

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		int mtu;
		if (lastfail)
			ret = lastfail;
		ppd = dd->pport + pidx;
		mtu = ib_mtu_enum_to_int(qib_ibmtu);
		if (mtu == -1) {
			mtu = QIB_DEFAULT_MTU;
			qib_ibmtu = 0; /* don't leave invalid value */
		}
		/* set max we can ever have for this driver load */
		ppd->init_ibmaxlen = min(mtu > 2048 ?
					 dd->piosize4k : dd->piosize2k,
					 dd->rcvegrbufsize +
					 (dd->rcvhdrentsize << 2));
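		/*
		 * E.g. (hypothetical sizes): mtu 4096 > 2048 selects the
		 * 4K PIO buffers, so init_ibmaxlen = min(piosize4k,
		 * rcvegrbufsize + 4 * rcvhdrentsize), the header entry
		 * size being in 32-bit words.
		 */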
		/*
		 * Have to initialize ibmaxlen, but this will normally
		 * change immediately in qib_set_mtu().
		 */
		ppd->ibmaxlen = ppd->init_ibmaxlen;
		qib_set_mtu(ppd, mtu);

		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags |= QIBL_IB_LINK_DISABLED;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);

		lastfail = dd->f_bringup_serdes(ppd);
		if (lastfail) {
			qib_devinfo(dd->pcidev,
				    "Failed to bringup IB port %u\n", ppd->port);
			lastfail = -ENETDOWN;
			continue;
		}

		portok++;
	}

	if (!portok) {
		/* none of the ports initialized */
		if (!ret && lastfail)
			ret = lastfail;
		else if (!ret)
			ret = -ENETDOWN;
		/* but continue on, so we can debug cause */
	}

	enable_chip(dd);

	init_piobuf_state(dd);

done:
	if (!ret) {
		/* chip is OK for user apps; mark it as initialized */
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;
			/*
			 * Set status even if port serdes is not initialized
			 * so that diags will work.
			 */
			*ppd->statusp |= QIB_STATUS_CHIP_PRESENT |
				QIB_STATUS_INITTED;
			if (!ppd->link_speed_enabled)
				continue;
			if (dd->flags & QIB_HAS_SEND_DMA)
				ret = qib_setup_sdma(ppd);
			init_timer(&ppd->hol_timer);
			ppd->hol_timer.function = qib_hol_event;
			ppd->hol_timer.data = (unsigned long)ppd;
			ppd->hol_state = QIB_HOL_UP;
		}

		/* now we can enable all interrupts from the chip */
		dd->f_set_intr_state(dd, 1);

		/*
		 * Setup to verify we get an interrupt, and fallback
		 * to an alternate if necessary and possible.
		 */
		mod_timer(&dd->intrchk_timer, jiffies + HZ/2);
		/* start stats retrieval timer */
		mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
	}

	/* if ret is non-zero, we probably should do some cleanup here... */
	return ret;
}

/*
 * These next two routines are placeholders in case we don't have per-arch
 * code for controlling write combining.  If explicit control of write
 * combining is not available, performance will probably be awful.
 */

int __attribute__((weak)) qib_enable_wc(struct qib_devdata *dd)
{
	return -EOPNOTSUPP;
}

void __attribute__((weak)) qib_disable_wc(struct qib_devdata *dd)
{
}

static inline struct qib_devdata *__qib_lookup(int unit)
{
	return idr_find(&qib_unit_table, unit);
}

struct qib_devdata *qib_lookup(int unit)
{
	struct qib_devdata *dd;
	unsigned long flags;

	spin_lock_irqsave(&qib_devs_lock, flags);
	dd = __qib_lookup(unit);
	spin_unlock_irqrestore(&qib_devs_lock, flags);

	return dd;
}

/*
 * Stop the timers during unit shutdown, or after an error late
 * in initialization.
 */
static void qib_stop_timers(struct qib_devdata *dd)
{
	struct qib_pportdata *ppd;
	int pidx;

	if (dd->stats_timer.data) {
		del_timer_sync(&dd->stats_timer);
		dd->stats_timer.data = 0;
	}
	if (dd->intrchk_timer.data) {
		del_timer_sync(&dd->intrchk_timer);
		dd->intrchk_timer.data = 0;
	}
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->hol_timer.data)
			del_timer_sync(&ppd->hol_timer);
		if (ppd->led_override_timer.data) {
			del_timer_sync(&ppd->led_override_timer);
			atomic_set(&ppd->led_override_timer_active, 0);
		}
		if (ppd->symerr_clear_timer.data)
			del_timer_sync(&ppd->symerr_clear_timer);
	}
}

/**
 * qib_shutdown_device - shut down a device
 * @dd: the qlogic_ib device
 *
 * This is called to make the device quiet when we are about to
 * unload the driver, and also when the device is administratively
 * disabled.  It does not free any data structures.
 * Everything it does has to be setup again by qib_init(dd, 1)
 */
static void qib_shutdown_device(struct qib_devdata *dd)
{
	struct qib_pportdata *ppd;
	unsigned pidx;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		spin_lock_irq(&ppd->lflags_lock);
		ppd->lflags &= ~(QIBL_LINKDOWN | QIBL_LINKINIT |
				 QIBL_LINKARMED | QIBL_LINKACTIVE |
				 QIBL_LINKV);
		spin_unlock_irq(&ppd->lflags_lock);
		*ppd->statusp &= ~(QIB_STATUS_IB_CONF | QIB_STATUS_IB_READY);
	}
	dd->flags &= ~QIB_INITTED;

	/* mask interrupts, but not errors */
	dd->f_set_intr_state(dd, 0);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		dd->f_rcvctrl(ppd, QIB_RCVCTRL_TAILUPD_DIS |
				   QIB_RCVCTRL_CTXT_DIS |
				   QIB_RCVCTRL_INTRAVAIL_DIS |
				   QIB_RCVCTRL_PKEY_ENB, -1);
		/*
		 * Gracefully stop all sends allowing any in progress to
		 * trickle out first.
		 */
		dd->f_sendctrl(ppd, QIB_SENDCTRL_CLEAR);
	}

	/*
	 * Enough for anything that's going to trickle out to have actually
	 * done so.
	 */
	udelay(20);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		dd->f_setextled(ppd, 0); /* make sure LEDs are off */

		if (dd->flags & QIB_HAS_SEND_DMA)
			qib_teardown_sdma(ppd);

		dd->f_sendctrl(ppd, QIB_SENDCTRL_AVAIL_DIS |
				    QIB_SENDCTRL_SEND_DIS);
		/*
		 * Clear SerdesEnable.
		 * We can't count on interrupts since we are stopping.
		 */
		dd->f_quiet_serdes(ppd);
	}

	qib_update_eeprom_log(dd);
}

/**
 * qib_free_ctxtdata - free a context's allocated data
 * @dd: the qlogic_ib device
 * @rcd: the ctxtdata structure
 *
 * free up any allocated data for a context
 * This should not touch anything that would affect a simultaneous
 * re-allocation of context data, because it is called after qib_mutex
 * is released (and can be called from reinit as well).
 * It should never change any chip state, or global driver state.
 */
void qib_free_ctxtdata(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
{
	if (!rcd)
		return;

	if (rcd->rcvhdrq) {
		dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size,
				  rcd->rcvhdrq, rcd->rcvhdrq_phys);
		rcd->rcvhdrq = NULL;
		if (rcd->rcvhdrtail_kvaddr) {
			dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
					  rcd->rcvhdrtail_kvaddr,
					  rcd->rcvhdrqtailaddr_phys);
			rcd->rcvhdrtail_kvaddr = NULL;
		}
	}
	if (rcd->rcvegrbuf) {
		unsigned e;

		for (e = 0; e < rcd->rcvegrbuf_chunks; e++) {
			void *base = rcd->rcvegrbuf[e];
			size_t size = rcd->rcvegrbuf_size;

			dma_free_coherent(&dd->pcidev->dev, size,
					  base, rcd->rcvegrbuf_phys[e]);
		}
		kfree(rcd->rcvegrbuf);
		rcd->rcvegrbuf = NULL;
		kfree(rcd->rcvegrbuf_phys);
		rcd->rcvegrbuf_phys = NULL;
		rcd->rcvegrbuf_chunks = 0;
	}

	kfree(rcd->tid_pg_list);
	vfree(rcd->user_event_mask);
	vfree(rcd->subctxt_uregbase);
	vfree(rcd->subctxt_rcvegrbuf);
	vfree(rcd->subctxt_rcvhdr_base);
	kfree(rcd);
}

/*
 * Perform a PIO buffer bandwidth write test, to verify proper system
 * configuration.  Even when all the setup calls work, occasionally
 * BIOS or other issues can prevent write combining from working, or
 * can cause other bandwidth problems to the chip.
 *
 * This test simply writes the same buffer over and over again, and
 * measures close to the peak bandwidth to the chip (not testing
 * data bandwidth to the wire).  On chips that use an address-based
 * trigger to send packets to the wire, this is easy.  On chips that
 * use a count to trigger, we want to make sure that the packet doesn't
 * go out on the wire, or trigger flow control checks.
 */
static void qib_verify_pioperf(struct qib_devdata *dd)
{
	u32 pbnum, cnt, lcnt;
	u32 __iomem *piobuf;
	u32 *addr;
	u64 msecs, emsecs;

	piobuf = dd->f_getsendbuf(dd->pport, 0ULL, &pbnum);
	if (!piobuf) {
		qib_devinfo(dd->pcidev,
			    "No PIObufs for checking perf, skipping\n");
		return;
	}

	/*
	 * Enough to give us a reasonable test, less than piobuf size, and
	 * likely multiple of store buffer length.
	 */
	cnt = 1024;

	addr = vmalloc(cnt);
	if (!addr) {
		qib_devinfo(dd->pcidev,
			    "Couldn't get memory for checking PIO perf,"
			    " skipping\n");
		goto done;
	}

	preempt_disable();  /* we want reasonably accurate elapsed time */
	msecs = 1 + jiffies_to_msecs(jiffies);
	for (lcnt = 0; lcnt < 10000U; lcnt++) {
		/* wait until we cross msec boundary */
		if (jiffies_to_msecs(jiffies) >= msecs)
			break;
		udelay(1);
	}

	dd->f_set_armlaunch(dd, 0);

	/*
	 * length 0, no dwords actually sent
	 */
	writeq(0, piobuf);
	qib_flush_wc();

	/*
	 * This is only roughly accurate, since even with preempt we
	 * still take interrupts that could take a while.  Running for
	 * >= 5 msec seems to get us "close enough" to accurate values.
	 */
	msecs = jiffies_to_msecs(jiffies);
	for (emsecs = lcnt = 0; emsecs <= 5UL; lcnt++) {
		qib_pio_copy(piobuf + 64, addr, cnt >> 2);
		emsecs = jiffies_to_msecs(jiffies) - msecs;
	}
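
	/*
	 * Units check: each pass copies cnt = 1024 bytes, so lcnt is in
	 * KiB and emsecs in milliseconds; lcnt / emsecs KiB/ms is
	 * (approximately) MiB/sec, and the 1 GiB/sec threshold below is
	 * lcnt >= emsecs * 1024.
	 */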
	/* 1 GiB/sec, slightly over IB SDR line rate */
	if (lcnt < (emsecs * 1024U))
		qib_dev_err(dd,
			    "Performance problem: bandwidth to PIO buffers is "
			    "only %u MiB/sec\n",
			    lcnt / (u32) emsecs);

	preempt_enable();

	vfree(addr);

done:
	/* disarm piobuf, so it's available again */
	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(pbnum));
	qib_sendbuf_done(dd, pbnum);
	dd->f_set_armlaunch(dd, 1);
}

void qib_free_devdata(struct qib_devdata *dd)
{
	unsigned long flags;

	spin_lock_irqsave(&qib_devs_lock, flags);
	idr_remove(&qib_unit_table, dd->unit);
	list_del(&dd->list);
	spin_unlock_irqrestore(&qib_devs_lock, flags);

	ib_dealloc_device(&dd->verbs_dev.ibdev);
}

/*
 * Allocate our primary per-unit data structure.  Must be done via verbs
 * allocator, because the verbs cleanup process both does cleanup and
 * free of the data structure.
 * "extra" is for chip-specific data.
 *
 * Use the idr mechanism to get a unit number for this unit.
 */
struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
{
	unsigned long flags;
	struct qib_devdata *dd;
	int ret;

	if (!idr_pre_get(&qib_unit_table, GFP_KERNEL)) {
		dd = ERR_PTR(-ENOMEM);
		goto bail;
	}

	dd = (struct qib_devdata *) ib_alloc_device(sizeof(*dd) + extra);
	if (!dd) {
		dd = ERR_PTR(-ENOMEM);
		goto bail;
	}

	spin_lock_irqsave(&qib_devs_lock, flags);
	ret = idr_get_new(&qib_unit_table, dd, &dd->unit);
	if (ret >= 0)
		list_add(&dd->list, &qib_dev_list);
	spin_unlock_irqrestore(&qib_devs_lock, flags);

	if (ret < 0) {
		qib_early_err(&pdev->dev,
			      "Could not allocate unit ID: error %d\n", -ret);
		ib_dealloc_device(&dd->verbs_dev.ibdev);
		dd = ERR_PTR(ret);
		goto bail;
	}

	if (!qib_cpulist_count) {
		u32 count = num_online_cpus();
		qib_cpulist = kzalloc(BITS_TO_LONGS(count) *
				      sizeof(long), GFP_KERNEL);
		if (qib_cpulist)
			qib_cpulist_count = count;
		else
			qib_early_err(&pdev->dev, "Could not alloc cpulist "
				      "info, cpu affinity might be wrong\n");
	}

bail:
	return dd;
}

/*
 * Called from freeze mode handlers, and from PCI error
 * reporting code.  Should be paranoid about state of
 * system and data structures.
 */
void qib_disable_after_error(struct qib_devdata *dd)
{
	if (dd->flags & QIB_INITTED) {
		u32 pidx;

		dd->flags &= ~QIB_INITTED;
		if (dd->pport)
			for (pidx = 0; pidx < dd->num_pports; ++pidx) {
				struct qib_pportdata *ppd;

				ppd = dd->pport + pidx;
				if (dd->flags & QIB_PRESENT) {
					qib_set_linkstate(ppd,
						QIB_IB_LINKDOWN_DISABLE);
					dd->f_setextled(ppd, 0);
				}
				*ppd->statusp &= ~QIB_STATUS_IB_READY;
			}
	}

	/*
	 * Mark as having had an error for driver, and also
	 * for /sys and status word mapped to user programs.
	 * This marks unit as not usable, until reset.
	 */
	if (dd->devstatusp)
		*dd->devstatusp |= QIB_STATUS_HWERROR;
}

static void __devexit qib_remove_one(struct pci_dev *);
static int __devinit qib_init_one(struct pci_dev *,
				  const struct pci_device_id *);

#define DRIVER_LOAD_MSG "QLogic " QIB_DRV_NAME " loaded: "
#define PFX QIB_DRV_NAME ": "

static DEFINE_PCI_DEVICE_TABLE(qib_pci_tbl) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_QLOGIC_IB_6120) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7220) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7322) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, qib_pci_tbl);

struct pci_driver qib_driver = {
	.name = QIB_DRV_NAME,
	.probe = qib_init_one,
	.remove = __devexit_p(qib_remove_one),
	.id_table = qib_pci_tbl,
	.err_handler = &qib_pci_err_handler,
};

/*
 * Do all the generic driver unit- and chip-independent memory
 * allocation and initialization.
 */
static int __init qlogic_ib_init(void)
{
	int ret;

	ret = qib_dev_init();
	if (ret)
		goto bail;

	qib_cq_wq = create_singlethread_workqueue("qib_cq");
	if (!qib_cq_wq) {
		ret = -ENOMEM;
		goto bail_dev;
	}

	/*
	 * These must be called before the driver is registered with
	 * the PCI subsystem.
	 */
	idr_init(&qib_unit_table);
	if (!idr_pre_get(&qib_unit_table, GFP_KERNEL)) {
		printk(KERN_ERR QIB_DRV_NAME ": idr_pre_get() failed\n");
		ret = -ENOMEM;
		goto bail_cq_wq;
	}

	ret = pci_register_driver(&qib_driver);
	if (ret < 0) {
		printk(KERN_ERR QIB_DRV_NAME
		       ": Unable to register driver: error %d\n", -ret);
		goto bail_unit;
	}

	/* not fatal if it doesn't work */
	if (qib_init_qibfs())
		printk(KERN_ERR QIB_DRV_NAME ": Unable to register ipathfs\n");
	goto bail; /* all OK */

bail_unit:
	idr_destroy(&qib_unit_table);
bail_cq_wq:
	destroy_workqueue(qib_cq_wq);
bail_dev:
	qib_dev_cleanup();
bail:
	return ret;
}

module_init(qlogic_ib_init);

/*
 * Do the non-unit driver cleanup, memory free, etc. at unload.
 */
static void __exit qlogic_ib_cleanup(void)
{
	int ret;

	ret = qib_exit_qibfs();
	if (ret)
		printk(KERN_ERR QIB_DRV_NAME ": "
			"Unable to cleanup counter filesystem: "
			"error %d\n", -ret);

	pci_unregister_driver(&qib_driver);

	destroy_workqueue(qib_cq_wq);

	qib_cpulist_count = 0;
	kfree(qib_cpulist);

	idr_destroy(&qib_unit_table);
	qib_dev_cleanup();
}

module_exit(qlogic_ib_cleanup);

/* this can only be called after a successful initialization */
static void cleanup_device_data(struct qib_devdata *dd)
{
	int ctxt;
	int pidx;
	struct qib_ctxtdata **tmp;
	unsigned long flags;

	/* users can't do anything more with chip */
	for (pidx = 0; pidx < dd->num_pports; ++pidx)
		if (dd->pport[pidx].statusp)
			*dd->pport[pidx].statusp &= ~QIB_STATUS_CHIP_PRESENT;

	if (!qib_wc_pat)
		qib_disable_wc(dd);

	if (dd->pioavailregs_dma) {
		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
				  (void *) dd->pioavailregs_dma,
				  dd->pioavailregs_phys);
		dd->pioavailregs_dma = NULL;
	}

	if (dd->pageshadow) {
		struct page **tmpp = dd->pageshadow;
		dma_addr_t *tmpd = dd->physshadow;
		int i, cnt = 0;

		for (ctxt = 0; ctxt < dd->cfgctxts; ctxt++) {
			int ctxt_tidbase = ctxt * dd->rcvtidcnt;
			int maxtid = ctxt_tidbase + dd->rcvtidcnt;

			for (i = ctxt_tidbase; i < maxtid; i++) {
				if (!tmpp[i])
					continue;
				pci_unmap_page(dd->pcidev, tmpd[i],
					       PAGE_SIZE, PCI_DMA_FROMDEVICE);
				qib_release_user_pages(&tmpp[i], 1);
				tmpp[i] = NULL;
				cnt++;
			}
		}

		tmpp = dd->pageshadow;
		dd->pageshadow = NULL;
		vfree(tmpp);
	}

	/*
	 * Free any resources still in use (usually just kernel contexts)
	 * at unload; we do for ctxtcnt, because that's what we allocate.
	 * We acquire lock to be really paranoid that rcd isn't being
	 * accessed from some interrupt-related code (that should not happen,
	 * but best to be sure).
	 */
	spin_lock_irqsave(&dd->uctxt_lock, flags);
	tmp = dd->rcd;
	dd->rcd = NULL;
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);
	for (ctxt = 0; tmp && ctxt < dd->ctxtcnt; ctxt++) {
		struct qib_ctxtdata *rcd = tmp[ctxt];

		tmp[ctxt] = NULL; /* debugging paranoia */
		qib_free_ctxtdata(dd, rcd);
	}
	kfree(tmp);
	kfree(dd->boardname);
}

/*
 * Clean up on unit shutdown, or error during unit load after
 * successful initialization.
 */
static void qib_postinit_cleanup(struct qib_devdata *dd)
{
	/*
	 * Clean up chip-specific stuff.
	 * We check for NULL here, because it's outside
	 * the kregbase check, and we need to call it
	 * after the free_irq.  Thus it's possible that
	 * the function pointers were never initialized.
	 */
	if (dd->f_cleanup)
		dd->f_cleanup(dd);

	qib_pcie_ddcleanup(dd);

	cleanup_device_data(dd);

	qib_free_devdata(dd);
}

static int __devinit qib_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	int ret, j, pidx, initfail;
	struct qib_devdata *dd = NULL;

	ret = qib_pcie_init(pdev, ent);
	if (ret)
		goto bail;

	/*
	 * Do device-specific initialization, function table setup, dd
	 * allocation, etc.
	 */
	switch (ent->device) {
	case PCI_DEVICE_ID_QLOGIC_IB_6120:
#ifdef CONFIG_PCI_MSI
		dd = qib_init_iba6120_funcs(pdev, ent);
#else
		qib_early_err(&pdev->dev, "QLogic PCIE device 0x%x cannot "
			      "work if CONFIG_PCI_MSI is not enabled\n",
			      ent->device);
		dd = ERR_PTR(-ENODEV);
#endif
		break;

	case PCI_DEVICE_ID_QLOGIC_IB_7220:
		dd = qib_init_iba7220_funcs(pdev, ent);
		break;

	case PCI_DEVICE_ID_QLOGIC_IB_7322:
		dd = qib_init_iba7322_funcs(pdev, ent);
		break;

	default:
		qib_early_err(&pdev->dev, "Failing on unknown QLogic "
			      "deviceid 0x%x\n", ent->device);
		ret = -ENODEV;
	}

	if (IS_ERR(dd))
		ret = PTR_ERR(dd);
	if (ret)
		goto bail; /* error already printed */

	/* do the generic initialization */
	initfail = qib_init(dd, 0);

	ret = qib_register_ib_device(dd);

	/*
	 * Now ready for use.  This should be cleared whenever we
	 * detect a reset, or initiate one.  If earlier failure,
	 * we still create devices, so diags, etc. can be used
	 * to determine cause of problem.
	 */
	if (!qib_mini_init && !initfail && !ret)
		dd->flags |= QIB_INITTED;

	j = qib_device_create(dd);
	if (j)
		qib_dev_err(dd, "Failed to create /dev devices: %d\n", -j);
	j = qibfs_add(dd);
	if (j)
		qib_dev_err(dd, "Failed filesystem setup for counters: %d\n",
			    -j);

	if (qib_mini_init || initfail || ret) {
		qib_stop_timers(dd);
		flush_workqueue(ib_wq);
		for (pidx = 0; pidx < dd->num_pports; ++pidx)
			dd->f_quiet_serdes(dd->pport + pidx);
		if (qib_mini_init)
			goto bail;
		if (!j) {
			(void) qibfs_remove(dd);
			qib_device_remove(dd);
		}
		if (!ret)
			qib_unregister_ib_device(dd);
		qib_postinit_cleanup(dd);
		if (initfail)
			ret = initfail;
		goto bail;
	}

	if (!qib_wc_pat) {
		ret = qib_enable_wc(dd);
		if (ret) {
			qib_dev_err(dd, "Write combining not enabled "
				    "(err %d): performance may be poor\n",
				    -ret);
			ret = 0;
		}
	}

	qib_verify_pioperf(dd);
bail:
	return ret;
}

static void __devexit qib_remove_one(struct pci_dev *pdev)
{
	struct qib_devdata *dd = pci_get_drvdata(pdev);
	int ret;

	/* unregister from IB core */
	qib_unregister_ib_device(dd);

	/*
	 * Disable the IB link, disable interrupts on the device,
	 * clear dma engines, etc.
	 */
	if (!qib_mini_init)
		qib_shutdown_device(dd);

	qib_stop_timers(dd);

	/* wait until all of our (qsfp) queue_work() calls complete */
	flush_workqueue(ib_wq);

	ret = qibfs_remove(dd);
	if (ret)
		qib_dev_err(dd, "Failed counters filesystem cleanup: %d\n",
			    -ret);

	qib_device_remove(dd);

	qib_postinit_cleanup(dd);
}

/**
 * qib_create_rcvhdrq - create a receive header queue
 * @dd: the qlogic_ib device
 * @rcd: the context data
 *
 * This must be contiguous memory (from an i/o perspective), and must be
 * DMA'able (which means for some systems, it will go through an IOMMU,
 * or be forced into a low address range).
 */
int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
{
	unsigned amt;

	if (!rcd->rcvhdrq) {
		dma_addr_t phys_hdrqtail;
		gfp_t gfp_flags;

		amt = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
			    sizeof(u32), PAGE_SIZE);
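		/*
		 * E.g. (hypothetical counts): rcvhdrcnt = 1024 entries of
		 * rcvhdrentsize = 16 32-bit words each gives
		 * 1024 * 16 * 4 = 64KB, already a multiple of PAGE_SIZE.
		 */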
		gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ?
			GFP_USER : GFP_KERNEL;
		rcd->rcvhdrq = dma_alloc_coherent(
			&dd->pcidev->dev, amt, &rcd->rcvhdrq_phys,
			gfp_flags | __GFP_COMP);

		if (!rcd->rcvhdrq) {
			qib_dev_err(dd, "attempt to allocate %d bytes "
				    "for ctxt %u rcvhdrq failed\n",
				    amt, rcd->ctxt);
			goto bail;
		}

		if (rcd->ctxt >= dd->first_user_ctxt) {
			rcd->user_event_mask = vmalloc_user(PAGE_SIZE);
			if (!rcd->user_event_mask)
				goto bail_free_hdrq;
		}

		if (!(dd->flags & QIB_NODMA_RTAIL)) {
			rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(
				&dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
				gfp_flags);
			if (!rcd->rcvhdrtail_kvaddr)
				goto bail_free;
			rcd->rcvhdrqtailaddr_phys = phys_hdrqtail;
		}

		rcd->rcvhdrq_size = amt;
	}

	/* clear for security and sanity on each use */
	memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
	if (rcd->rcvhdrtail_kvaddr)
		memset(rcd->rcvhdrtail_kvaddr, 0, PAGE_SIZE);
	return 0;

bail_free:
	qib_dev_err(dd, "attempt to allocate 1 page for ctxt %u "
		    "rcvhdrqtailaddr failed\n", rcd->ctxt);
	vfree(rcd->user_event_mask);
	rcd->user_event_mask = NULL;
bail_free_hdrq:
	dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
			  rcd->rcvhdrq_phys);
	rcd->rcvhdrq = NULL;
bail:
	return -ENOMEM;
}

/**
 * qib_setup_eagerbufs - allocate eager buffers, both kernel and user contexts.
 * @rcd: the context we are setting up.
 *
 * Allocate the eager TID buffers and program them into chip.
 * They are no longer completely contiguous, we do multiple allocation
 * calls.  Otherwise we get the OOM code involved, by asking for too
 * much per call, with disastrous results on some kernels.
 */
int qib_setup_eagerbufs(struct qib_ctxtdata *rcd)
{
	struct qib_devdata *dd = rcd->dd;
	unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff;
	size_t size;
	gfp_t gfp_flags;

	/*
	 * GFP_USER, but without GFP_FS, so buffer cache can be
	 * coalesced (we hope); otherwise, even at order 4,
	 * heavy filesystem activity makes these fail, and we can
	 * use compound pages.
	 */
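	/*
	 * Sizing aside: with the 32KB chunk size set in
	 * qib_create_ctxtdata(), each dma_alloc_coherent() below is an
	 * order-3 request on 4K-page systems, small enough to survive
	 * the fragmentation that would doom one large contiguous buffer.
	 */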
	gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP;

	egrcnt = rcd->rcvegrcnt;
	egroff = rcd->rcvegr_tid_base;
	egrsize = dd->rcvegrbufsize;

	chunk = rcd->rcvegrbuf_chunks;
	egrperchunk = rcd->rcvegrbufs_perchunk;
	size = rcd->rcvegrbuf_size;
	if (!rcd->rcvegrbuf) {
		rcd->rcvegrbuf =
			kzalloc(chunk * sizeof(rcd->rcvegrbuf[0]),
				GFP_KERNEL);
		if (!rcd->rcvegrbuf)
			goto bail;
	}
	if (!rcd->rcvegrbuf_phys) {
		rcd->rcvegrbuf_phys =
			kmalloc(chunk * sizeof(rcd->rcvegrbuf_phys[0]),
				GFP_KERNEL);
		if (!rcd->rcvegrbuf_phys)
			goto bail_rcvegrbuf;
	}
	for (e = 0; e < rcd->rcvegrbuf_chunks; e++) {
		if (rcd->rcvegrbuf[e])
			continue;
		rcd->rcvegrbuf[e] =
			dma_alloc_coherent(&dd->pcidev->dev, size,
					   &rcd->rcvegrbuf_phys[e],
					   gfp_flags);
		if (!rcd->rcvegrbuf[e])
			goto bail_rcvegrbuf_phys;
	}

	rcd->rcvegr_phys = rcd->rcvegrbuf_phys[0];

	for (e = chunk = 0; chunk < rcd->rcvegrbuf_chunks; chunk++) {
		dma_addr_t pa = rcd->rcvegrbuf_phys[chunk];
		unsigned i;

		/* clear for security and sanity on each use */
		memset(rcd->rcvegrbuf[chunk], 0, size);

		for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) {
			dd->f_put_tid(dd, e + egroff +
					  (u64 __iomem *)
					  ((char __iomem *)
					   dd->kregbase +
					   dd->rcvegrbase),
					  RCVHQ_RCV_TYPE_EAGER, pa);
			pa += egrsize;
		}
		cond_resched(); /* don't hog the cpu */
	}

	return 0;

bail_rcvegrbuf_phys:
	for (e = 0; e < rcd->rcvegrbuf_chunks && rcd->rcvegrbuf[e]; e++)
		dma_free_coherent(&dd->pcidev->dev, size,
				  rcd->rcvegrbuf[e], rcd->rcvegrbuf_phys[e]);
	kfree(rcd->rcvegrbuf_phys);
	rcd->rcvegrbuf_phys = NULL;
bail_rcvegrbuf:
	kfree(rcd->rcvegrbuf);
	rcd->rcvegrbuf = NULL;
bail:
	return -ENOMEM;
}

/*
 * Note: Changes to this routine should be mirrored
 * for the diagnostics routine qib_remap_ioaddr32().
 * There is also related code for VL15 buffers in qib_init_7322_variables().
 * The teardown code that unmaps is in qib_pcie_ddcleanup()
 */
int init_chip_wc_pat(struct qib_devdata *dd, u32 vl15buflen)
{
	u64 __iomem *qib_kregbase = NULL;
	void __iomem *qib_piobase = NULL;
	u64 __iomem *qib_userbase = NULL;
	u64 qib_kreglen;
	u64 qib_pio2koffset = dd->piobufbase & 0xffffffff;
	u64 qib_pio4koffset = dd->piobufbase >> 32;
	u64 qib_pio2klen = dd->piobcnt2k * dd->palign;
	u64 qib_pio4klen = dd->piobcnt4k * dd->align4k;
	u64 qib_physaddr = dd->physaddr;
	u64 qib_piolen;
	u64 qib_userlen = 0;

	/*
	 * Free the old mapping because the kernel will try to reuse the
	 * old mapping and not create a new mapping with the
	 * write combining attribute.
	 */
	iounmap(dd->kregbase);
	dd->kregbase = NULL;

	/*
	 * Assumes chip address space looks like:
	 *	- kregs + sregs + cregs + uregs (in any order)
	 *	- piobufs (2K and 4K bufs in either order)
	 * or:
	 *	- kregs + sregs + cregs (in any order)
	 *	- piobufs (2K and 4K bufs in either order)
	 *	- uregs
	 */
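	/*
	 * Worked example (hypothetical BAR layout): with the 2K buffers
	 * at offset 0x100000 and the 4K buffers at 0x200000, kreglen
	 * becomes 0x100000 and piolen spans both buffer arrays (plus
	 * vl15buflen).  The registers below kreglen are then mapped
	 * uncached and the PIO span is mapped write-combining.
	 */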
	if (dd->piobcnt4k == 0) {
		qib_kreglen = qib_pio2koffset;
		qib_piolen = qib_pio2klen;
	} else if (qib_pio2koffset < qib_pio4koffset) {
		qib_kreglen = qib_pio2koffset;
		qib_piolen = qib_pio4koffset + qib_pio4klen - qib_kreglen;
	} else {
		qib_kreglen = qib_pio4koffset;
		qib_piolen = qib_pio2koffset + qib_pio2klen - qib_kreglen;
	}
	qib_piolen += vl15buflen;
	/* Map just the configured ports (not all hw ports) */
	if (dd->uregbase > qib_kreglen)
		qib_userlen = dd->ureg_align * dd->cfgctxts;

	/* Sanity checks passed, now create the new mappings */
	qib_kregbase = ioremap_nocache(qib_physaddr, qib_kreglen);
	if (!qib_kregbase)
		goto bail;

	qib_piobase = ioremap_wc(qib_physaddr + qib_kreglen, qib_piolen);
	if (!qib_piobase)
		goto bail_kregbase;

	if (qib_userlen) {
		qib_userbase = ioremap_nocache(qib_physaddr + dd->uregbase,
					       qib_userlen);
		if (!qib_userbase)
			goto bail_piobase;
	}

	dd->kregbase = qib_kregbase;
	dd->kregend = (u64 __iomem *)
		((char __iomem *) qib_kregbase + qib_kreglen);
	dd->piobase = qib_piobase;
	dd->pio2kbase = (void __iomem *)
		(((char __iomem *) dd->piobase) +
		 qib_pio2koffset - qib_kreglen);
	if (dd->piobcnt4k)
		dd->pio4kbase = (void __iomem *)
			(((char __iomem *) dd->piobase) +
			 qib_pio4koffset - qib_kreglen);
	if (qib_userlen)
		/* ureg will now be accessed relative to dd->userbase */
		dd->userbase = qib_userbase;
	return 0;

bail_piobase:
	iounmap(qib_piobase);
bail_kregbase:
	iounmap(qib_kregbase);
bail:
	return -ENOMEM;
}