/*
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
50 | ||
51 | #include <linux/pci.h> | |
52 | #include <linux/netdevice.h> | |
53 | #include <linux/vmalloc.h> | |
54 | #include <linux/delay.h> | |
55 | #include <linux/idr.h> | |
56 | #include <linux/module.h> | |
57 | #include <linux/printk.h> | |
58 | #include <linux/hrtimer.h> | |
59 | ||
60 | #include "hfi.h" | |
61 | #include "device.h" | |
62 | #include "common.h" | |
63 | #include "mad.h" | |
64 | #include "sdma.h" | |
65 | #include "debugfs.h" | |
66 | #include "verbs.h" | |
67 | ||
68 | #undef pr_fmt | |
69 | #define pr_fmt(fmt) DRIVER_NAME ": " fmt | |
70 | ||
/*
 * min buffers we want to have per context, after the driver's own
 * buffers have been allocated
 */
#define HFI1_MIN_USER_CTXT_BUFCNT 7
75 | ||
76 | #define HFI1_MIN_HDRQ_EGRBUF_CNT 2 | |
77 | #define HFI1_MIN_EAGER_BUFFER_SIZE (4 * 1024) /* 4KB */ | |
78 | #define HFI1_MAX_EAGER_BUFFER_SIZE (256 * 1024) /* 256KB */ | |
79 | ||
80 | /* | |
81 | * Number of user receive contexts we are configured to use (to allow for more | |
82 | * pio buffers per ctxt, etc.) Zero means use one user context per CPU. | |
83 | */ | |
84 | uint num_rcv_contexts; | |
85 | module_param_named(num_rcv_contexts, num_rcv_contexts, uint, S_IRUGO); | |
86 | MODULE_PARM_DESC( | |
87 | num_rcv_contexts, "Set max number of user receive contexts to use"); | |
88 | ||
89 | u8 krcvqs[RXE_NUM_DATA_VL]; | |
90 | int krcvqsset; | |
91 | module_param_array(krcvqs, byte, &krcvqsset, S_IRUGO); | |
92 | MODULE_PARM_DESC(krcvqs, "Array of the number of kernel receive queues by VL"); | |
93 | ||
94 | /* computed based on above array */ | |
95 | unsigned n_krcvqs; | |
96 | ||
97 | static unsigned hfi1_rcvarr_split = 25; | |
98 | module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO); | |
99 | MODULE_PARM_DESC(rcvarr_split, "Percent of context's RcvArray entries used for Eager buffers"); | |
100 | ||
101 | static uint eager_buffer_size = (2 << 20); /* 2MB */ | |
102 | module_param(eager_buffer_size, uint, S_IRUGO); | |
103 | MODULE_PARM_DESC(eager_buffer_size, "Size of the eager buffers, default: 2MB"); | |
104 | ||
105 | static uint rcvhdrcnt = 2048; /* 2x the max eager buffer count */ | |
106 | module_param_named(rcvhdrcnt, rcvhdrcnt, uint, S_IRUGO); | |
107 | MODULE_PARM_DESC(rcvhdrcnt, "Receive header queue count (default 2048)"); | |
108 | ||
static uint hfi1_hdrq_entsize = 32;
module_param_named(hdrq_entsize, hfi1_hdrq_entsize, uint, S_IRUGO);
MODULE_PARM_DESC(hdrq_entsize, "Size of header queue entries: 2 - 8B, 16 - 64B, 32 - 128B (default)");

unsigned int user_credit_return_threshold = 33;	/* default is 33% */
module_param(user_credit_return_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user send contexts, return when unreturned credits pass this percentage of allocated blocks (0 is off)");

static inline u64 encode_rcv_header_entry_size(u16);

static struct idr hfi1_unit_table;
u32 hfi1_cpulist_count;
unsigned long *hfi1_cpulist;

/*
 * Common code for creating the receive context array.
 */
int hfi1_create_ctxts(struct hfi1_devdata *dd)
{
	unsigned i;
	int ret;
	int local_node_id = pcibus_to_node(dd->pcidev->bus);

	if (local_node_id < 0)
		local_node_id = numa_node_id();
	dd->assigned_node_id = local_node_id;

	dd->rcd = kcalloc(dd->num_rcv_contexts, sizeof(*dd->rcd), GFP_KERNEL);
	if (!dd->rcd) {
		dd_dev_err(dd,
			   "Unable to allocate receive context array, failing\n");
		goto nomem;
	}

	/* create one or more kernel contexts */
	for (i = 0; i < dd->first_user_ctxt; ++i) {
		struct hfi1_pportdata *ppd;
		struct hfi1_ctxtdata *rcd;

		ppd = dd->pport + (i % dd->num_pports);
		rcd = hfi1_create_ctxtdata(ppd, i);
		if (!rcd) {
			dd_dev_err(dd,
				   "Unable to allocate kernel receive context, failing\n");
			goto nomem;
		}
		/*
		 * Set up the kernel context flags here and now because they
		 * use default values for all receive side memories.  User
		 * contexts will be handled as they are created.
		 */
		rcd->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
			HFI1_CAP_KGET(NODROP_RHQ_FULL) |
			HFI1_CAP_KGET(NODROP_EGR_FULL) |
			HFI1_CAP_KGET(DMA_RTAIL);
		rcd->seq_cnt = 1;

		rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node);
		if (!rcd->sc) {
			dd_dev_err(dd,
				   "Unable to allocate kernel send context, failing\n");
			dd->rcd[rcd->ctxt] = NULL;
			hfi1_free_ctxtdata(dd, rcd);
			goto nomem;
		}

		ret = hfi1_init_ctxt(rcd->sc);
		if (ret < 0) {
			dd_dev_err(dd,
				   "Failed to setup kernel receive context, failing\n");
			sc_free(rcd->sc);
			dd->rcd[rcd->ctxt] = NULL;
			hfi1_free_ctxtdata(dd, rcd);
			ret = -EFAULT;
			goto bail;
		}
	}

	return 0;
nomem:
	ret = -ENOMEM;
bail:
	kfree(dd->rcd);
	dd->rcd = NULL;
	return ret;
}
195 | ||
196 | /* | |
197 | * Common code for user and kernel context setup. | |
198 | */ | |
199 | struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt) | |
200 | { | |
201 | struct hfi1_devdata *dd = ppd->dd; | |
202 | struct hfi1_ctxtdata *rcd; | |
203 | unsigned kctxt_ngroups = 0; | |
204 | u32 base; | |
205 | ||
206 | if (dd->rcv_entries.nctxt_extra > | |
207 | dd->num_rcv_contexts - dd->first_user_ctxt) | |
208 | kctxt_ngroups = (dd->rcv_entries.nctxt_extra - | |
209 | (dd->num_rcv_contexts - dd->first_user_ctxt)); | |
210 | rcd = kzalloc(sizeof(*rcd), GFP_KERNEL); | |
211 | if (rcd) { | |
212 | u32 rcvtids, max_entries; | |
213 | ||
214 | dd_dev_info(dd, "%s: setting up context %u\n", __func__, ctxt); | |
215 | ||
216 | INIT_LIST_HEAD(&rcd->qp_wait_list); | |
217 | rcd->ppd = ppd; | |
218 | rcd->dd = dd; | |
219 | rcd->cnt = 1; | |
220 | rcd->ctxt = ctxt; | |
221 | dd->rcd[ctxt] = rcd; | |
222 | rcd->numa_id = numa_node_id(); | |
223 | rcd->rcv_array_groups = dd->rcv_entries.ngroups; | |
224 | ||
225 | spin_lock_init(&rcd->exp_lock); | |
226 | ||
227 | /* | |
228 | * Calculate the context's RcvArray entry starting point. | |
229 | * We do this here because we have to take into account all | |
230 | * the RcvArray entries that previous context would have | |
231 | * taken and we have to account for any extra groups | |
232 | * assigned to the kernel or user contexts. | |
233 | */ | |
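		/*
		 * Illustrative example (hypothetical values): with
		 * ngroups = 64 and kctxt_ngroups = 2, kernel contexts 0
		 * and 1 get 65 groups each (bases 0 and 65), and kernel
		 * context 2 starts at group 2 + (2 * 64) = 130.
		 */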
		if (ctxt < dd->first_user_ctxt) {
			if (ctxt < kctxt_ngroups) {
				base = ctxt * (dd->rcv_entries.ngroups + 1);
				rcd->rcv_array_groups++;
			} else {
				base = kctxt_ngroups +
					(ctxt * dd->rcv_entries.ngroups);
			}
		} else {
			u16 ct = ctxt - dd->first_user_ctxt;

			base = ((dd->n_krcv_queues * dd->rcv_entries.ngroups) +
				kctxt_ngroups);
			if (ct < dd->rcv_entries.nctxt_extra) {
				base += ct * (dd->rcv_entries.ngroups + 1);
				rcd->rcv_array_groups++;
			} else {
				base += dd->rcv_entries.nctxt_extra +
					(ct * dd->rcv_entries.ngroups);
			}
		}
		rcd->eager_base = base * dd->rcv_entries.group_size;

		/* Validate and initialize Rcv Hdr Q variables */
		if (rcvhdrcnt % HDRQ_INCREMENT) {
			dd_dev_err(dd,
				   "ctxt%u: header queue count %d must be divisible by %d\n",
				   rcd->ctxt, rcvhdrcnt, HDRQ_INCREMENT);
			goto bail;
		}
		rcd->rcvhdrq_cnt = rcvhdrcnt;
		rcd->rcvhdrqentsize = hfi1_hdrq_entsize;
		/*
		 * Simple Eager buffer allocation: we have already pre-allocated
		 * the number of RcvArray entry groups. Each ctxtdata structure
		 * holds the number of groups for that context.
		 *
		 * To follow CSR requirements and maintain cacheline alignment,
		 * make sure all sizes and bases are multiples of group_size.
		 *
		 * The expected entry count is what is left after assigning
		 * eager.
		 */
		max_entries = rcd->rcv_array_groups *
			dd->rcv_entries.group_size;
		rcvtids = ((max_entries * hfi1_rcvarr_split) / 100);
		rcd->egrbufs.count = round_down(rcvtids,
						dd->rcv_entries.group_size);
		if (rcd->egrbufs.count > MAX_EAGER_ENTRIES) {
			dd_dev_err(dd, "ctxt%u: requested too many RcvArray entries.\n",
				   rcd->ctxt);
			rcd->egrbufs.count = MAX_EAGER_ENTRIES;
		}
		dd_dev_info(dd, "ctxt%u: max Eager buffer RcvArray entries: %u\n",
			    rcd->ctxt, rcd->egrbufs.count);

		/*
		 * Allocate array that will hold the eager buffer accounting
		 * data.
		 * This will allocate the maximum possible buffer count based
		 * on the value of the RcvArray split parameter.
		 * The resulting value will be rounded down to the closest
		 * multiple of dd->rcv_entries.group_size.
		 */
		rcd->egrbufs.buffers = kcalloc(rcd->egrbufs.count,
					       sizeof(*rcd->egrbufs.buffers),
					       GFP_KERNEL);
		if (!rcd->egrbufs.buffers)
			goto bail;
		rcd->egrbufs.rcvtids = kcalloc(rcd->egrbufs.count,
					       sizeof(*rcd->egrbufs.rcvtids),
					       GFP_KERNEL);
		if (!rcd->egrbufs.rcvtids)
			goto bail;
		rcd->egrbufs.size = eager_buffer_size;
		/*
		 * The size of the buffers programmed into the RcvArray
		 * entries needs to be big enough to handle the highest
		 * MTU supported.
		 */
		if (rcd->egrbufs.size < hfi1_max_mtu) {
			rcd->egrbufs.size = __roundup_pow_of_two(hfi1_max_mtu);
			dd_dev_info(dd,
				    "ctxt%u: eager bufs size too small. Adjusting to %zu\n",
				    rcd->ctxt, rcd->egrbufs.size);
		}
		rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE;

		if (ctxt < dd->first_user_ctxt) { /* N/A for PSM contexts */
			rcd->opstats = kzalloc(sizeof(*rcd->opstats),
					       GFP_KERNEL);
			if (!rcd->opstats) {
				dd_dev_err(dd,
					   "ctxt%u: Unable to allocate per ctxt stats buffer\n",
					   rcd->ctxt);
				goto bail;
			}
		}
	}
	return rcd;
bail:
	kfree(rcd->opstats);
	kfree(rcd->egrbufs.rcvtids);
	kfree(rcd->egrbufs.buffers);
	kfree(rcd);
	return NULL;
}

/*
 * Convert a receive header entry size (in DWs) to the encoding used in
 * the CSR.
 *
 * Returns zero if the given size is invalid.
 */
static inline u64 encode_rcv_header_entry_size(u16 size)
{
	/* there are only 3 valid receive header entry sizes */
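	/* sizes are in DWs: 2 DW = 8B, 16 DW = 64B, 32 DW = 128B */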
	if (size == 2)
		return 1;
	if (size == 16)
		return 2;
	if (size == 32)
		return 4;
	return 0; /* invalid */
}

/*
 * Select the largest ccti value over all SLs to determine the intra-
 * packet gap for the link.
 *
 * called with cca_timer_lock held (to protect access to cca_timer
 * array), and rcu_read_lock() (to protect access to cc_state).
 */
void set_link_ipg(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct cc_state *cc_state;
	int i;
	u16 cce, ccti_limit, max_ccti = 0;
	u16 shift, mult;
	u64 src;
	u32 current_egress_rate; /* Mbits /sec */
	u32 max_pkt_time;
	/*
	 * max_pkt_time is the maximum packet egress time in units
	 * of the fabric clock period 1/(805 MHz).
	 */

	cc_state = get_cc_state(ppd);

	if (cc_state == NULL)
		/*
		 * This should _never_ happen - rcu_read_lock() is held,
		 * and set_link_ipg() should not be called if cc_state
		 * is NULL.
		 */
		return;

	for (i = 0; i < OPA_MAX_SLS; i++) {
		u16 ccti = ppd->cca_timer[i].ccti;

		if (ccti > max_ccti)
			max_ccti = ccti;
	}

	ccti_limit = cc_state->cct.ccti_limit;
	if (max_ccti > ccti_limit)
		max_ccti = ccti_limit;

	cce = cc_state->cct.entries[max_ccti].entry;
	shift = (cce & 0xc000) >> 14;
	mult = (cce & 0x3fff);

	current_egress_rate = active_egress_rate(ppd);

	max_pkt_time = egress_cycles(ppd->ibmaxlen, current_egress_rate);

	src = (max_pkt_time >> shift) * mult;

	src &= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SMASK;
	src <<= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SHIFT;

	write_csr(dd, SEND_STATIC_RATE_CONTROL, src);
}

static enum hrtimer_restart cca_timer_fn(struct hrtimer *t)
{
	struct cca_timer *cca_timer;
	struct hfi1_pportdata *ppd;
	int sl;
	u16 ccti, ccti_timer, ccti_min;
	struct cc_state *cc_state;

	cca_timer = container_of(t, struct cca_timer, hrtimer);
	ppd = cca_timer->ppd;
	sl = cca_timer->sl;

	rcu_read_lock();

	cc_state = get_cc_state(ppd);

	if (cc_state == NULL) {
		rcu_read_unlock();
		return HRTIMER_NORESTART;
	}

	/*
	 * 1) decrement ccti for SL
	 * 2) calculate IPG for link (set_link_ipg())
	 * 3) restart timer, unless ccti is at min value
	 */

	ccti_min = cc_state->cong_setting.entries[sl].ccti_min;
	ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;

	spin_lock(&ppd->cca_timer_lock);

	ccti = cca_timer->ccti;

	if (ccti > ccti_min) {
		cca_timer->ccti--;
		set_link_ipg(ppd);
	}

	spin_unlock(&ppd->cca_timer_lock);

	rcu_read_unlock();

	if (ccti > ccti_min) {
		unsigned long nsec = 1024 * ccti_timer;
		/* ccti_timer is in units of 1.024 usec */
		hrtimer_forward_now(t, ns_to_ktime(nsec));
		return HRTIMER_RESTART;
	}
	return HRTIMER_NORESTART;
}

/*
 * Common code for initializing the physical port structure.
 */
void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
			 struct hfi1_devdata *dd, u8 hw_pidx, u8 port)
{
	int i, size;
	uint default_pkey_idx;

	ppd->dd = dd;
	ppd->hw_pidx = hw_pidx;
	ppd->port = port; /* IB port number, not index */

	default_pkey_idx = 1;

	ppd->pkeys[default_pkey_idx] = DEFAULT_P_KEY;
	if (loopback) {
		hfi1_early_err(&pdev->dev,
			       "Faking data partition 0x8001 in idx %u\n",
			       !default_pkey_idx);
		ppd->pkeys[!default_pkey_idx] = 0x8001;
	}

	INIT_WORK(&ppd->link_vc_work, handle_verify_cap);
	INIT_WORK(&ppd->link_up_work, handle_link_up);
	INIT_WORK(&ppd->link_down_work, handle_link_down);
	INIT_WORK(&ppd->freeze_work, handle_freeze);
	INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade);
	INIT_WORK(&ppd->sma_message_work, handle_sma_message);
	INIT_WORK(&ppd->link_bounce_work, handle_link_bounce);
	mutex_init(&ppd->hls_lock);
	spin_lock_init(&ppd->sdma_alllock);
	spin_lock_init(&ppd->qsfp_info.qsfp_lock);

	ppd->sm_trap_qp = 0x0;
	ppd->sa_qp = 0x1;

	ppd->hfi1_wq = NULL;

	spin_lock_init(&ppd->cca_timer_lock);

	for (i = 0; i < OPA_MAX_SLS; i++) {
		hrtimer_init(&ppd->cca_timer[i].hrtimer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		ppd->cca_timer[i].ppd = ppd;
		ppd->cca_timer[i].sl = i;
		ppd->cca_timer[i].ccti = 0;
		ppd->cca_timer[i].hrtimer.function = cca_timer_fn;
	}

	ppd->cc_max_table_entries = IB_CC_TABLE_CAP_DEFAULT;

	spin_lock_init(&ppd->cc_state_lock);
	spin_lock_init(&ppd->cc_log_lock);
	size = sizeof(struct cc_state);
	RCU_INIT_POINTER(ppd->cc_state, kzalloc(size, GFP_KERNEL));
	if (!rcu_dereference(ppd->cc_state))
		goto bail;
	return;

bail:

	hfi1_early_err(&pdev->dev,
		       "Congestion Control Agent disabled for port %d\n", port);
}

/*
 * Do initialization for device that is only needed on
 * first detect, not on resets.
 */
static int loadtime_init(struct hfi1_devdata *dd)
{
	return 0;
}

/**
 * init_after_reset - re-initialize after a reset
 * @dd: the hfi1_ib device
 *
 * sanity check at least some of the values after reset, and
 * ensure no receive or transmit (explicitly, in case reset
 * failed)
 */
static int init_after_reset(struct hfi1_devdata *dd)
{
	int i;

	/*
	 * Ensure chip does no sends or receives, tail updates, or
	 * pioavail updates while we re-initialize.  This is mostly
	 * for the driver data structures, not chip registers.
	 */
	for (i = 0; i < dd->num_rcv_contexts; i++)
		hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
			     HFI1_RCVCTRL_INTRAVAIL_DIS |
			     HFI1_RCVCTRL_TAILUPD_DIS, i);
	pio_send_control(dd, PSC_GLOBAL_DISABLE);
	for (i = 0; i < dd->num_send_contexts; i++)
		sc_disable(dd->send_contexts[i].sc);

	return 0;
}

static void enable_chip(struct hfi1_devdata *dd)
{
	u32 rcvmask;
	u32 i;

	/* enable PIO send */
	pio_send_control(dd, PSC_GLOBAL_ENABLE);

	/*
	 * Enable kernel ctxts' receive and receive interrupt.
	 * Other ctxts done as user opens and initializes them.
	 */
	rcvmask = HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB;
	for (i = 0; i < dd->first_user_ctxt; ++i) {
		rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
			HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
		if (!HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, MULTI_PKT_EGR))
			rcvmask |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
		if (HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, NODROP_RHQ_FULL))
			rcvmask |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
		if (HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, NODROP_EGR_FULL))
			rcvmask |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
		hfi1_rcvctrl(dd, rcvmask, i);
		sc_enable(dd->rcd[i]->sc);
	}
}

/**
 * create_workqueues - create per port workqueues
 * @dd: the hfi1_ib device
 */
static int create_workqueues(struct hfi1_devdata *dd)
{
	int pidx;
	struct hfi1_pportdata *ppd;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (!ppd->hfi1_wq) {
			char wq_name[8]; /* 3 + 2 + 1 + 1 + 1 */

			snprintf(wq_name, sizeof(wq_name), "hfi%d_%d",
				 dd->unit, pidx);
			ppd->hfi1_wq =
				create_singlethread_workqueue(wq_name);
			if (!ppd->hfi1_wq)
				goto wq_error;
		}
	}
	return 0;
wq_error:
	pr_err("create_singlethread_workqueue failed for port %d\n",
	       pidx + 1);
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->hfi1_wq) {
			destroy_workqueue(ppd->hfi1_wq);
			ppd->hfi1_wq = NULL;
		}
	}
	return -ENOMEM;
}

/**
 * hfi1_init - do the actual initialization sequence on the chip
 * @dd: the hfi1_ib device
 * @reinit: re-initializing, so don't allocate new memory
 *
 * Do the actual initialization sequence on the chip.  This is done
 * both from the init routine called from the PCI infrastructure, and
 * when we reset the chip, or detect that it was reset internally,
 * or it's administratively re-enabled.
 *
 * Memory allocation here and in called routines is only done in
 * the first case (reinit == 0).  We have to be careful, because even
 * without memory allocation, we need to re-write all the chip registers
 * TIDs, etc. after the reset or enable has completed.
 */
int hfi1_init(struct hfi1_devdata *dd, int reinit)
{
	int ret = 0, pidx, lastfail = 0;
	unsigned i, len;
	struct hfi1_ctxtdata *rcd;
	struct hfi1_pportdata *ppd;

	/* Set up recv low level handlers */
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_EXPECTED] =
		kdeth_process_expected;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_EAGER] =
		kdeth_process_eager;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_IB] = process_receive_ib;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_ERROR] =
		process_receive_error;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_BYPASS] =
		process_receive_bypass;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID5] =
		process_receive_invalid;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID6] =
		process_receive_invalid;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID7] =
		process_receive_invalid;
	dd->rhf_rcv_function_map = dd->normal_rhf_rcv_functions;

	/* Set up send low level handlers */
	dd->process_pio_send = hfi1_verbs_send_pio;
	dd->process_dma_send = hfi1_verbs_send_dma;
	dd->pio_inline_send = pio_copy;

	if (is_a0(dd)) {
		atomic_set(&dd->drop_packet, DROP_PACKET_ON);
		dd->do_drop = 1;
	} else {
		atomic_set(&dd->drop_packet, DROP_PACKET_OFF);
		dd->do_drop = 0;
	}

	/* make sure the link is not "up" */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		ppd->linkup = 0;
	}

	if (reinit)
		ret = init_after_reset(dd);
	else
		ret = loadtime_init(dd);
	if (ret)
		goto done;

	/* dd->rcd can be NULL if early initialization failed */
	for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
		/*
		 * Set up the (kernel) rcvhdr queue and egr TIDs.  If doing
		 * re-init, the simplest way to handle this is to free
		 * existing, and re-allocate.
		 * Need to re-create rest of ctxt 0 ctxtdata as well.
		 */
		rcd = dd->rcd[i];
		if (!rcd)
			continue;

		rcd->do_interrupt = &handle_receive_interrupt;

		lastfail = hfi1_create_rcvhdrq(dd, rcd);
		if (!lastfail)
			lastfail = hfi1_setup_eagerbufs(rcd);
		if (lastfail)
			dd_dev_err(dd,
				   "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
	}
	if (lastfail)
		ret = lastfail;

	/* Allocate enough memory for user event notification. */
	len = ALIGN(dd->chip_rcv_contexts * HFI1_MAX_SHARED_CTXTS *
		    sizeof(*dd->events), PAGE_SIZE);
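	/*
	 * vmalloc_user() returns zeroed memory suitable for mapping
	 * into user space.
	 */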
	dd->events = vmalloc_user(len);
	if (!dd->events)
		dd_dev_err(dd, "Failed to allocate user events page\n");
	/*
	 * Allocate a page for device and port status.
	 * Page will be shared amongst all user processes.
	 */
	dd->status = vmalloc_user(PAGE_SIZE);
	if (!dd->status)
		dd_dev_err(dd, "Failed to allocate dev status page\n");
	else
		dd->freezelen = PAGE_SIZE - (sizeof(*dd->status) -
					     sizeof(dd->status->freezemsg));
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (dd->status)
			/* Currently, we only have one port */
			ppd->statusp = &dd->status->port;

		set_mtu(ppd);
	}

	/* enable chip even if we have an error, so we can debug cause */
	enable_chip(dd);

	ret = hfi1_cq_init(dd);
done:
	/*
	 * Set status even if port serdes is not initialized
	 * so that diags will work.
	 */
	if (dd->status)
		dd->status->dev |= HFI1_STATUS_CHIP_PRESENT |
			HFI1_STATUS_INITTED;
	if (!ret) {
		/* enable all interrupts from the chip */
		set_intr_state(dd, 1);

		/* chip is OK for user apps; mark it as initialized */
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;

			/*
			 * Initialize the qsfp if it exists.  Requires
			 * interrupts to be enabled so we are notified
			 * when the QSFP completes reset, and has to be
			 * done before bringing up the SERDES.
			 */
			init_qsfp(ppd);

			/*
			 * Start the serdes - must be after interrupts are
			 * enabled so we are notified when the link goes up.
			 */
			lastfail = bringup_serdes(ppd);
			if (lastfail)
				dd_dev_info(dd,
					    "Failed to bring up port %u\n",
					    ppd->port);

			/*
			 * Set status even if port serdes is not initialized
			 * so that diags will work.
			 */
			if (ppd->statusp)
				*ppd->statusp |= HFI1_STATUS_CHIP_PRESENT |
					HFI1_STATUS_INITTED;
			if (!ppd->link_speed_enabled)
				continue;
		}
	}

	/* if ret is non-zero, we probably should do some cleanup here... */
	return ret;
}

static inline struct hfi1_devdata *__hfi1_lookup(int unit)
{
	return idr_find(&hfi1_unit_table, unit);
}

struct hfi1_devdata *hfi1_lookup(int unit)
{
	struct hfi1_devdata *dd;
	unsigned long flags;

	spin_lock_irqsave(&hfi1_devs_lock, flags);
	dd = __hfi1_lookup(unit);
	spin_unlock_irqrestore(&hfi1_devs_lock, flags);

	return dd;
}

/*
 * Stop the timers during unit shutdown, or after an error late
 * in initialization.
 */
static void stop_timers(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	int pidx;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->led_override_timer.data) {
			del_timer_sync(&ppd->led_override_timer);
			atomic_set(&ppd->led_override_timer_active, 0);
		}
	}
}

/**
 * shutdown_device - shut down a device
 * @dd: the hfi1_ib device
 *
 * This is called to make the device quiet when we are about to
 * unload the driver, and also when the device is administratively
 * disabled.  It does not free any data structures.
 * Everything it does has to be set up again by hfi1_init(dd, 1).
 */
static void shutdown_device(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	unsigned pidx;
	int i;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		ppd->linkup = 0;
		if (ppd->statusp)
			*ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
					   HFI1_STATUS_IB_READY);
	}
	dd->flags &= ~HFI1_INITTED;

	/* mask interrupts, but not errors */
	set_intr_state(dd, 0);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		for (i = 0; i < dd->num_rcv_contexts; i++)
			hfi1_rcvctrl(dd, HFI1_RCVCTRL_TAILUPD_DIS |
				     HFI1_RCVCTRL_CTXT_DIS |
				     HFI1_RCVCTRL_INTRAVAIL_DIS |
				     HFI1_RCVCTRL_PKEY_DIS |
				     HFI1_RCVCTRL_ONE_PKT_EGR_DIS, i);
		/*
		 * Gracefully stop all sends allowing any in progress to
		 * trickle out first.
		 */
		for (i = 0; i < dd->num_send_contexts; i++)
			sc_flush(dd->send_contexts[i].sc);
	}

	/*
	 * Enough for anything that's going to trickle out to have actually
	 * done so.
	 */
	udelay(20);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		/* disable all contexts */
		for (i = 0; i < dd->num_send_contexts; i++)
			sc_disable(dd->send_contexts[i].sc);
		/* disable the send device */
		pio_send_control(dd, PSC_GLOBAL_DISABLE);

		/*
		 * Clear SerdesEnable.
		 * We can't count on interrupts since we are stopping.
		 */
		hfi1_quiet_serdes(ppd);

		if (ppd->hfi1_wq) {
			destroy_workqueue(ppd->hfi1_wq);
			ppd->hfi1_wq = NULL;
		}
	}
	sdma_exit(dd);
}

/**
 * hfi1_free_ctxtdata - free a context's allocated data
 * @dd: the hfi1_ib device
 * @rcd: the ctxtdata structure
 *
 * free up any allocated data for a context
 * This should not touch anything that would affect a simultaneous
 * re-allocation of context data, because it is called after hfi1_mutex
 * is released (and can be called from reinit as well).
 * It should never change any chip state, or global driver state.
 */
void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
	unsigned e;

	if (!rcd)
		return;

	if (rcd->rcvhdrq) {
		dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size,
				  rcd->rcvhdrq, rcd->rcvhdrq_phys);
		rcd->rcvhdrq = NULL;
		if (rcd->rcvhdrtail_kvaddr) {
			dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
					  (void *)rcd->rcvhdrtail_kvaddr,
					  rcd->rcvhdrqtailaddr_phys);
			rcd->rcvhdrtail_kvaddr = NULL;
		}
	}

	/* all the RcvArray entries should have been cleared by now */
	kfree(rcd->egrbufs.rcvtids);

	for (e = 0; e < rcd->egrbufs.alloced; e++) {
		if (rcd->egrbufs.buffers[e].phys)
			dma_free_coherent(&dd->pcidev->dev,
					  rcd->egrbufs.buffers[e].len,
					  rcd->egrbufs.buffers[e].addr,
					  rcd->egrbufs.buffers[e].phys);
	}
	kfree(rcd->egrbufs.buffers);

	sc_free(rcd->sc);
	vfree(rcd->physshadow);
	vfree(rcd->tid_pg_list);
	vfree(rcd->user_event_mask);
	vfree(rcd->subctxt_uregbase);
	vfree(rcd->subctxt_rcvegrbuf);
	vfree(rcd->subctxt_rcvhdr_base);
	kfree(rcd->tidusemap);
	kfree(rcd->opstats);
	kfree(rcd);
}

void hfi1_free_devdata(struct hfi1_devdata *dd)
{
	unsigned long flags;

	spin_lock_irqsave(&hfi1_devs_lock, flags);
	idr_remove(&hfi1_unit_table, dd->unit);
	list_del(&dd->list);
	spin_unlock_irqrestore(&hfi1_devs_lock, flags);
	hfi1_dbg_ibdev_exit(&dd->verbs_dev);
	rcu_barrier(); /* wait for rcu callbacks to complete */
	free_percpu(dd->int_counter);
	free_percpu(dd->rcv_limit);
	ib_dealloc_device(&dd->verbs_dev.ibdev);
}

/*
 * Allocate our primary per-unit data structure.  Must be done via verbs
 * allocator, because the verbs cleanup process does both the cleanup and
 * the free of the data structure.
 * "extra" is for chip-specific data.
 *
 * Use the idr mechanism to get a unit number for this unit.
 */
struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
{
	unsigned long flags;
	struct hfi1_devdata *dd;
	int ret;

	dd = (struct hfi1_devdata *)ib_alloc_device(sizeof(*dd) + extra);
	if (!dd)
		return ERR_PTR(-ENOMEM);
	/* extra is * number of ports */
	dd->num_pports = extra / sizeof(struct hfi1_pportdata);
	dd->pport = (struct hfi1_pportdata *)(dd + 1);

	INIT_LIST_HEAD(&dd->list);
	dd->node = dev_to_node(&pdev->dev);
	if (dd->node < 0)
		dd->node = 0;
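	/*
	 * idr_preload() pre-allocates IDR nodes so the idr_alloc() below
	 * can safely use GFP_NOWAIT while holding the spinlock.
	 */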
	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&hfi1_devs_lock, flags);

	ret = idr_alloc(&hfi1_unit_table, dd, 0, 0, GFP_NOWAIT);
	if (ret >= 0) {
		dd->unit = ret;
		list_add(&dd->list, &hfi1_dev_list);
	}

	spin_unlock_irqrestore(&hfi1_devs_lock, flags);
	idr_preload_end();

	if (ret < 0) {
		hfi1_early_err(&pdev->dev,
			       "Could not allocate unit ID: error %d\n", -ret);
		goto bail;
	}
	/*
	 * Initialize all locks for the device. This needs to be as early as
	 * possible so locks are usable.
	 */
	spin_lock_init(&dd->sc_lock);
	spin_lock_init(&dd->sendctrl_lock);
	spin_lock_init(&dd->rcvctrl_lock);
	spin_lock_init(&dd->uctxt_lock);
	spin_lock_init(&dd->hfi1_diag_trans_lock);
	spin_lock_init(&dd->sc_init_lock);
	spin_lock_init(&dd->dc8051_lock);
	spin_lock_init(&dd->dc8051_memlock);
	mutex_init(&dd->qsfp_i2c_mutex);
	seqlock_init(&dd->sc2vl_lock);
	spin_lock_init(&dd->sde_map_lock);
	init_waitqueue_head(&dd->event_queue);

	dd->int_counter = alloc_percpu(u64);
	if (!dd->int_counter) {
		ret = -ENOMEM;
		hfi1_early_err(&pdev->dev,
			       "Could not allocate per-cpu int_counter\n");
		goto bail;
	}

	dd->rcv_limit = alloc_percpu(u64);
	if (!dd->rcv_limit) {
		ret = -ENOMEM;
		hfi1_early_err(&pdev->dev,
			       "Could not allocate per-cpu rcv_limit\n");
		goto bail;
	}

	if (!hfi1_cpulist_count) {
		u32 count = num_online_cpus();

		hfi1_cpulist = kcalloc(BITS_TO_LONGS(count), sizeof(long),
				       GFP_KERNEL);
		if (hfi1_cpulist)
			hfi1_cpulist_count = count;
		else
			hfi1_early_err(
				&pdev->dev,
				"Could not alloc cpulist info, cpu affinity might be wrong\n");
	}
	hfi1_dbg_ibdev_init(&dd->verbs_dev);
	return dd;

bail:
	if (!list_empty(&dd->list))
		list_del_init(&dd->list);
	ib_dealloc_device(&dd->verbs_dev.ibdev);
	return ERR_PTR(ret);
}

/*
 * Called from freeze mode handlers, and from PCI error
 * reporting code.  Should be paranoid about state of
 * system and data structures.
 */
void hfi1_disable_after_error(struct hfi1_devdata *dd)
{
	if (dd->flags & HFI1_INITTED) {
		u32 pidx;

		dd->flags &= ~HFI1_INITTED;
		if (dd->pport)
			for (pidx = 0; pidx < dd->num_pports; ++pidx) {
				struct hfi1_pportdata *ppd;

				ppd = dd->pport + pidx;
				if (dd->flags & HFI1_PRESENT)
					set_link_state(ppd, HLS_DN_DISABLE);

				if (ppd->statusp)
					*ppd->statusp &= ~HFI1_STATUS_IB_READY;
			}
	}

	/*
	 * Mark as having had an error for driver, and also
	 * for /sys and status word mapped to user programs.
	 * This marks unit as not usable, until reset.
	 */
	if (dd->status)
		dd->status->dev |= HFI1_STATUS_HWERROR;
}

static void remove_one(struct pci_dev *);
static int init_one(struct pci_dev *, const struct pci_device_id *);

#define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: "
#define PFX DRIVER_NAME ": "

static const struct pci_device_id hfi1_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL0) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL1) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, hfi1_pci_tbl);

static struct pci_driver hfi1_pci_driver = {
	.name = DRIVER_NAME,
	.probe = init_one,
	.remove = remove_one,
	.id_table = hfi1_pci_tbl,
	.err_handler = &hfi1_pci_err_handler,
};

static void __init compute_krcvqs(void)
{
	int i;

	for (i = 0; i < krcvqsset; i++)
		n_krcvqs += krcvqs[i];
}

/*
 * Do all the generic driver unit- and chip-independent memory
 * allocation and initialization.
 */
static int __init hfi1_mod_init(void)
{
	int ret;

	ret = dev_init();
	if (ret)
		goto bail;

	/* validate max MTU before any devices start */
	if (!valid_opa_max_mtu(hfi1_max_mtu)) {
		pr_err("Invalid max_mtu 0x%x, using 0x%x instead\n",
		       hfi1_max_mtu, HFI1_DEFAULT_MAX_MTU);
		hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
	}
	/* valid CUs run from 1-128 in powers of 2 */
	if (hfi1_cu > 128 || !is_power_of_2(hfi1_cu))
		hfi1_cu = 1;
	/* valid credit return threshold is 0-100, variable is unsigned */
	if (user_credit_return_threshold > 100)
		user_credit_return_threshold = 100;

	compute_krcvqs();
	/*
	 * Sanitize receive interrupt count; the timeout must wait until
	 * after the hardware type is known.
	 */
	if (rcv_intr_count > RCV_HDR_HEAD_COUNTER_MASK)
		rcv_intr_count = RCV_HDR_HEAD_COUNTER_MASK;
	/* reject invalid combinations */
	if (rcv_intr_count == 0 && rcv_intr_timeout == 0) {
		pr_err("Invalid mode: both receive interrupt count and available timeout are zero - setting interrupt count to 1\n");
		rcv_intr_count = 1;
	}
	if (rcv_intr_count > 1 && rcv_intr_timeout == 0) {
		/*
		 * Avoid indefinite packet delivery by requiring a timeout
		 * if count is > 1.
		 */
		pr_err("Invalid mode: receive interrupt count greater than 1 and available timeout is zero - setting available timeout to 1\n");
		rcv_intr_timeout = 1;
	}
	if (rcv_intr_dynamic && !(rcv_intr_count > 1 && rcv_intr_timeout > 0)) {
		/*
		 * The dynamic algorithm expects a non-zero timeout
		 * and a count > 1.
		 */
		pr_err("Invalid mode: dynamic receive interrupt mitigation with invalid count and timeout - turning dynamic off\n");
		rcv_intr_dynamic = 0;
	}

	/* sanitize link CRC options */
	link_crc_mask &= SUPPORTED_CRCS;

	/*
	 * These must be called before the driver is registered with
	 * the PCI subsystem.
	 */
	idr_init(&hfi1_unit_table);

	hfi1_dbg_init();
	ret = pci_register_driver(&hfi1_pci_driver);
	if (ret < 0) {
		pr_err("Unable to register driver: error %d\n", -ret);
		goto bail_dev;
	}
	goto bail; /* all OK */

bail_dev:
	hfi1_dbg_exit();
	idr_destroy(&hfi1_unit_table);
	dev_cleanup();
bail:
	return ret;
}

module_init(hfi1_mod_init);

/*
 * Do the non-unit driver cleanup, memory free, etc. at unload.
 */
static void __exit hfi1_mod_cleanup(void)
{
	pci_unregister_driver(&hfi1_pci_driver);
	hfi1_dbg_exit();
	hfi1_cpulist_count = 0;
	kfree(hfi1_cpulist);

	idr_destroy(&hfi1_unit_table);
	dispose_firmware(); /* asymmetric with obtain_firmware() */
	dev_cleanup();
}

module_exit(hfi1_mod_cleanup);

/* this can only be called after a successful initialization */
static void cleanup_device_data(struct hfi1_devdata *dd)
{
	int ctxt;
	int pidx;
	struct hfi1_ctxtdata **tmp;
	unsigned long flags;

	/* users can't do anything more with chip */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		struct hfi1_pportdata *ppd = &dd->pport[pidx];
		struct cc_state *cc_state;
		int i;

		if (ppd->statusp)
			*ppd->statusp &= ~HFI1_STATUS_CHIP_PRESENT;

		for (i = 0; i < OPA_MAX_SLS; i++)
			hrtimer_cancel(&ppd->cca_timer[i].hrtimer);

		spin_lock(&ppd->cc_state_lock);
		cc_state = get_cc_state(ppd);
		rcu_assign_pointer(ppd->cc_state, NULL);
		spin_unlock(&ppd->cc_state_lock);

		if (cc_state)
			call_rcu(&cc_state->rcu, cc_state_reclaim);
	}

	free_credit_return(dd);

	/*
	 * Free any resources still in use (usually just kernel contexts)
	 * at unload; we loop over the full receive context count, because
	 * that's what we allocated.
	 * We acquire the lock to be really paranoid that rcd isn't being
	 * accessed from some interrupt-related code (that should not happen,
	 * but best to be sure).
	 */
	spin_lock_irqsave(&dd->uctxt_lock, flags);
	tmp = dd->rcd;
	dd->rcd = NULL;
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);
	for (ctxt = 0; tmp && ctxt < dd->num_rcv_contexts; ctxt++) {
		struct hfi1_ctxtdata *rcd = tmp[ctxt];

		tmp[ctxt] = NULL; /* debugging paranoia */
		if (rcd) {
			hfi1_clear_tids(rcd);
			hfi1_free_ctxtdata(dd, rcd);
		}
	}
	kfree(tmp);
	/* must follow rcv context free - need to remove rcv's hooks */
	for (ctxt = 0; ctxt < dd->num_send_contexts; ctxt++)
		sc_free(dd->send_contexts[ctxt].sc);
	dd->num_send_contexts = 0;
	kfree(dd->send_contexts);
	dd->send_contexts = NULL;
	kfree(dd->boardname);
	vfree(dd->events);
	vfree(dd->status);
	hfi1_cq_exit(dd);
}

/*
 * Clean up on unit shutdown, or error during unit load after
 * successful initialization.
 */
static void postinit_cleanup(struct hfi1_devdata *dd)
{
	hfi1_start_cleanup(dd);

	hfi1_pcie_ddcleanup(dd);
	hfi1_pcie_cleanup(dd->pcidev);

	cleanup_device_data(dd);

	hfi1_free_devdata(dd);
}

static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int ret = 0, j, pidx, initfail;
	struct hfi1_devdata *dd = NULL;

	/* First, lock the non-writable module parameters */
	HFI1_CAP_LOCK();

	/* Validate some global module parameters */
	if (rcvhdrcnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
		hfi1_early_err(&pdev->dev, "Header queue count too small\n");
		ret = -EINVAL;
		goto bail;
	}
	/* use the encoding function as a sanitization check */
	if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) {
		hfi1_early_err(&pdev->dev, "Invalid HdrQ Entry size %u\n",
			       hfi1_hdrq_entsize);
		ret = -EINVAL;
		goto bail;
	}
1331 | ||
1332 | /* The receive eager buffer size must be set before the receive | |
1333 | * contexts are created. | |
1334 | * | |
1335 | * Set the eager buffer size. Validate that it falls in a range | |
1336 | * allowed by the hardware - all powers of 2 between the min and | |
1337 | * max. The maximum valid MTU is within the eager buffer range | |
1338 | * so we do not need to cap the max_mtu by an eager buffer size | |
1339 | * setting. | |
1340 | */ | |
1341 | if (eager_buffer_size) { | |
1342 | if (!is_power_of_2(eager_buffer_size)) | |
1343 | eager_buffer_size = | |
1344 | roundup_pow_of_two(eager_buffer_size); | |
1345 | eager_buffer_size = | |
1346 | clamp_val(eager_buffer_size, | |
1347 | MIN_EAGER_BUFFER * 8, | |
1348 | MAX_EAGER_BUFFER_TOTAL); | |
1349 | hfi1_early_info(&pdev->dev, "Eager buffer size %u\n", | |
1350 | eager_buffer_size); | |
1351 | } else { | |
1352 | hfi1_early_err(&pdev->dev, "Invalid Eager buffer size of 0\n"); | |
1353 | ret = -EINVAL; | |
1354 | goto bail; | |
1355 | } | |
1356 | ||
1357 | /* restrict value of hfi1_rcvarr_split */ | |
1358 | hfi1_rcvarr_split = clamp_val(hfi1_rcvarr_split, 0, 100); | |
1359 | ||
1360 | ret = hfi1_pcie_init(pdev, ent); | |
1361 | if (ret) | |
1362 | goto bail; | |
1363 | ||
1364 | /* | |
1365 | * Do device-specific initialization, function table setup, dd | |
1366 | * allocation, etc. | |
1367 | */ | |
1368 | switch (ent->device) { | |
1369 | case PCI_DEVICE_ID_INTEL0: | |
1370 | case PCI_DEVICE_ID_INTEL1: | |
1371 | dd = hfi1_init_dd(pdev, ent); | |
1372 | break; | |
1373 | default: | |
1374 | hfi1_early_err(&pdev->dev, | |
1375 | "Failing on unknown Intel deviceid 0x%x\n", | |
1376 | ent->device); | |
1377 | ret = -ENODEV; | |
1378 | } | |
1379 | ||
1380 | if (IS_ERR(dd)) | |
1381 | ret = PTR_ERR(dd); | |
1382 | if (ret) | |
1383 | goto clean_bail; /* error already printed */ | |
1384 | ||
1385 | ret = create_workqueues(dd); | |
1386 | if (ret) | |
1387 | goto clean_bail; | |
1388 | ||
1389 | /* do the generic initialization */ | |
1390 | initfail = hfi1_init(dd, 0); | |
1391 | ||
1392 | ret = hfi1_register_ib_device(dd); | |
1393 | ||
	/*
	 * Now ready for use.  This should be cleared whenever we
	 * detect a reset, or initiate one.  If earlier failure,
	 * we still create devices, so diags, etc. can be used
	 * to determine cause of problem.
	 */
	if (!initfail && !ret)
		dd->flags |= HFI1_INITTED;

	j = hfi1_device_create(dd);
	if (j)
		dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j);

	if (initfail || ret) {
		stop_timers(dd);
		flush_workqueue(ib_wq);
		for (pidx = 0; pidx < dd->num_pports; ++pidx)
			hfi1_quiet_serdes(dd->pport + pidx);
		if (!j)
			hfi1_device_remove(dd);
		if (!ret)
			hfi1_unregister_ib_device(dd);
		postinit_cleanup(dd);
		if (initfail)
			ret = initfail;
		goto bail; /* everything already cleaned */
	}

	sdma_start(dd);

	return 0;

clean_bail:
	hfi1_pcie_cleanup(pdev);
bail:
	return ret;
}

static void remove_one(struct pci_dev *pdev)
{
	struct hfi1_devdata *dd = pci_get_drvdata(pdev);

	/* unregister from IB core */
	hfi1_unregister_ib_device(dd);

	/*
	 * Disable the IB link, disable interrupts on the device,
	 * clear dma engines, etc.
	 */
	shutdown_device(dd);

	stop_timers(dd);

	/* wait until all of our (qsfp) queue_work() calls complete */
	flush_workqueue(ib_wq);

	hfi1_device_remove(dd);

	postinit_cleanup(dd);
}

/**
 * hfi1_create_rcvhdrq - create a receive header queue
 * @dd: the hfi1_ib device
 * @rcd: the context data
 *
 * This must be contiguous memory (from an i/o perspective), and must be
 * DMA'able (which means for some systems, it will go through an IOMMU,
 * or be forced into a low address range).
 */
int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
	unsigned amt;
	u64 reg;

	if (!rcd->rcvhdrq) {
		dma_addr_t phys_hdrqtail;
		gfp_t gfp_flags;

		/*
		 * rcvhdrqentsize is in DWs, so we have to convert to bytes
		 * (* sizeof(u32)).
		 */
		amt = ALIGN(rcd->rcvhdrq_cnt * rcd->rcvhdrqentsize *
			    sizeof(u32), PAGE_SIZE);

		gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ?
			GFP_USER : GFP_KERNEL;
		rcd->rcvhdrq = dma_zalloc_coherent(
			&dd->pcidev->dev, amt, &rcd->rcvhdrq_phys,
			gfp_flags | __GFP_COMP);

		if (!rcd->rcvhdrq) {
			dd_dev_err(dd,
				   "attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n",
				   amt, rcd->ctxt);
			goto bail;
		}

		/* Event mask is per device now and is in hfi1_devdata */
1499 | ||
1500 | if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) { | |
1501 | rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent( | |
1502 | &dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail, | |
1503 | gfp_flags); | |
1504 | if (!rcd->rcvhdrtail_kvaddr) | |
1505 | goto bail_free; | |
1506 | rcd->rcvhdrqtailaddr_phys = phys_hdrqtail; | |
1507 | } | |
1508 | ||
1509 | rcd->rcvhdrq_size = amt; | |
1510 | } | |
1511 | /* | |
1512 | * These values are per-context: | |
1513 | * RcvHdrCnt | |
1514 | * RcvHdrEntSize | |
1515 | * RcvHdrSize | |
1516 | */ | |
1517 | reg = ((u64)(rcd->rcvhdrq_cnt >> HDRQ_SIZE_SHIFT) | |
1518 | & RCV_HDR_CNT_CNT_MASK) | |
1519 | << RCV_HDR_CNT_CNT_SHIFT; | |
1520 | write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_CNT, reg); | |
1521 | reg = (encode_rcv_header_entry_size(rcd->rcvhdrqentsize) | |
1522 | & RCV_HDR_ENT_SIZE_ENT_SIZE_MASK) | |
1523 | << RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT; | |
1524 | write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_ENT_SIZE, reg); | |
1525 | reg = (dd->rcvhdrsize & RCV_HDR_SIZE_HDR_SIZE_MASK) | |
1526 | << RCV_HDR_SIZE_HDR_SIZE_SHIFT; | |
1527 | write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_SIZE, reg); | |
1528 | return 0; | |
1529 | ||
1530 | bail_free: | |
1531 | dd_dev_err(dd, | |
1532 | "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n", | |
1533 | rcd->ctxt); | |
1534 | vfree(rcd->user_event_mask); | |
1535 | rcd->user_event_mask = NULL; | |
1536 | dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq, | |
1537 | rcd->rcvhdrq_phys); | |
1538 | rcd->rcvhdrq = NULL; | |
1539 | bail: | |
1540 | return -ENOMEM; | |
1541 | } | |
1542 | ||
/**
 * hfi1_setup_eagerbufs - allocate eager buffers, both kernel and user contexts.
 * @rcd: the context we are setting up.
 *
 * Allocate the eager TID buffers and program them into the chip.
 * They are no longer completely contiguous; we do multiple allocation
 * calls.  Otherwise we would get the OOM code involved by asking for too
 * much per call, with disastrous results on some kernels.
 */
int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
{
	struct hfi1_devdata *dd = rcd->dd;
	u32 max_entries, egrtop, alloced_bytes = 0, idx = 0;
	gfp_t gfp_flags;
	u16 order;
	int ret = 0;
	u16 round_mtu = roundup_pow_of_two(hfi1_max_mtu);

	/*
	 * GFP_USER, but without GFP_FS, so buffer cache can be
	 * coalesced (we hope); otherwise, even at order 4,
	 * heavy filesystem activity makes these fail, and we can
	 * use compound pages.
	 */
	gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP;

	/*
	 * The minimum size of the eager buffers is a group of MTU-sized
	 * buffers.
	 * The global eager_buffer_size parameter is checked against the
	 * theoretical lower limit of the value. Here, we check against the
	 * MTU.
	 */
	if (rcd->egrbufs.size < (round_mtu * dd->rcv_entries.group_size))
		rcd->egrbufs.size = round_mtu * dd->rcv_entries.group_size;
	/*
	 * If using one-pkt-per-egr-buffer, lower the eager buffer
	 * size to the max MTU (page-aligned).
	 */
	if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
		rcd->egrbufs.rcvtid_size = round_mtu;

	/*
	 * Eager buffers sizes of 1MB or less require smaller TID sizes
	 * to satisfy the "multiple of 8 RcvArray entries" requirement.
	 */
	if (rcd->egrbufs.size <= (1 << 20))
		rcd->egrbufs.rcvtid_size = max((unsigned long)round_mtu,
			rounddown_pow_of_two(rcd->egrbufs.size / 8));
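	/*
	 * E.g. (hypothetical values) a 1MB eager pool with an 8KB
	 * round_mtu yields rcvtid_size = max(8KB, 1MB / 8) = 128KB.
	 */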
1592 | ||
1593 | while (alloced_bytes < rcd->egrbufs.size && | |
1594 | rcd->egrbufs.alloced < rcd->egrbufs.count) { | |
1595 | rcd->egrbufs.buffers[idx].addr = | |
1596 | dma_zalloc_coherent(&dd->pcidev->dev, | |
1597 | rcd->egrbufs.rcvtid_size, | |
1598 | &rcd->egrbufs.buffers[idx].phys, | |
1599 | gfp_flags); | |
1600 | if (rcd->egrbufs.buffers[idx].addr) { | |
1601 | rcd->egrbufs.buffers[idx].len = | |
1602 | rcd->egrbufs.rcvtid_size; | |
1603 | rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].addr = | |
1604 | rcd->egrbufs.buffers[idx].addr; | |
1605 | rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].phys = | |
1606 | rcd->egrbufs.buffers[idx].phys; | |
1607 | rcd->egrbufs.alloced++; | |
1608 | alloced_bytes += rcd->egrbufs.rcvtid_size; | |
1609 | idx++; | |
1610 | } else { | |
1611 | u32 new_size, i, j; | |
1612 | u64 offset = 0; | |
1613 | ||
1614 | /* | |
1615 | * Fail the eager buffer allocation if: | |
1616 | * - we are already using the lowest acceptable size | |
1617 | * - we are using one-pkt-per-egr-buffer (this implies | |
1618 | * that we are accepting only one size) | |
1619 | */ | |
1620 | if (rcd->egrbufs.rcvtid_size == round_mtu || | |
1621 | !HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) { | |
1622 | dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n", | |
1623 | rcd->ctxt); | |
1624 | goto bail_rcvegrbuf_phys; | |
1625 | } | |
1626 | ||
1627 | new_size = rcd->egrbufs.rcvtid_size / 2; | |
1628 | ||
1629 | /* | |
1630 | * If the first attempt to allocate memory failed, don't | |
1631 | * fail everything but continue with the next lower | |
1632 | * size. | |
1633 | */ | |
1634 | if (idx == 0) { | |
1635 | rcd->egrbufs.rcvtid_size = new_size; | |
1636 | continue; | |
1637 | } | |
1638 | ||
1639 | /* | |
1640 | * Re-partition already allocated buffers to a smaller | |
1641 | * size. | |
1642 | */ | |
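			/*
			 * E.g. two 64KB buffers re-described as four 32KB
			 * RcvArray entries: each buffer contributes
			 * len / new_size entries at successive offsets.
			 */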
			rcd->egrbufs.alloced = 0;
			for (i = 0, j = 0, offset = 0; j < idx; i++) {
				if (i >= rcd->egrbufs.count)
					break;
				rcd->egrbufs.rcvtids[i].phys =
					rcd->egrbufs.buffers[j].phys + offset;
				rcd->egrbufs.rcvtids[i].addr =
					rcd->egrbufs.buffers[j].addr + offset;
				rcd->egrbufs.alloced++;
				if ((rcd->egrbufs.buffers[j].phys + offset +
				     new_size) ==
				    (rcd->egrbufs.buffers[j].phys +
				     rcd->egrbufs.buffers[j].len)) {
					j++;
					offset = 0;
				} else {
					offset += new_size;
				}
			}
			rcd->egrbufs.rcvtid_size = new_size;
		}
	}
	rcd->egrbufs.numbufs = idx;
	rcd->egrbufs.size = alloced_bytes;

	dd_dev_info(dd, "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %zuKB\n",
		    rcd->ctxt, rcd->egrbufs.alloced, rcd->egrbufs.rcvtid_size,
		    rcd->egrbufs.size);

	/*
	 * Set the context's rcv array head update threshold to the closest
	 * power of 2 (so we can use a mask instead of modulo) below half
	 * the allocated entries.
	 */
	rcd->egrbufs.threshold =
		rounddown_pow_of_two(rcd->egrbufs.alloced / 2);
	/*
	 * Compute the expected RcvArray entry base.  This is done after
	 * allocating the eager buffers in order to maximize the
	 * expected RcvArray entries for the context.
	 */
	max_entries = rcd->rcv_array_groups * dd->rcv_entries.group_size;
	egrtop = roundup(rcd->egrbufs.alloced, dd->rcv_entries.group_size);
	rcd->expected_count = max_entries - egrtop;
	if (rcd->expected_count > MAX_TID_PAIR_ENTRIES * 2)
		rcd->expected_count = MAX_TID_PAIR_ENTRIES * 2;

	rcd->expected_base = rcd->eager_base + egrtop;
	dd_dev_info(dd, "ctxt%u: eager:%u, exp:%u, egrbase:%u, expbase:%u\n",
		    rcd->ctxt, rcd->egrbufs.alloced, rcd->expected_count,
		    rcd->eager_base, rcd->expected_base);

	if (!hfi1_rcvbuf_validate(rcd->egrbufs.rcvtid_size, PT_EAGER, &order)) {
		dd_dev_err(dd, "ctxt%u: current Eager buffer size is invalid %u\n",
			   rcd->ctxt, rcd->egrbufs.rcvtid_size);
		ret = -EINVAL;
		goto bail;
	}

	for (idx = 0; idx < rcd->egrbufs.alloced; idx++) {
		hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER,
			     rcd->egrbufs.rcvtids[idx].phys, order);
		cond_resched();
	}
	goto bail;

bail_rcvegrbuf_phys:
	for (idx = 0; idx < rcd->egrbufs.alloced &&
	     rcd->egrbufs.buffers[idx].addr;
	     idx++) {
		dma_free_coherent(&dd->pcidev->dev,
				  rcd->egrbufs.buffers[idx].len,
				  rcd->egrbufs.buffers[idx].addr,
				  rcd->egrbufs.buffers[idx].phys);
		rcd->egrbufs.buffers[idx].addr = NULL;
		rcd->egrbufs.buffers[idx].phys = 0;
		rcd->egrbufs.buffers[idx].len = 0;
	}
bail:
	return ret;
}