/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_reg_addr.h"

/* Max number of connection types in HW (DQ/CDU etc.) */
#define MAX_CONN_TYPES          PROTOCOLID_COMMON
#define NUM_TASK_TYPES          2
#define NUM_TASK_PF_SEGMENTS    4

/* QM constants */
#define QM_PQ_ELEMENT_SIZE      4 /* in bytes */

/* Doorbell-Queue constants */
#define DQ_RANGE_SHIFT          4
#define DQ_RANGE_ALIGN          BIT(DQ_RANGE_SHIFT)

/* ILT constants */
#define ILT_DEFAULT_HW_P_SIZE   3
#define ILT_PAGE_IN_BYTES(hw_p_size)    (1U << ((hw_p_size) + 12))
#define ILT_CFG_REG(cli, reg)   PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET

/* ILT entry structure */
#define ILT_ENTRY_PHY_ADDR_MASK         0x000FFFFFFFFFFFULL
#define ILT_ENTRY_PHY_ADDR_SHIFT        0
#define ILT_ENTRY_VALID_MASK            0x1ULL
#define ILT_ENTRY_VALID_SHIFT           52
#define ILT_ENTRY_IN_REGS               2
#define ILT_REG_SIZE_IN_BYTES           4
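
/* Example (illustrative sketch, not part of the driver): building one ILT
 * entry by hand with the fields above. Bits [43:0] hold the DMA address of
 * the backing page right-shifted by 12, bit 52 marks the entry valid, and
 * the 64-bit result is programmed as ILT_ENTRY_IN_REGS consecutive 32-bit
 * runtime registers. The names phys_addr/reg_lo/reg_hi are hypothetical.
 *
 *      u64 entry = 0;
 *      u32 reg_lo, reg_hi;
 *
 *      SET_FIELD(entry, ILT_ENTRY_PHY_ADDR, phys_addr >> 12);
 *      SET_FIELD(entry, ILT_ENTRY_VALID, 1ULL);
 *      reg_lo = (u32)entry;
 *      reg_hi = (u32)(entry >> 32);
 *
 * reg_lo and reg_hi then land in two consecutive RT registers, which is
 * how qed_ilt_init_pf() programs the table further down in this file.
 */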

/* connection context union */
union conn_context {
        struct core_conn_context core_ctx;
        struct eth_conn_context eth_ctx;
};

#define CONN_CXT_SIZE(p_hwfn) \
        ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)

/* PF per protocol configuration object */
struct qed_conn_type_cfg {
        u32 cid_count;
        u32 cid_start;
};

/* ILT Client configuration, Per connection type (protocol) resources. */
#define ILT_CLI_PF_BLOCKS       (1 + NUM_TASK_PF_SEGMENTS * 2)
#define CDUC_BLK                (0)

enum ilt_clients {
        ILT_CLI_CDUC,
        ILT_CLI_QM,
        ILT_CLI_MAX
};

struct ilt_cfg_pair {
        u32 reg;
        u32 val;
};

struct qed_ilt_cli_blk {
        u32 total_size; /* 0 means not active */
        u32 real_size_in_page;
        u32 start_line;
};

struct qed_ilt_client_cfg {
        bool active;

        /* ILT boundaries */
        struct ilt_cfg_pair first;
        struct ilt_cfg_pair last;
        struct ilt_cfg_pair p_size;

        /* ILT client blocks for PF */
        struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
        u32 pf_total_lines;
};

/* Per Path -
 * ILT shadow table
 * Protocol acquired CID lists
 * PF start line in ILT
 */
struct qed_dma_mem {
        dma_addr_t p_phys;
        void *p_virt;
        size_t size;
};

struct qed_cid_acquired_map {
        u32 start_cid;
        u32 max_count;
        unsigned long *cid_map;
};

struct qed_cxt_mngr {
        /* Per protocol configuration */
        struct qed_conn_type_cfg conn_cfg[MAX_CONN_TYPES];

        /* computed ILT structure */
        struct qed_ilt_client_cfg clients[ILT_CLI_MAX];

        /* Acquired CIDs */
        struct qed_cid_acquired_map acquired[MAX_CONN_TYPES];

        /* ILT shadow table */
        struct qed_dma_mem *ilt_shadow;
        u32 pf_start_line;
};

static u32 qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr)
{
        u32 type, pf_cids = 0;

        for (type = 0; type < MAX_CONN_TYPES; type++)
                pf_cids += p_mngr->conn_cfg[type].cid_count;

        return pf_cids;
}

static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
                            struct qed_qm_iids *iids)
{
        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        int type;

        for (type = 0; type < MAX_CONN_TYPES; type++)
                iids->cids += p_mngr->conn_cfg[type].cid_count;

        DP_VERBOSE(p_hwfn, QED_MSG_ILT, "iids: CIDS %08x\n", iids->cids);
}

/* set the iids count per protocol */
static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
                                        enum protocol_type type,
                                        u32 cid_count)
{
        struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
        struct qed_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type];

        p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN);
}
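
/* Worked example (hypothetical count, for illustration): DQ_RANGE_ALIGN is
 * BIT(4) = 16, so a request for 100 CIDs is stored as roundup(100, 16) =
 * 112. The doorbell queue addresses CID ranges in 16-CID granules (see
 * DQ_RANGE_SHIFT), so every protocol's count must sit on that boundary.
 */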

static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli,
                                 struct qed_ilt_cli_blk *p_blk,
                                 u32 start_line, u32 total_size,
                                 u32 elem_size)
{
        u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);

        /* verify that it's called only once for each block */
        if (p_blk->total_size)
                return;

        p_blk->total_size = total_size;
        p_blk->real_size_in_page = 0;
        if (elem_size)
                p_blk->real_size_in_page = (ilt_size / elem_size) * elem_size;
        p_blk->start_line = start_line;
}

static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn,
                                 struct qed_ilt_client_cfg *p_cli,
                                 struct qed_ilt_cli_blk *p_blk,
                                 u32 *p_line, enum ilt_clients client_id)
{
        if (!p_blk->total_size)
                return;

        if (!p_cli->active)
                p_cli->first.val = *p_line;

        p_cli->active = true;
        *p_line += DIV_ROUND_UP(p_blk->total_size,
                                p_blk->real_size_in_page);
        p_cli->last.val = *p_line - 1;

        DP_VERBOSE(p_hwfn, QED_MSG_ILT,
                   "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x [Real %08x] Start line %d\n",
                   client_id, p_cli->first.val,
                   p_cli->last.val, p_blk->total_size,
                   p_blk->real_size_in_page, p_blk->start_line);
}
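
/* Worked example (hypothetical sizes, for illustration only): with the
 * default p_size.val of 3 an ILT page is 1 << (3 + 12) = 32768 bytes. If a
 * context element were 320 bytes, real_size_in_page would be
 * (32768 / 320) * 320 = 32640, i.e. 102 whole elements per page with 128
 * bytes of end-of-page waste. A block of 1000 such elements (320000 bytes)
 * would then advance the line counter by DIV_ROUND_UP(320000, 32640) = 10.
 */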

int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
{
        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        struct qed_ilt_client_cfg *p_cli;
        struct qed_ilt_cli_blk *p_blk;
        u32 curr_line, total, pf_cids;
        struct qed_qm_iids qm_iids;

        memset(&qm_iids, 0, sizeof(qm_iids));

        p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);

        DP_VERBOSE(p_hwfn, QED_MSG_ILT,
                   "hwfn [%d] - Set context manager starting line to be 0x%08x\n",
                   p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);

        /* CDUC */
        p_cli = &p_mngr->clients[ILT_CLI_CDUC];
        curr_line = p_mngr->pf_start_line;
        p_cli->pf_total_lines = 0;

        /* get the counters for the CDUC and QM clients */
        pf_cids = qed_cxt_cdu_iids(p_mngr);

        p_blk = &p_cli->pf_blks[CDUC_BLK];

        total = pf_cids * CONN_CXT_SIZE(p_hwfn);

        qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
                             total, CONN_CXT_SIZE(p_hwfn));

        qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
        p_cli->pf_total_lines = curr_line - p_blk->start_line;

        /* QM */
        p_cli = &p_mngr->clients[ILT_CLI_QM];
        p_blk = &p_cli->pf_blks[0];

        qed_cxt_qm_iids(p_hwfn, &qm_iids);
        total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids, 0, 0,
                                   p_hwfn->qm_info.num_pqs, 0);

        DP_VERBOSE(p_hwfn, QED_MSG_ILT,
                   "QM ILT Info, (cids=%d, num_pqs=%d, memory_size=%d)\n",
                   qm_iids.cids, p_hwfn->qm_info.num_pqs, total);

        /* qed_qm_pf_mem_size() reports its result in 4KB units, hence the
         * conversion to bytes below.
         */
        qed_ilt_cli_blk_fill(p_cli, p_blk,
                             curr_line, total * 0x1000,
                             QM_PQ_ELEMENT_SIZE);

        qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM);
        p_cli->pf_total_lines = curr_line - p_blk->start_line;

        if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
            RESC_NUM(p_hwfn, QED_ILT)) {
                DP_ERR(p_hwfn, "too many ilt lines...#lines=%d\n",
                       curr_line - p_hwfn->p_cxt_mngr->pf_start_line);
                return -EINVAL;
        }

        return 0;
}
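
/* Illustrative layout (hypothetical figures): if the PF starts at ILT line
 * 0x50 and the CDUC block needs 10 lines while the QM block needs 3, the
 * computation above yields CDUC lines [0x50..0x59] and QM lines
 * [0x5a..0x5c]; the final check then verifies that those 13 lines do not
 * exceed RESC_NUM(p_hwfn, QED_ILT).
 */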

#define for_each_ilt_valid_client(pos, clients) \
        for (pos = 0; pos < ILT_CLI_MAX; pos++)

/* Total number of ILT lines used by this PF */
static u32 qed_cxt_ilt_shadow_size(struct qed_ilt_client_cfg *ilt_clients)
{
        u32 size = 0;
        u32 i;

        for_each_ilt_valid_client(i, ilt_clients) {
                if (!ilt_clients[i].active)
                        continue;
                size += (ilt_clients[i].last.val -
                         ilt_clients[i].first.val + 1);
        }

        return size;
}

static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn)
{
        struct qed_ilt_client_cfg *p_cli = p_hwfn->p_cxt_mngr->clients;
        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        u32 ilt_size, i;

        ilt_size = qed_cxt_ilt_shadow_size(p_cli);

        for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
                struct qed_dma_mem *p_dma = &p_mngr->ilt_shadow[i];

                if (p_dma->p_virt)
                        dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                          p_dma->size, p_dma->p_virt,
                                          p_dma->p_phys);
                p_dma->p_virt = NULL;
        }
        kfree(p_mngr->ilt_shadow);
}

static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
                             struct qed_ilt_cli_blk *p_blk,
                             enum ilt_clients ilt_client,
                             u32 start_line_offset)
{
        struct qed_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
        u32 lines, line, sz_left;

        if (!p_blk->total_size)
                return 0;

        sz_left = p_blk->total_size;
        lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page);
        line = p_blk->start_line + start_line_offset -
               p_hwfn->p_cxt_mngr->pf_start_line;

        for (; lines; lines--) {
                dma_addr_t p_phys;
                void *p_virt;
                u32 size;

                size = min_t(u32, sz_left,
                             p_blk->real_size_in_page);
                p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                            size,
                                            &p_phys,
                                            GFP_KERNEL);
                if (!p_virt)
                        return -ENOMEM;
                memset(p_virt, 0, size);

                ilt_shadow[line].p_phys = p_phys;
                ilt_shadow[line].p_virt = p_virt;
                ilt_shadow[line].size = size;

                DP_VERBOSE(p_hwfn, QED_MSG_ILT,
                           "ILT shadow: Line [%d] Physical 0x%llx Virtual %p Size %d\n",
                           line, (u64)p_phys, p_virt, size);

                sz_left -= size;
                line++;
        }

        return 0;
}
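
/* Allocation sketch (illustrative, reusing the hypothetical numbers from
 * the earlier example): a block with total_size = 320000 and
 * real_size_in_page = 32640 is carved into ten coherent DMA chunks, nine
 * of 32640 bytes and a final one of min(sz_left, 32640) = 26240 bytes,
 * each recorded in one ilt_shadow line so that qed_ilt_init_pf() can
 * later point the hardware ILT at it.
 */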

static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        struct qed_ilt_client_cfg *clients = p_mngr->clients;
        struct qed_ilt_cli_blk *p_blk;
        u32 size, i, j;
        int rc;

        size = qed_cxt_ilt_shadow_size(clients);
        p_mngr->ilt_shadow = kcalloc(size, sizeof(struct qed_dma_mem),
                                     GFP_KERNEL);
        if (!p_mngr->ilt_shadow) {
                DP_NOTICE(p_hwfn, "Failed to allocate ilt shadow table\n");
                rc = -ENOMEM;
                goto ilt_shadow_fail;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_ILT,
                   "Allocated 0x%x bytes for ilt shadow\n",
                   (u32)(size * sizeof(struct qed_dma_mem)));

        for_each_ilt_valid_client(i, clients) {
                if (!clients[i].active)
                        continue;
                for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
                        p_blk = &clients[i].pf_blks[j];
                        rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
                        if (rc != 0)
                                goto ilt_shadow_fail;
                }
        }

        return 0;

ilt_shadow_fail:
        qed_ilt_shadow_free(p_hwfn);
        return rc;
}

static void qed_cid_map_free(struct qed_hwfn *p_hwfn)
{
        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        u32 type;

        for (type = 0; type < MAX_CONN_TYPES; type++) {
                kfree(p_mngr->acquired[type].cid_map);
                p_mngr->acquired[type].max_count = 0;
                p_mngr->acquired[type].start_cid = 0;
        }
}

static int qed_cid_map_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        u32 start_cid = 0;
        u32 type;

        for (type = 0; type < MAX_CONN_TYPES; type++) {
                u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
                u32 size;

                if (cid_cnt == 0)
                        continue;

                size = DIV_ROUND_UP(cid_cnt,
                                    sizeof(unsigned long) * BITS_PER_BYTE) *
                       sizeof(unsigned long);
                p_mngr->acquired[type].cid_map = kzalloc(size, GFP_KERNEL);
                if (!p_mngr->acquired[type].cid_map)
                        goto cid_map_fail;

                p_mngr->acquired[type].max_count = cid_cnt;
                p_mngr->acquired[type].start_cid = start_cid;

                p_hwfn->p_cxt_mngr->conn_cfg[type].cid_start = start_cid;

                DP_VERBOSE(p_hwfn, QED_MSG_CXT,
                           "Type %08x start: %08x count %08x\n",
                           type, p_mngr->acquired[type].start_cid,
                           p_mngr->acquired[type].max_count);
                start_cid += cid_cnt;
        }

        return 0;

cid_map_fail:
        qed_cid_map_free(p_hwfn);
        return -ENOMEM;
}
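
/* Sizing example (illustrative): on a 64-bit kernel an unsigned long holds
 * sizeof(unsigned long) * BITS_PER_BYTE = 64 bits, so 112 CIDs need
 * DIV_ROUND_UP(112, 64) = 2 longs, i.e. a 16-byte bitmap. start_cid
 * advances by cid_cnt per protocol, so the per-protocol ranges are
 * contiguous and non-overlapping in the global CID space.
 */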

int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_cxt_mngr *p_mngr;
        u32 i;

        p_mngr = kzalloc(sizeof(*p_mngr), GFP_KERNEL);
        if (!p_mngr) {
                DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_cxt_mngr'\n");
                return -ENOMEM;
        }

        /* Initialize ILT client registers */
        p_mngr->clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
        p_mngr->clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
        p_mngr->clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);

        p_mngr->clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
        p_mngr->clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
        p_mngr->clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);

        /* default ILT page size for all clients is 32K */
        for (i = 0; i < ILT_CLI_MAX; i++)
                p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;

        /* Set the cxt manager pointer prior to further allocations */
        p_hwfn->p_cxt_mngr = p_mngr;

        return 0;
}

int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn)
{
        int rc;

        /* Allocate the ILT shadow table */
        rc = qed_ilt_shadow_alloc(p_hwfn);
        if (rc) {
                DP_NOTICE(p_hwfn, "Failed to allocate ilt memory\n");
                goto tables_alloc_fail;
        }

        /* Allocate and initialize the acquired cids bitmaps */
        rc = qed_cid_map_alloc(p_hwfn);
        if (rc) {
                DP_NOTICE(p_hwfn, "Failed to allocate cid maps\n");
                goto tables_alloc_fail;
        }

        return 0;

tables_alloc_fail:
        qed_cxt_mngr_free(p_hwfn);
        return rc;
}

void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn)
{
        if (!p_hwfn->p_cxt_mngr)
                return;

        qed_cid_map_free(p_hwfn);
        qed_ilt_shadow_free(p_hwfn);
        kfree(p_hwfn->p_cxt_mngr);

        p_hwfn->p_cxt_mngr = NULL;
}

void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
{
        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        int type;

        /* Reset acquired cids */
        for (type = 0; type < MAX_CONN_TYPES; type++) {
                u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;

                if (cid_cnt == 0)
                        continue;

                memset(p_mngr->acquired[type].cid_map, 0,
                       DIV_ROUND_UP(cid_cnt,
                                    sizeof(unsigned long) * BITS_PER_BYTE) *
                       sizeof(unsigned long));
        }
}

/* CDU Common */
#define CDUC_CXT_SIZE_SHIFT \
        CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT

#define CDUC_CXT_SIZE_MASK \
        (CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE >> CDUC_CXT_SIZE_SHIFT)

#define CDUC_BLOCK_WASTE_SHIFT \
        CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT

#define CDUC_BLOCK_WASTE_MASK \
        (CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE >> CDUC_BLOCK_WASTE_SHIFT)

#define CDUC_NCIB_SHIFT \
        CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT

#define CDUC_NCIB_MASK \
        (CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT)

static void qed_cdu_init_common(struct qed_hwfn *p_hwfn)
{
        u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0;

        /* CDUC - connection configuration */
        page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
        cxt_size = CONN_CXT_SIZE(p_hwfn);
        elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
        block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;

        SET_FIELD(cdu_params, CDUC_CXT_SIZE, cxt_size);
        SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste);
        SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page);
        STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params);
}
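
/* Field-packing sketch (hypothetical values, reusing the 320-byte context
 * example): cdu_params would carry cxt_size = 320, elems_per_page = 102
 * and block_waste = 32768 - 102 * 320 = 128, each shifted into place by
 * SET_FIELD() via the CDUC_* masks above before the single runtime
 * register write.
 */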

void qed_qm_init_pf(struct qed_hwfn *p_hwfn)
{
        struct qed_qm_pf_rt_init_params params;
        struct qed_qm_info *qm_info = &p_hwfn->qm_info;
        struct qed_qm_iids iids;

        memset(&iids, 0, sizeof(iids));
        qed_cxt_qm_iids(p_hwfn, &iids);

        memset(&params, 0, sizeof(params));
        params.port_id = p_hwfn->port_id;
        params.pf_id = p_hwfn->rel_pf_id;
        params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
        params.is_first_pf = p_hwfn->first_on_engine;
        params.num_pf_cids = iids.cids;
        params.start_pq = qm_info->start_pq;
        params.num_pf_pqs = qm_info->num_pqs;
        params.start_vport = qm_info->start_vport;
        params.num_vports = qm_info->num_vports;
        params.pf_wfq = qm_info->pf_wfq;
        params.pf_rl = qm_info->pf_rl;
        params.pq_params = qm_info->qm_pq_params;
        params.vport_params = qm_info->qm_vport_params;

        qed_qm_pf_rt_init(p_hwfn, p_hwfn->p_main_ptt, &params);
}

/* CM PF */
static int qed_cm_init_pf(struct qed_hwfn *p_hwfn)
{
        union qed_qm_pq_params pq_params;
        u16 pq;

        /* XCM pure-LB queue */
        memset(&pq_params, 0, sizeof(pq_params));
        pq_params.core.tc = LB_TC;
        pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
        STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET, pq);

        return 0;
}

/* DQ PF */
static void qed_dq_init_pf(struct qed_hwfn *p_hwfn)
{
        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        u32 dq_pf_max_cid = 0;

        dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT);
        STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid);

        dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT);
        STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid);

        dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT);
        STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid);

        dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT);
        STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid);

        dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT);
        STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid);

        /* 5 - PF */
        dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT);
        STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid);
}
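
/* Range example (hypothetical counts): with 16 CIDs in conn_cfg[0] and 112
 * in conn_cfg[1], the running dq_pf_max_cid is 16 >> 4 = 1 after range 0
 * and 1 + (112 >> 4) = 8 after range 1; each DORQ_REG_PF_MAX_ICID_N
 * register therefore holds the cumulative upper bound of its range in
 * DQ_RANGE_ALIGN (16-CID) units.
 */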

static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn)
{
        struct qed_ilt_client_cfg *ilt_clients;
        int i;

        ilt_clients = p_hwfn->p_cxt_mngr->clients;
        for_each_ilt_valid_client(i, ilt_clients) {
                if (!ilt_clients[i].active)
                        continue;
                STORE_RT_REG(p_hwfn,
                             ilt_clients[i].first.reg,
                             ilt_clients[i].first.val);
                STORE_RT_REG(p_hwfn,
                             ilt_clients[i].last.reg,
                             ilt_clients[i].last.val);
                STORE_RT_REG(p_hwfn,
                             ilt_clients[i].p_size.reg,
                             ilt_clients[i].p_size.val);
        }
}

/* ILT (PSWRQ2) PF */
static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
{
        struct qed_ilt_client_cfg *clients;
        struct qed_cxt_mngr *p_mngr;
        struct qed_dma_mem *p_shdw;
        u32 line, rt_offst, i;

        qed_ilt_bounds_init(p_hwfn);

        p_mngr = p_hwfn->p_cxt_mngr;
        p_shdw = p_mngr->ilt_shadow;
        clients = p_hwfn->p_cxt_mngr->clients;

        for_each_ilt_valid_client(i, clients) {
                if (!clients[i].active)
                        continue;

                /* Client's first val and RT array are absolute, ILT
                 * shadow's lines are relative.
                 */
                line = clients[i].first.val - p_mngr->pf_start_line;
                rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET +
                           clients[i].first.val * ILT_ENTRY_IN_REGS;

                for (; line <= clients[i].last.val - p_mngr->pf_start_line;
                     line++, rt_offst += ILT_ENTRY_IN_REGS) {
                        u64 ilt_hw_entry = 0;

                        /* p_virt could be NULL in case of dynamic
                         * allocation
                         */
                        if (p_shdw[line].p_virt) {
                                SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
                                SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
                                          (p_shdw[line].p_phys >> 12));

                                DP_VERBOSE(p_hwfn, QED_MSG_ILT,
                                           "Setting RT[0x%08x] from ILT[0x%08x] [Client is %d] to Physical addr: 0x%llx\n",
                                           rt_offst, line, i,
                                           (u64)(p_shdw[line].p_phys >> 12));
                        }

                        STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
                }
        }
}

void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
{
        qed_cdu_init_common(p_hwfn);
}

void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn)
{
        qed_qm_init_pf(p_hwfn);
        qed_cm_init_pf(p_hwfn);
        qed_dq_init_pf(p_hwfn);
        qed_ilt_init_pf(p_hwfn);
}

int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
                        enum protocol_type type,
                        u32 *p_cid)
{
        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        u32 rel_cid;

        if (type >= MAX_CONN_TYPES || !p_mngr->acquired[type].cid_map) {
                DP_NOTICE(p_hwfn, "Invalid protocol type %d", type);
                return -EINVAL;
        }

        rel_cid = find_first_zero_bit(p_mngr->acquired[type].cid_map,
                                      p_mngr->acquired[type].max_count);

        if (rel_cid >= p_mngr->acquired[type].max_count) {
                DP_NOTICE(p_hwfn, "no CID available for protocol %d\n",
                          type);
                return -EINVAL;
        }

        __set_bit(rel_cid, p_mngr->acquired[type].cid_map);

        *p_cid = rel_cid + p_mngr->acquired[type].start_cid;

        return 0;
}
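
/* Typical caller flow (illustrative sketch; error handling elided):
 *
 *      u32 cid;
 *
 *      if (!qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid)) {
 *              ... use cid for a new connection ...
 *              qed_cxt_release_cid(p_hwfn, cid);
 *      }
 *
 * The returned CID is absolute (relative bit index plus the protocol's
 * start_cid), which is why release translates it back before clearing the
 * bitmap bit.
 */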

static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
                                      u32 cid,
                                      enum protocol_type *p_type)
{
        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        struct qed_cid_acquired_map *p_map;
        enum protocol_type p;
        u32 rel_cid;

        /* Iterate over protocols and find matching cid range */
        for (p = 0; p < MAX_CONN_TYPES; p++) {
                p_map = &p_mngr->acquired[p];

                if (!p_map->cid_map)
                        continue;
                if (cid >= p_map->start_cid &&
                    cid < p_map->start_cid + p_map->max_count)
                        break;
        }
        *p_type = p;

        if (p == MAX_CONN_TYPES) {
                DP_NOTICE(p_hwfn, "Invalid CID %d", cid);
                return false;
        }

        rel_cid = cid - p_map->start_cid;
        if (!test_bit(rel_cid, p_map->cid_map)) {
                DP_NOTICE(p_hwfn, "CID %d not acquired", cid);
                return false;
        }
        return true;
}

void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
                         u32 cid)
{
        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        enum protocol_type type;
        bool b_acquired;
        u32 rel_cid;

        /* Test acquired and find matching per-protocol map */
        b_acquired = qed_cxt_test_cid_acquired(p_hwfn, cid, &type);

        if (!b_acquired)
                return;

        rel_cid = cid - p_mngr->acquired[type].start_cid;
        __clear_bit(rel_cid, p_mngr->acquired[type].cid_map);
}

int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
                         struct qed_cxt_info *p_info)
{
        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
        enum protocol_type type;
        bool b_acquired;

        /* Test acquired and find matching per-protocol map */
        b_acquired = qed_cxt_test_cid_acquired(p_hwfn, p_info->iid, &type);

        if (!b_acquired)
                return -EINVAL;

        /* set the protocol type */
        p_info->type = type;

        /* compute context virtual pointer */
        hw_p_size = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;

        conn_cxt_size = CONN_CXT_SIZE(p_hwfn);
        cxts_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / conn_cxt_size;
        line = p_info->iid / cxts_per_p;

        /* Make sure context is allocated (dynamic allocation) */
        if (!p_mngr->ilt_shadow[line].p_virt)
                return -EINVAL;

        p_info->p_cxt = p_mngr->ilt_shadow[line].p_virt +
                        p_info->iid % cxts_per_p * conn_cxt_size;

        DP_VERBOSE(p_hwfn, (QED_MSG_ILT | QED_MSG_CXT),
                   "Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n",
                   p_info->iid / cxts_per_p, p_info->p_cxt, p_info->iid);

        return 0;
}
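
/* Address-computation example (hypothetical numbers): with 32K ILT pages
 * and a 320-byte context, cxts_per_p = 102. For iid = 205 this selects
 * shadow line 205 / 102 = 2 and byte offset (205 % 102) * 320 = 320 within
 * that page, the virtual counterpart of the physical address the hardware
 * reaches through the ILT.
 */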

int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
{
        struct qed_eth_pf_params *p_params = &p_hwfn->pf_params.eth_pf_params;

        /* Set the number of required CORE connections */
        u32 core_cids = 1; /* SPQ */

        qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids);

        qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
                                    p_params->num_cons);

        return 0;
}