drivers/net/ethernet/qlogic/qed/qed_init_ops.c
/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_reg_addr.h"

#define QED_INIT_MAX_POLL_COUNT	100
#define QED_INIT_POLL_PERIOD_US	500

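/* Mapping of the PXP global windows; each non-zero entry holds the upper
 * bits of the window's target GRC address (window address = entry << 12,
 * i.e. 4KB-aligned, matching the 4096-byte window size noted below).
 * Zero entries leave the corresponding window unconfigured.
 */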
static u32 pxp_global_win[] = {
	0,
	0,
	0x1c02, /* win 2: addr=0x1c02000, size=4096 bytes */
	0x1c80, /* win 3: addr=0x1c80000, size=4096 bytes */
	0x1d00, /* win 4: addr=0x1d00000, size=4096 bytes */
	0x1d01, /* win 5: addr=0x1d01000, size=4096 bytes */
	0x1d80, /* win 6: addr=0x1d80000, size=4096 bytes */
	0x1d81, /* win 7: addr=0x1d81000, size=4096 bytes */
	0x1d82, /* win 8: addr=0x1d82000, size=4096 bytes */
	0x1e00, /* win 9: addr=0x1e00000, size=4096 bytes */
	0x1e80, /* win 10: addr=0x1e80000, size=4096 bytes */
	0x1f00, /* win 11: addr=0x1f00000, size=4096 bytes */
	0,
	0,
	0,
	0,
	0,
	0,
	0,
};

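/* Publish the FW-generated IRO (internal RAM offsets) array on the device;
 * consumers use it when computing addresses of per-function data in the
 * storm RAMs.
 */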
void qed_init_iro_array(struct qed_dev *cdev)
{
	cdev->iro_arr = iro_arr;
}

/* Runtime configuration helpers */
void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn)
{
	int i;

	for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
		p_hwfn->rt_data[i].b_valid = false;
}

void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn,
			   u32 rt_offset,
			   u32 val)
{
	p_hwfn->rt_data[rt_offset].init_val = val;
	p_hwfn->rt_data[rt_offset].b_valid = true;
}

void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
			   u32 rt_offset,
			   u32 *val,
			   size_t size)
{
	size_t i;

	for (i = 0; i < size / sizeof(u32); i++) {
		p_hwfn->rt_data[rt_offset + i].init_val = val[i];
		p_hwfn->rt_data[rt_offset + i].b_valid = true;
	}
}

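/* Write every valid runtime entry in [rt_offset, rt_offset + size) to its
 * register; entries never stored via the helpers above are skipped.
 */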
static void qed_init_rt(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt,
			u32 addr,
			u32 rt_offset,
			u32 size)
{
	struct qed_rt_data *rt_data = p_hwfn->rt_data + rt_offset;
	u32 i;

	for (i = 0; i < size; i++) {
		if (!rt_data[i].b_valid)
			continue;
		qed_wr(p_hwfn, p_ptt, addr + (i << 2), rt_data[i].init_val);
	}
}

int qed_init_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_rt_data *rt_data;

	rt_data = kzalloc(sizeof(*rt_data) * RUNTIME_ARRAY_SIZE, GFP_ATOMIC);
	if (!rt_data)
		return -ENOMEM;

	p_hwfn->rt_data = rt_data;

	return 0;
}

void qed_init_free(struct qed_hwfn *p_hwfn)
{
	kfree(p_hwfn->rt_data);
	p_hwfn->rt_data = NULL;
}

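/* Copy an init-values array into GRC: short transfers (under 16 dwords)
 * that don't target a wide-bus register are done as individual register
 * writes; anything longer, or anything that must use DMAE, goes through a
 * single host-to-GRC DMAE transaction.
 */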
static int qed_init_array_dmae(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt,
			       u32 addr,
			       u32 dmae_data_offset,
			       u32 size,
			       const u32 *buf,
			       bool b_must_dmae,
			       bool b_can_dmae)
{
	int rc = 0;

	/* Perform DMAE only for lengthy enough sections or for wide-bus */
	if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
		const u32 *data = buf + dmae_data_offset;
		u32 i;

		for (i = 0; i < size; i++)
			qed_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
	} else {
		rc = qed_dmae_host2grc(p_hwfn, p_ptt,
				       (uintptr_t)(buf + dmae_data_offset),
				       addr, size, 0);
	}

	return rc;
}

static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      u32 addr,
			      u32 fill,
			      u32 fill_count)
{
	static u32 zero_buffer[DMAE_MAX_RW_SIZE];

	memset(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);

	/* invoke the DMAE virtual/physical buffer API with
	 * 1. DMAE init channel
	 * 2. addr,
	 * 3. zero_buffer (a zeroed source, replicated fill_count times
	 *    via the QED_DMAE_FLAG_RW_REPL_SRC flag),
	 * 4. fill_count
	 */

	return qed_dmae_host2grc(p_hwfn, p_ptt,
				 (uintptr_t)(&zero_buffer[0]),
				 addr, fill_count,
				 QED_DMAE_FLAG_RW_REPL_SRC);
}

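/* Slow-path fill: write the same value fill_count times using individual
 * register writes; used when DMAE is unavailable or not worthwhile.
 */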
static void qed_init_fill(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  u32 addr,
			  u32 fill,
			  u32 fill_count)
{
	u32 i;

	for (i = 0; i < fill_count; i++, addr += sizeof(u32))
		qed_wr(p_hwfn, p_ptt, addr, fill);
}

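/* Execute an array-sourced write command. The array header selects one of
 * three encodings: zipped (decompress into unzip_buf first), pattern (a
 * short sequence repeated back-to-back) or standard (plain dword array).
 */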
static int qed_init_cmd_array(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      struct init_write_op *cmd,
			      bool b_must_dmae,
			      bool b_can_dmae)
{
	u32 data = le32_to_cpu(cmd->data);
	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
	u32 dmae_array_offset = le32_to_cpu(cmd->args.array_offset);
	u32 offset, output_len, input_len, max_size;
	struct qed_dev *cdev = p_hwfn->cdev;
	union init_array_hdr *hdr;
	const u32 *array_data;
	int rc = 0;
	u32 size;

	array_data = cdev->fw_data->arr_data;

	hdr = (union init_array_hdr *)(array_data +
				       dmae_array_offset);
	data = le32_to_cpu(hdr->raw.data);
	switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
	case INIT_ARR_ZIPPED:
		offset = dmae_array_offset + 1;
		input_len = GET_FIELD(data,
				      INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
		max_size = MAX_ZIPPED_SIZE * 4;
		memset(p_hwfn->unzip_buf, 0, max_size);

		output_len = qed_unzip_data(p_hwfn, input_len,
					    (u8 *)&array_data[offset],
					    max_size, (u8 *)p_hwfn->unzip_buf);
		if (output_len) {
			rc = qed_init_array_dmae(p_hwfn, p_ptt, addr, 0,
						 output_len,
						 p_hwfn->unzip_buf,
						 b_must_dmae, b_can_dmae);
		} else {
			DP_NOTICE(p_hwfn, "Failed to unzip dmae data\n");
			rc = -EINVAL;
		}
		break;
	case INIT_ARR_PATTERN:
	{
		u32 repeats = GET_FIELD(data,
					INIT_ARRAY_PATTERN_HDR_REPETITIONS);
		u32 i;

		size = GET_FIELD(data, INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);

		for (i = 0; i < repeats; i++, addr += size << 2) {
			rc = qed_init_array_dmae(p_hwfn, p_ptt, addr,
						 dmae_array_offset + 1,
						 size, array_data,
						 b_must_dmae, b_can_dmae);
			if (rc)
				break;
		}
		break;
	}
	case INIT_ARR_STANDARD:
		size = GET_FIELD(data, INIT_ARRAY_STANDARD_HDR_SIZE);
		rc = qed_init_array_dmae(p_hwfn, p_ptt, addr,
					 dmae_array_offset + 1,
					 size, array_data,
					 b_must_dmae, b_can_dmae);
		break;
	}

	return rc;
}

/* init_ops write command */
static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt,
			   struct init_write_op *cmd,
			   bool b_can_dmae)
{
	u32 data = le32_to_cpu(cmd->data);
	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
	bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
	union init_write_args *arg = &cmd->args;
	int rc = 0;

	/* Sanitize */
	if (b_must_dmae && !b_can_dmae) {
		DP_NOTICE(p_hwfn,
			  "Need to write to %08x for Wide-bus but DMAE isn't allowed\n",
			  addr);
		return -EINVAL;
	}

	switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
	case INIT_SRC_INLINE:
		qed_wr(p_hwfn, p_ptt, addr,
		       le32_to_cpu(arg->inline_val));
		break;
	case INIT_SRC_ZEROS:
		if (b_must_dmae ||
		    (b_can_dmae && (le32_to_cpu(arg->zeros_count) >= 64)))
			rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0,
						le32_to_cpu(arg->zeros_count));
		else
			qed_init_fill(p_hwfn, p_ptt, addr, 0,
				      le32_to_cpu(arg->zeros_count));
		break;
	case INIT_SRC_ARRAY:
		rc = qed_init_cmd_array(p_hwfn, p_ptt, cmd,
					b_must_dmae, b_can_dmae);
		break;
	case INIT_SRC_RUNTIME:
		qed_init_rt(p_hwfn, p_ptt, addr,
			    le16_to_cpu(arg->runtime.offset),
			    le16_to_cpu(arg->runtime.size));
		break;
	}

	return rc;
}

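/* Comparison callbacks for polled reads: comp_eq() requires an exact
 * match, comp_and() requires all expected bits to be set, and comp_or()
 * is satisfied whenever the read value or the expected value is non-zero.
 */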
static inline bool comp_eq(u32 val, u32 expected_val)
{
	return val == expected_val;
}

static inline bool comp_and(u32 val, u32 expected_val)
{
	return (val & expected_val) == expected_val;
}

static inline bool comp_or(u32 val, u32 expected_val)
{
	return (val | expected_val) > 0;
}

/* init_ops read/poll commands */
static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    struct init_read_op *cmd)
{
	u32 data = le32_to_cpu(cmd->op_data);
	u32 addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;

	bool (*comp_check)(u32 val,
			   u32 expected_val);
	u32 delay = QED_INIT_POLL_PERIOD_US, val;

	val = qed_rd(p_hwfn, p_ptt, addr);

	data = le32_to_cpu(cmd->op_data);
	if (GET_FIELD(data, INIT_READ_OP_POLL)) {
		int i;

		switch (GET_FIELD(data, INIT_READ_OP_POLL_COMP)) {
		case INIT_COMPARISON_EQ:
			comp_check = comp_eq;
			break;
		case INIT_COMPARISON_OR:
			comp_check = comp_or;
			break;
		case INIT_COMPARISON_AND:
			comp_check = comp_and;
			break;
		default:
			comp_check = NULL;
			DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
			       data);
			return;
		}

		for (i = 0;
		     i < QED_INIT_MAX_POLL_COUNT &&
		     !comp_check(val, le32_to_cpu(cmd->expected_val));
		     i++) {
			udelay(delay);
			val = qed_rd(p_hwfn, p_ptt, addr);
		}

		if (i == QED_INIT_MAX_POLL_COUNT)
			DP_ERR(p_hwfn,
			       "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n",
			       addr, le32_to_cpu(cmd->expected_val),
			       val, data);
	}
}

/* init_ops callbacks entry point */
static void qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    struct init_callback_op *p_cmd)
{
	DP_NOTICE(p_hwfn, "Currently init values have no need of callbacks\n");
}

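/* Recursively evaluate the modes tree. The tree is serialized in prefix
 * notation: NOT/OR/AND operator bytes are followed by their operand
 * subtree(s), while any other byte is a leaf naming a mode bit (offset by
 * MAX_INIT_MODE_OPS) to test against the 'modes' bitmask.
 */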
static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn,
				  u16 *offset,
				  int modes)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	const u8 *modes_tree_buf;
	u8 arg1, arg2, tree_val;

	modes_tree_buf = cdev->fw_data->modes_tree_buf;
	tree_val = modes_tree_buf[(*offset)++];
	switch (tree_val) {
	case INIT_MODE_OP_NOT:
		return qed_init_cmd_mode_match(p_hwfn, offset, modes) ^ 1;
	case INIT_MODE_OP_OR:
		arg1 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
		arg2 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
		return arg1 | arg2;
	case INIT_MODE_OP_AND:
		arg1 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
		arg2 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
		return arg1 & arg2;
	default:
		tree_val -= MAX_INIT_MODE_OPS;
		return (modes & (1 << tree_val)) ? 1 : 0;
	}
}

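/* Handle an IF_MODE command: when the mode expression matches, fall
 * through to the next command; otherwise return how many commands the
 * interpreter loop should skip.
 */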
static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn,
			     struct init_if_mode_op *p_cmd,
			     int modes)
{
	u16 offset = le16_to_cpu(p_cmd->modes_buf_offset);

	if (qed_init_cmd_mode_match(p_hwfn, &offset, modes))
		return 0;
	else
		return GET_FIELD(le32_to_cpu(p_cmd->op_data),
				 INIT_IF_MODE_OP_CMD_OFFSET);
}

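/* Handle an IF_PHASE command: run the guarded commands only when both the
 * phase and the phase ID match (ANY_PHASE_ID matches any ID); otherwise
 * return the number of commands to skip.
 */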
static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn,
			      struct init_if_phase_op *p_cmd,
			      u32 phase,
			      u32 phase_id)
{
	u32 data = le32_to_cpu(p_cmd->phase_data);
	u32 op_data = le32_to_cpu(p_cmd->op_data);

	if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
	      (GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
	       GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id)))
		return GET_FIELD(op_data, INIT_IF_PHASE_OP_CMD_OFFSET);
	else
		return 0;
}

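/* Main init_ops entry point: iterate over the FW-provided command list
 * for the given phase, dispatching each opcode to its handler. DMAE use
 * is gated per-phase by the IF_PHASE command's DMAE_ENABLE flag.
 */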
int qed_init_run(struct qed_hwfn *p_hwfn,
		 struct qed_ptt *p_ptt,
		 int phase,
		 int phase_id,
		 int modes)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u32 cmd_num, num_init_ops;
	union init_op *init_ops;
	bool b_dmae = false;
	int rc = 0;

	num_init_ops = cdev->fw_data->init_ops_size;
	init_ops = cdev->fw_data->init_ops;

	p_hwfn->unzip_buf = kzalloc(MAX_ZIPPED_SIZE * 4, GFP_ATOMIC);
	if (!p_hwfn->unzip_buf) {
		DP_NOTICE(p_hwfn, "Failed to allocate unzip buffer\n");
		return -ENOMEM;
	}

	for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
		union init_op *cmd = &init_ops[cmd_num];
		u32 data = le32_to_cpu(cmd->raw.op_data);

		switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
		case INIT_OP_WRITE:
			rc = qed_init_cmd_wr(p_hwfn, p_ptt, &cmd->write,
					     b_dmae);
			break;
		case INIT_OP_READ:
			qed_init_cmd_rd(p_hwfn, p_ptt, &cmd->read);
			break;
		case INIT_OP_IF_MODE:
			cmd_num += qed_init_cmd_mode(p_hwfn, &cmd->if_mode,
						     modes);
			break;
		case INIT_OP_IF_PHASE:
			cmd_num += qed_init_cmd_phase(p_hwfn, &cmd->if_phase,
						      phase, phase_id);
			b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
			break;
		case INIT_OP_DELAY:
			/* qed_init_run is always invoked from
			 * sleep-able context
			 */
			usleep_range(le32_to_cpu(cmd->delay.delay),
				     le32_to_cpu(cmd->delay.delay) + 20);
			break;

		case INIT_OP_CALLBACK:
			qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
			break;
		}

		if (rc)
			break;
	}

	kfree(p_hwfn->unzip_buf);
	return rc;
}

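/* Program the global windows in the PXP window-admin area; each configured
 * window maps host accesses to the GRC address recorded in pxp_global_win[].
 */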
void qed_gtt_init(struct qed_hwfn *p_hwfn)
{
	u32 gtt_base;
	u32 i;

	/* Set the global windows */
	gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START;

	for (i = 0; i < ARRAY_SIZE(pxp_global_win); i++)
		if (pxp_global_win[i])
			REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE,
			       pxp_global_win[i]);
}

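/* Parse the firmware data blob: it starts with a table of
 * struct bin_buffer_hdr entries, each giving the offset and length of one
 * section (init commands, init values, modes tree) within the blob.
 */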
int qed_init_fw_data(struct qed_dev *cdev,
		     const u8 *data)
{
	struct qed_fw_data *fw = cdev->fw_data;
	struct bin_buffer_hdr *buf_hdr;
	u32 offset, len;

	if (!data) {
		DP_NOTICE(cdev, "Invalid fw data\n");
		return -EINVAL;
	}

	buf_hdr = (struct bin_buffer_hdr *)data;

	offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
	fw->init_ops = (union init_op *)(data + offset);

	offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
	fw->arr_data = (u32 *)(data + offset);

	offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
	fw->modes_tree_buf = (u8 *)(data + offset);
	len = buf_hdr[BIN_BUF_INIT_CMD].length;
	fw->init_ops_size = len / sizeof(struct init_raw_op);

	return 0;
}