mlxsw: core: Do not use EMADs in mlxsw_emad_fini
/*
 * drivers/net/ethernet/mellanox/mlxsw/core.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/if_link.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/u64_stats_sync.h>
#include <linux/netdevice.h>
#include <linux/wait.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <asm/byteorder.h>

#include "core.h"
#include "item.h"
#include "cmd.h"
#include "port.h"
#include "trap.h"
#include "emad.h"
#include "reg.h"

static LIST_HEAD(mlxsw_core_driver_list);
static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);

static const char mlxsw_core_driver_name[] = "mlxsw_core";

static struct dentry *mlxsw_core_dbg_root;

struct mlxsw_core_pcpu_stats {
	u64 trap_rx_packets[MLXSW_TRAP_ID_MAX];
	u64 trap_rx_bytes[MLXSW_TRAP_ID_MAX];
	u64 port_rx_packets[MLXSW_PORT_MAX_PORTS];
	u64 port_rx_bytes[MLXSW_PORT_MAX_PORTS];
	struct u64_stats_sync syncp;
	u32 trap_rx_dropped[MLXSW_TRAP_ID_MAX];
	u32 port_rx_dropped[MLXSW_PORT_MAX_PORTS];
	u32 trap_rx_invalid;
	u32 port_rx_invalid;
};

struct mlxsw_core {
	struct mlxsw_driver *driver;
	const struct mlxsw_bus *bus;
	void *bus_priv;
	const struct mlxsw_bus_info *bus_info;
	struct list_head rx_listener_list;
	struct list_head event_listener_list;
	struct {
		struct sk_buff *resp_skb;
		u64 tid;
		wait_queue_head_t wait;
		bool trans_active;
		struct mutex lock; /* One EMAD transaction at a time. */
		bool use_emad;
	} emad;
	struct mlxsw_core_pcpu_stats __percpu *pcpu_stats;
	struct dentry *dbg_dir;
	struct {
		struct debugfs_blob_wrapper vsd_blob;
		struct debugfs_blob_wrapper psid_blob;
	} dbg;
	unsigned long driver_priv[0];
	/* driver_priv has to be always the last item */
};

struct mlxsw_rx_listener_item {
	struct list_head list;
	struct mlxsw_rx_listener rxl;
	void *priv;
};

struct mlxsw_event_listener_item {
	struct list_head list;
	struct mlxsw_event_listener el;
	void *priv;
};

/******************
 * EMAD processing
 ******************/

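/* An EMAD is an Ethernet frame the driver exchanges with the device's
 * CPU port. It is built back-to-front with skb_push() and laid out as:
 *
 *	Ethernet header | operation TLV | register TLV (payload) | end TLV
 *
 * The MLXSW_ITEM* definitions below generate the accessors used to
 * pack and parse the individual fields.
 */
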
/* emad_eth_hdr_dmac
 * Destination MAC in EMAD's Ethernet header.
 * Must be set to 01:02:c9:00:00:01
 */
MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);

/* emad_eth_hdr_smac
 * Source MAC in EMAD's Ethernet header.
 * Must be set to 00:02:c9:01:02:03
 */
MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);

/* emad_eth_hdr_ethertype
 * Ethertype in EMAD's Ethernet header.
 * Must be set to 0x8932
 */
MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);

/* emad_eth_hdr_mlx_proto
 * Mellanox protocol.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);

/* emad_eth_hdr_ver
 * Mellanox protocol version.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);

/* emad_op_tlv_type
 * Type of the TLV.
 * Must be set to 0x1 (operation TLV).
 */
MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);

/* emad_op_tlv_len
 * Length of the operation TLV in u32.
 * Must be set to 0x4.
 */
MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);

/* emad_op_tlv_dr
 * Direct route bit. Setting to 1 indicates the EMAD is a direct route
 * EMAD. DR TLV must follow.
 *
 * Note: Currently not supported and must not be set.
 */
MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);

/* emad_op_tlv_status
 * Returned status in case of EMAD response. Must be set to 0 in case
 * of EMAD request.
 * 0x0 - success
 * 0x1 - device is busy. Requester should retry
 * 0x2 - Mellanox protocol version not supported
 * 0x3 - unknown TLV
 * 0x4 - register not supported
 * 0x5 - operation class not supported
 * 0x6 - EMAD method not supported
 * 0x7 - bad parameter (e.g. port out of range)
 * 0x8 - resource not available
 * 0x9 - message receipt acknowledgment. Requester should retry
 * 0x70 - internal error
 */
MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);

/* emad_op_tlv_register_id
 * Register ID of register within register TLV.
 */
MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);

/* emad_op_tlv_r
 * Response bit. Setting to 1 indicates Response, otherwise request.
 */
MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);

/* emad_op_tlv_method
 * EMAD method type.
 * 0x1 - query
 * 0x2 - write
 * 0x3 - send (currently not supported)
 * 0x4 - event
 */
MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);

/* emad_op_tlv_class
 * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
 */
MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);

/* emad_op_tlv_tid
 * EMAD transaction ID. Used for pairing request and response EMADs.
 */
MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);

/* emad_reg_tlv_type
 * Type of the TLV.
 * Must be set to 0x3 (register TLV).
 */
MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);

/* emad_reg_tlv_len
 * Length of the register TLV in u32.
 */
MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);

/* emad_end_tlv_type
 * Type of the TLV.
 * Must be set to 0x0 (end TLV).
 */
MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);

/* emad_end_tlv_len
 * Length of the end TLV in u32.
 * Must be set to 1.
 */
MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);

enum mlxsw_core_reg_access_type {
	MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
	MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
};

static inline const char *
mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
{
	switch (type) {
	case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
		return "query";
	case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
		return "write";
	}
	BUG();
}

static void mlxsw_emad_pack_end_tlv(char *end_tlv)
{
	mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
	mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
}

static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
				    const struct mlxsw_reg_info *reg,
				    char *payload)
{
	mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
	mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
	memcpy(reg_tlv + sizeof(u32), payload, reg->len);
}

static void mlxsw_emad_pack_op_tlv(char *op_tlv,
				   const struct mlxsw_reg_info *reg,
				   enum mlxsw_core_reg_access_type type,
				   struct mlxsw_core *mlxsw_core)
{
	mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
	mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
	mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
	mlxsw_emad_op_tlv_status_set(op_tlv, 0);
	mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
	mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
	if (MLXSW_CORE_REG_ACCESS_TYPE_QUERY == type)
		mlxsw_emad_op_tlv_method_set(op_tlv,
					     MLXSW_EMAD_OP_TLV_METHOD_QUERY);
	else
		mlxsw_emad_op_tlv_method_set(op_tlv,
					     MLXSW_EMAD_OP_TLV_METHOD_WRITE);
	mlxsw_emad_op_tlv_class_set(op_tlv,
				    MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
	mlxsw_emad_op_tlv_tid_set(op_tlv, mlxsw_core->emad.tid);
}

static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
{
	char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);

	mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
	mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
	mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
	mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
	mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);

	skb_reset_mac_header(skb);

	return 0;
}

static void mlxsw_emad_construct(struct sk_buff *skb,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 struct mlxsw_core *mlxsw_core)
{
	char *buf;

	buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_end_tlv(buf);

	buf = skb_push(skb, reg->len + sizeof(u32));
	mlxsw_emad_pack_reg_tlv(buf, reg, payload);

	buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_op_tlv(buf, reg, type, mlxsw_core);

	mlxsw_emad_construct_eth_hdr(skb);
}

static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
{
	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN));
}

static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
{
	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN +
			  MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)));
}

static char *mlxsw_emad_reg_payload(const char *op_tlv)
{
	return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
}

static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return mlxsw_emad_op_tlv_tid_get(op_tlv);
}

static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return (MLXSW_EMAD_OP_TLV_RESPONSE == mlxsw_emad_op_tlv_r_get(op_tlv));
}

#define MLXSW_EMAD_TIMEOUT_MS 200

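/* Send a single EMAD and sleep until the RX listener pairs a response
 * with it or the timeout above expires. Only one transaction may be in
 * flight at a time, which is what emad.trans_active tracks.
 */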
static int __mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
				 struct sk_buff *skb,
				 const struct mlxsw_tx_info *tx_info)
{
	int err;
	int ret;

	err = mlxsw_core_skb_transmit(mlxsw_core->driver_priv, skb, tx_info);
	if (err) {
		dev_err(mlxsw_core->bus_info->dev, "Failed to transmit EMAD (tid=%llx)\n",
			mlxsw_core->emad.tid);
		dev_kfree_skb(skb);
		return err;
	}

	mlxsw_core->emad.trans_active = true;
	ret = wait_event_timeout(mlxsw_core->emad.wait,
				 !(mlxsw_core->emad.trans_active),
				 msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS));
	if (!ret) {
		dev_warn(mlxsw_core->bus_info->dev, "EMAD timed-out (tid=%llx)\n",
			 mlxsw_core->emad.tid);
		mlxsw_core->emad.trans_active = false;
		return -EIO;
	}

	return 0;
}

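/* Map the status carried in a response's operation TLV to an errno:
 * busy and message-receipt-ack statuses become -EAGAIN so the caller
 * retries, and any other failure is a hard -EIO.
 */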
static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core,
				     char *op_tlv)
{
	enum mlxsw_emad_op_tlv_status status;
	u64 tid;

	status = mlxsw_emad_op_tlv_status_get(op_tlv);
	tid = mlxsw_emad_op_tlv_tid_get(op_tlv);

	switch (status) {
	case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
		return 0;
	case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
	case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
		dev_warn(mlxsw_core->bus_info->dev, "Reg access status again (tid=%llx,status=%x(%s))\n",
			 tid, status, mlxsw_emad_op_tlv_status_str(status));
		return -EAGAIN;
	case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
	case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
	case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
	case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
	default:
		dev_err(mlxsw_core->bus_info->dev, "Reg access status failed (tid=%llx,status=%x(%s))\n",
			tid, status, mlxsw_emad_op_tlv_status_str(status));
		return -EIO;
	}
}

static int mlxsw_emad_process_status_skb(struct mlxsw_core *mlxsw_core,
					 struct sk_buff *skb)
{
	return mlxsw_emad_process_status(mlxsw_core, mlxsw_emad_op_tlv(skb));
}

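/* Transmit an EMAD, retrying up to MLXSW_EMAD_MAX_RETRY times while
 * the device keeps returning a retryable (-EAGAIN) status. The
 * transaction ID is bumped once the transaction is over, whether it
 * succeeded or not.
 */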
static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
			       struct sk_buff *skb,
			       const struct mlxsw_tx_info *tx_info)
{
	struct sk_buff *trans_skb;
	int n_retry;
	int err;

	n_retry = 0;
retry:
	/* We copy the EMAD to a new skb, since we might need
	 * to retransmit it in case of failure.
	 */
	trans_skb = skb_copy(skb, GFP_KERNEL);
	if (!trans_skb) {
		err = -ENOMEM;
		goto out;
	}

	err = __mlxsw_emad_transmit(mlxsw_core, trans_skb, tx_info);
	if (!err) {
		struct sk_buff *resp_skb = mlxsw_core->emad.resp_skb;

		err = mlxsw_emad_process_status_skb(mlxsw_core, resp_skb);
		if (err)
			dev_kfree_skb(resp_skb);
		if (!err || err != -EAGAIN)
			goto out;
	}
	if (n_retry++ < MLXSW_EMAD_MAX_RETRY)
		goto retry;

out:
	dev_kfree_skb(skb);
	mlxsw_core->emad.tid++;
	return err;
}

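/* RX handler for trapped EMADs. A frame is claimed only if it is a
 * response whose transaction ID matches the transaction currently in
 * flight; anything else is dropped.
 */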
static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
					void *priv)
{
	struct mlxsw_core *mlxsw_core = priv;

	if (mlxsw_emad_is_resp(skb) &&
	    mlxsw_core->emad.trans_active &&
	    mlxsw_emad_get_tid(skb) == mlxsw_core->emad.tid) {
		mlxsw_core->emad.resp_skb = skb;
		mlxsw_core->emad.trans_active = false;
		wake_up(&mlxsw_core->emad.wait);
	} else {
		dev_kfree_skb(skb);
	}
}

static const struct mlxsw_rx_listener mlxsw_emad_rx_listener = {
	.func = mlxsw_emad_rx_listener_func,
	.local_port = MLXSW_PORT_DONT_CARE,
	.trap_id = MLXSW_TRAP_ID_ETHEMAD,
};

static int mlxsw_emad_traps_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
			    MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
			    MLXSW_TRAP_ID_ETHEMAD);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
}

static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
{
	int err;

	/* Set the upper 32 bits of the transaction ID field to a random
	 * number. This allows us to discard EMADs addressed to other
	 * devices.
	 */
	get_random_bytes(&mlxsw_core->emad.tid, 4);
	mlxsw_core->emad.tid = mlxsw_core->emad.tid << 32;

	init_waitqueue_head(&mlxsw_core->emad.wait);
	mlxsw_core->emad.trans_active = false;
	mutex_init(&mlxsw_core->emad.lock);

	err = mlxsw_core_rx_listener_register(mlxsw_core,
					      &mlxsw_emad_rx_listener,
					      mlxsw_core);
	if (err)
		return err;

	err = mlxsw_emad_traps_set(mlxsw_core);
	if (err)
		goto err_emad_trap_set;

	mlxsw_core->emad.use_emad = true;

	return 0;

err_emad_trap_set:
	mlxsw_core_rx_listener_unregister(mlxsw_core,
					  &mlxsw_emad_rx_listener,
					  mlxsw_core);
	return err;
}

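/* Tear down the EMAD path. Note that use_emad is cleared first, so the
 * HPKT write below goes through the command interface: the EMAD trap
 * is being disabled here, so EMADs must not be used in this function.
 */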
static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];

	mlxsw_core->emad.use_emad = false;
	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
			    MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
			    MLXSW_TRAP_ID_ETHEMAD);
	mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);

	mlxsw_core_rx_listener_unregister(mlxsw_core,
					  &mlxsw_emad_rx_listener,
					  mlxsw_core);
}

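/* Allocate an skb big enough for the whole EMAD frame and reserve its
 * full length, so that mlxsw_emad_construct() can skb_push() the parts
 * in back-to-front order (end TLV first, Ethernet header last).
 */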
static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
					u16 reg_len)
{
	struct sk_buff *skb;
	u16 emad_len;

	emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
		    (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
		    sizeof(u32) + mlxsw_core->driver->txhdr_len);
	if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
		return NULL;

	skb = netdev_alloc_skb(NULL, emad_len);
	if (!skb)
		return NULL;
	memset(skb->data, 0, emad_len);
	skb_reserve(skb, emad_len);

	return skb;
}

/*****************
 * Core functions
 *****************/

static int mlxsw_core_rx_stats_dbg_read(struct seq_file *file, void *data)
{
	struct mlxsw_core *mlxsw_core = file->private;
	struct mlxsw_core_pcpu_stats *p;
	u64 rx_packets, rx_bytes;
	u64 tmp_rx_packets, tmp_rx_bytes;
	u32 rx_dropped, rx_invalid;
	unsigned int start;
	int i;
	int j;
	static const char hdr[] =
		"     NUM   RX_PACKETS     RX_BYTES RX_DROPPED\n";

	seq_printf(file, hdr);
	for (i = 0; i < MLXSW_TRAP_ID_MAX; i++) {
		rx_packets = 0;
		rx_bytes = 0;
		rx_dropped = 0;
		for_each_possible_cpu(j) {
			p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
			do {
				start = u64_stats_fetch_begin(&p->syncp);
				tmp_rx_packets = p->trap_rx_packets[i];
				tmp_rx_bytes = p->trap_rx_bytes[i];
			} while (u64_stats_fetch_retry(&p->syncp, start));

			rx_packets += tmp_rx_packets;
			rx_bytes += tmp_rx_bytes;
			rx_dropped += p->trap_rx_dropped[i];
		}
		seq_printf(file, "trap %3d %12llu %12llu %10u\n",
			   i, rx_packets, rx_bytes, rx_dropped);
	}
	rx_invalid = 0;
	for_each_possible_cpu(j) {
		p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
		rx_invalid += p->trap_rx_invalid;
	}
	seq_printf(file, "trap INV                           %10u\n",
		   rx_invalid);

	for (i = 0; i < MLXSW_PORT_MAX_PORTS; i++) {
		rx_packets = 0;
		rx_bytes = 0;
		rx_dropped = 0;
		for_each_possible_cpu(j) {
			p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
			do {
				start = u64_stats_fetch_begin(&p->syncp);
				tmp_rx_packets = p->port_rx_packets[i];
				tmp_rx_bytes = p->port_rx_bytes[i];
			} while (u64_stats_fetch_retry(&p->syncp, start));

			rx_packets += tmp_rx_packets;
			rx_bytes += tmp_rx_bytes;
			rx_dropped += p->port_rx_dropped[i];
		}
		seq_printf(file, "port %3d %12llu %12llu %10u\n",
			   i, rx_packets, rx_bytes, rx_dropped);
	}
	rx_invalid = 0;
	for_each_possible_cpu(j) {
		p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
		rx_invalid += p->port_rx_invalid;
	}
	seq_printf(file, "port INV                           %10u\n",
		   rx_invalid);
	return 0;
}

static int mlxsw_core_rx_stats_dbg_open(struct inode *inode, struct file *f)
{
	struct mlxsw_core *mlxsw_core = inode->i_private;

	return single_open(f, mlxsw_core_rx_stats_dbg_read, mlxsw_core);
}

static const struct file_operations mlxsw_core_rx_stats_dbg_ops = {
	.owner = THIS_MODULE,
	.open = mlxsw_core_rx_stats_dbg_open,
	.release = single_release,
	.read = seq_read,
	.llseek = seq_lseek
};

static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
				    const char *buf, size_t size)
{
	__be32 *m = (__be32 *) buf;
	int i;
	int count = size / sizeof(__be32);

	for (i = count - 1; i >= 0; i--)
		if (m[i])
			break;
	i++;
	count = i ? i : 1;
	for (i = 0; i < count; i += 4)
		dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
			i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
			be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
}

int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
	spin_unlock(&mlxsw_core_driver_list_lock);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_driver_register);

void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_del(&mlxsw_driver->list);
	spin_unlock(&mlxsw_core_driver_list_lock);
}
EXPORT_SYMBOL(mlxsw_core_driver_unregister);

static struct mlxsw_driver *__driver_find(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
		if (strcmp(mlxsw_driver->kind, kind) == 0)
			return mlxsw_driver;
	}
	return NULL;
}

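/* Look up a driver by kind, attempting to autoload its module through
 * the MLXSW_MODULE_ALIAS_PREFIX alias if it is not registered yet, and
 * take a reference on the owning module before returning.
 */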
static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	spin_lock(&mlxsw_core_driver_list_lock);
	mlxsw_driver = __driver_find(kind);
	if (!mlxsw_driver) {
		spin_unlock(&mlxsw_core_driver_list_lock);
		request_module(MLXSW_MODULE_ALIAS_PREFIX "%s", kind);
		spin_lock(&mlxsw_core_driver_list_lock);
		mlxsw_driver = __driver_find(kind);
	}
	if (mlxsw_driver) {
		if (!try_module_get(mlxsw_driver->owner))
			mlxsw_driver = NULL;
	}

	spin_unlock(&mlxsw_core_driver_list_lock);
	return mlxsw_driver;
}

static void mlxsw_core_driver_put(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	spin_lock(&mlxsw_core_driver_list_lock);
	mlxsw_driver = __driver_find(kind);
	spin_unlock(&mlxsw_core_driver_list_lock);
	if (!mlxsw_driver)
		return;
	module_put(mlxsw_driver->owner);
}

static int mlxsw_core_debugfs_init(struct mlxsw_core *mlxsw_core)
{
	const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info;

	mlxsw_core->dbg_dir = debugfs_create_dir(bus_info->device_name,
						 mlxsw_core_dbg_root);
	if (!mlxsw_core->dbg_dir)
		return -ENOMEM;
	debugfs_create_file("rx_stats", S_IRUGO, mlxsw_core->dbg_dir,
			    mlxsw_core, &mlxsw_core_rx_stats_dbg_ops);
	mlxsw_core->dbg.vsd_blob.data = (void *) &bus_info->vsd;
	mlxsw_core->dbg.vsd_blob.size = sizeof(bus_info->vsd);
	debugfs_create_blob("vsd", S_IRUGO, mlxsw_core->dbg_dir,
			    &mlxsw_core->dbg.vsd_blob);
	mlxsw_core->dbg.psid_blob.data = (void *) &bus_info->psid;
	mlxsw_core->dbg.psid_blob.size = sizeof(bus_info->psid);
	debugfs_create_blob("psid", S_IRUGO, mlxsw_core->dbg_dir,
			    &mlxsw_core->dbg.psid_blob);
	return 0;
}

static void mlxsw_core_debugfs_fini(struct mlxsw_core *mlxsw_core)
{
	debugfs_remove_recursive(mlxsw_core->dbg_dir);
}

int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
				   const struct mlxsw_bus *mlxsw_bus,
				   void *bus_priv)
{
	const char *device_kind = mlxsw_bus_info->device_kind;
	struct mlxsw_core *mlxsw_core;
	struct mlxsw_driver *mlxsw_driver;
	size_t alloc_size;
	int err;

	mlxsw_driver = mlxsw_core_driver_get(device_kind);
	if (!mlxsw_driver)
		return -EINVAL;
	alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
	mlxsw_core = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_core) {
		err = -ENOMEM;
		goto err_core_alloc;
	}

	INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
	INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
	mlxsw_core->driver = mlxsw_driver;
	mlxsw_core->bus = mlxsw_bus;
	mlxsw_core->bus_priv = bus_priv;
	mlxsw_core->bus_info = mlxsw_bus_info;

	mlxsw_core->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_core_pcpu_stats);
	if (!mlxsw_core->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile);
	if (err)
		goto err_bus_init;

	err = mlxsw_emad_init(mlxsw_core);
	if (err)
		goto err_emad_init;

	err = mlxsw_driver->init(mlxsw_core->driver_priv, mlxsw_core,
				 mlxsw_bus_info);
	if (err)
		goto err_driver_init;

	err = mlxsw_core_debugfs_init(mlxsw_core);
	if (err)
		goto err_debugfs_init;

	return 0;

err_debugfs_init:
	mlxsw_core->driver->fini(mlxsw_core->driver_priv);
err_driver_init:
	mlxsw_emad_fini(mlxsw_core);
err_emad_init:
	mlxsw_bus->fini(bus_priv);
err_bus_init:
	free_percpu(mlxsw_core->pcpu_stats);
err_alloc_stats:
	kfree(mlxsw_core);
err_core_alloc:
	mlxsw_core_driver_put(device_kind);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_bus_device_register);

void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
{
	const char *device_kind = mlxsw_core->bus_info->device_kind;

	mlxsw_core_debugfs_fini(mlxsw_core);
	mlxsw_core->driver->fini(mlxsw_core->driver_priv);
	mlxsw_emad_fini(mlxsw_core);
	mlxsw_core->bus->fini(mlxsw_core->bus_priv);
	free_percpu(mlxsw_core->pcpu_stats);
	kfree(mlxsw_core);
	mlxsw_core_driver_put(device_kind);
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);

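/* driver_priv is a zero-length array at the tail of struct mlxsw_core,
 * so the core structure can be recovered from a driver's private data
 * pointer with container_of().
 */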
static struct mlxsw_core *__mlxsw_core_get(void *driver_priv)
{
	return container_of(driver_priv, struct mlxsw_core, driver_priv);
}

bool mlxsw_core_skb_transmit_busy(void *driver_priv,
				  const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv);

	return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
						  tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);

int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb,
			    const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv);

	return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
					     tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit);

static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
				   const struct mlxsw_rx_listener *rxl_b)
{
	return (rxl_a->func == rxl_b->func &&
		rxl_a->local_port == rxl_b->local_port &&
		rxl_a->trap_id == rxl_b->trap_id);
}

static struct mlxsw_rx_listener_item *
__find_rx_listener_item(struct mlxsw_core *mlxsw_core,
			const struct mlxsw_rx_listener *rxl,
			void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
		if (__is_rx_listener_equal(&rxl_item->rxl, rxl) &&
		    rxl_item->priv == priv)
			return rxl_item;
	}
	return NULL;
}

int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
				    const struct mlxsw_rx_listener *rxl,
				    void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (rxl_item)
		return -EEXIST;
	rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
	if (!rxl_item)
		return -ENOMEM;
	rxl_item->rxl = *rxl;
	rxl_item->priv = priv;

	list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_register);

void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_rx_listener *rxl,
				       void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (!rxl_item)
		return;
	list_del_rcu(&rxl_item->list);
	synchronize_rcu();
	kfree(rxl_item);
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);

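/* Events are delivered as EMADs as well: decode the register ID and
 * payload length from the operation and register TLVs, then hand the
 * raw register payload to the registered event callback.
 */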
static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
					   void *priv)
{
	struct mlxsw_event_listener_item *event_listener_item = priv;
	struct mlxsw_reg_info reg;
	char *payload;
	char *op_tlv = mlxsw_emad_op_tlv(skb);
	char *reg_tlv = mlxsw_emad_reg_tlv(skb);

	reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
	reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
	payload = mlxsw_emad_reg_payload(op_tlv);
	event_listener_item->el.func(&reg, payload, event_listener_item->priv);
	dev_kfree_skb(skb);
}

static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
				      const struct mlxsw_event_listener *el_b)
{
	return (el_a->func == el_b->func &&
		el_a->trap_id == el_b->trap_id);
}

static struct mlxsw_event_listener_item *
__find_event_listener_item(struct mlxsw_core *mlxsw_core,
			   const struct mlxsw_event_listener *el,
			   void *priv)
{
	struct mlxsw_event_listener_item *el_item;

	list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
		if (__is_event_listener_equal(&el_item->el, el) &&
		    el_item->priv == priv)
			return el_item;
	}
	return NULL;
}

int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_event_listener *el,
				       void *priv)
{
	int err;
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (el_item)
		return -EEXIST;
	el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
	if (!el_item)
		return -ENOMEM;
	el_item->el = *el;
	el_item->priv = priv;

	err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item);
	if (err)
		goto err_rx_listener_register;

	/* No reason to save item if we did not manage to register an RX
	 * listener for it.
	 */
	list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);

	return 0;

err_rx_listener_register:
	kfree(el_item);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_event_listener_register);

void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
					  const struct mlxsw_event_listener *el,
					  void *priv)
{
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (!el_item)
		return;
	mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl, el_item);
	list_del(&el_item->list);
	kfree(el_item);
}
EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);

static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
				      const struct mlxsw_reg_info *reg,
				      char *payload,
				      enum mlxsw_core_reg_access_type type)
{
	int err;
	char *op_tlv;
	struct sk_buff *skb;
	struct mlxsw_tx_info tx_info = {
		.local_port = MLXSW_PORT_CPU_PORT,
		.is_emad = true,
	};

	skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
	if (!skb)
		return -ENOMEM;

	mlxsw_emad_construct(skb, reg, payload, type, mlxsw_core);
	mlxsw_core->driver->txhdr_construct(skb, &tx_info);

	dev_dbg(mlxsw_core->bus_info->dev, "EMAD send (tid=%llx)\n",
		mlxsw_core->emad.tid);
	mlxsw_core_buf_dump_dbg(mlxsw_core, skb->data, skb->len);

	err = mlxsw_emad_transmit(mlxsw_core, skb, &tx_info);
	if (!err) {
		op_tlv = mlxsw_emad_op_tlv(mlxsw_core->emad.resp_skb);
		memcpy(payload, mlxsw_emad_reg_payload(op_tlv),
		       reg->len);

		dev_dbg(mlxsw_core->bus_info->dev, "EMAD recv (tid=%llx)\n",
			mlxsw_core->emad.tid - 1);
		mlxsw_core_buf_dump_dbg(mlxsw_core,
					mlxsw_core->emad.resp_skb->data,
					mlxsw_core->emad.resp_skb->len);

		dev_kfree_skb(mlxsw_core->emad.resp_skb);
	}

	return err;
}

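/* Command-interface fallback for register access: the same operation
 * and register TLVs are packed into the ACCESS_REG input mailbox, with
 * the usual retry loop around retryable statuses.
 */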
static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
				     const struct mlxsw_reg_info *reg,
				     char *payload,
				     enum mlxsw_core_reg_access_type type)
{
	int err, n_retry;
	char *in_mbox, *out_mbox, *tmp;

	in_mbox = mlxsw_cmd_mbox_alloc();
	if (!in_mbox)
		return -ENOMEM;

	out_mbox = mlxsw_cmd_mbox_alloc();
	if (!out_mbox) {
		err = -ENOMEM;
		goto free_in_mbox;
	}

	mlxsw_emad_pack_op_tlv(in_mbox, reg, type, mlxsw_core);
	tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
	mlxsw_emad_pack_reg_tlv(tmp, reg, payload);

	n_retry = 0;
retry:
	err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox);
	if (!err) {
		err = mlxsw_emad_process_status(mlxsw_core, out_mbox);
		if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
			goto retry;
	}

	if (!err)
		memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
		       reg->len);

	mlxsw_core->emad.tid++;
	mlxsw_cmd_mbox_free(out_mbox);
free_in_mbox:
	mlxsw_cmd_mbox_free(in_mbox);
	return err;
}

static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type)
{
	u64 cur_tid;
	int err;

	if (mutex_lock_interruptible(&mlxsw_core->emad.lock)) {
		dev_err(mlxsw_core->bus_info->dev, "Reg access interrupted (reg_id=%x(%s),type=%s)\n",
			reg->id, mlxsw_reg_id_str(reg->id),
			mlxsw_core_reg_access_type_str(type));
		return -EINTR;
	}

	cur_tid = mlxsw_core->emad.tid;
	dev_dbg(mlxsw_core->bus_info->dev, "Reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
		cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	/* During initialization EMAD interface is not available to us,
	 * so we default to command interface. We switch to EMAD interface
	 * after setting the appropriate traps.
	 */
	if (!mlxsw_core->emad.use_emad)
		err = mlxsw_core_reg_access_cmd(mlxsw_core, reg,
						payload, type);
	else
		err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
						 payload, type);

	if (err)
		dev_err(mlxsw_core->bus_info->dev, "Reg access failed (tid=%llx,reg_id=%x(%s),type=%s)\n",
			cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
			mlxsw_core_reg_access_type_str(type));

	mutex_unlock(&mlxsw_core->emad.lock);
	return err;
}

int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
}
EXPORT_SYMBOL(mlxsw_reg_query);

int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
}
EXPORT_SYMBOL(mlxsw_reg_write);

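/* RX entry point from the bus driver: validate the trap ID and source
 * port, find the first matching listener under RCU protection, update
 * the per-CPU counters and hand the skb over. Unmatched packets are
 * accounted as dropped or invalid and freed.
 */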
void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
			    struct mlxsw_rx_info *rx_info)
{
	struct mlxsw_rx_listener_item *rxl_item;
	const struct mlxsw_rx_listener *rxl;
	struct mlxsw_core_pcpu_stats *pcpu_stats;
	u8 local_port = rx_info->sys_port;
	bool found = false;

	dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: sys_port = %d, trap_id = 0x%x\n",
			    __func__, rx_info->sys_port, rx_info->trap_id);

	if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
	    (local_port >= MLXSW_PORT_MAX_PORTS))
		goto drop;

	rcu_read_lock();
	list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
		rxl = &rxl_item->rxl;
		if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
		     rxl->local_port == local_port) &&
		    rxl->trap_id == rx_info->trap_id) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	if (!found)
		goto drop;

	pcpu_stats = this_cpu_ptr(mlxsw_core->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->port_rx_packets[local_port]++;
	pcpu_stats->port_rx_bytes[local_port] += skb->len;
	pcpu_stats->trap_rx_packets[rx_info->trap_id]++;
	pcpu_stats->trap_rx_bytes[rx_info->trap_id] += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	rxl->func(skb, local_port, rxl_item->priv);
	return;

drop:
	if (rx_info->trap_id >= MLXSW_TRAP_ID_MAX)
		this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_invalid);
	else
		this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_dropped[rx_info->trap_id]);
	if (local_port >= MLXSW_PORT_MAX_PORTS)
		this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_invalid);
	else
		this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_dropped[local_port]);
	dev_kfree_skb(skb);
}
EXPORT_SYMBOL(mlxsw_core_skb_receive);

int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
		   u32 in_mod, bool out_mbox_direct,
		   char *in_mbox, size_t in_mbox_size,
		   char *out_mbox, size_t out_mbox_size)
{
	u8 status;
	int err;

	BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
	if (!mlxsw_core->bus->cmd_exec)
		return -EOPNOTSUPP;

	dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
		opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
	if (in_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
	}

	err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
					opcode_mod, in_mod, out_mbox_direct,
					in_mbox, in_mbox_size,
					out_mbox, out_mbox_size, &status);

	if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod, status, mlxsw_cmd_status_str(status));
	} else if (err == -ETIMEDOUT) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod);
	}

	if (!err && out_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
	}
	return err;
}
EXPORT_SYMBOL(mlxsw_cmd_exec);

static int __init mlxsw_core_module_init(void)
{
	mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
	if (!mlxsw_core_dbg_root)
		return -ENOMEM;
	return 0;
}

static void __exit mlxsw_core_module_exit(void)
{
	debugfs_remove_recursive(mlxsw_core_dbg_root);
}

module_init(mlxsw_core_module_init);
module_exit(mlxsw_core_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch device core driver");