/*
 * drivers/net/ethernet/mellanox/mlxsw/core.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/if_link.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/u64_stats_sync.h>
#include <linux/netdevice.h>
#include <linux/wait.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <net/devlink.h>

#include "core.h"
#include "item.h"
#include "cmd.h"
#include "port.h"
#include "trap.h"
#include "emad.h"
#include "reg.h"

static LIST_HEAD(mlxsw_core_driver_list);
static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);

static const char mlxsw_core_driver_name[] = "mlxsw_core";

static struct dentry *mlxsw_core_dbg_root;

struct mlxsw_core_pcpu_stats {
	u64			trap_rx_packets[MLXSW_TRAP_ID_MAX];
	u64			trap_rx_bytes[MLXSW_TRAP_ID_MAX];
	u64			port_rx_packets[MLXSW_PORT_MAX_PORTS];
	u64			port_rx_bytes[MLXSW_PORT_MAX_PORTS];
	struct u64_stats_sync	syncp;
	u32			trap_rx_dropped[MLXSW_TRAP_ID_MAX];
	u32			port_rx_dropped[MLXSW_PORT_MAX_PORTS];
	u32			trap_rx_invalid;
	u32			port_rx_invalid;
};

struct mlxsw_core {
	struct mlxsw_driver *driver;
	const struct mlxsw_bus *bus;
	void *bus_priv;
	const struct mlxsw_bus_info *bus_info;
	struct list_head rx_listener_list;
	struct list_head event_listener_list;
	struct {
		struct sk_buff *resp_skb;
		u64 tid;
		wait_queue_head_t wait;
		bool trans_active;
		struct mutex lock; /* One EMAD transaction at a time. */
		bool use_emad;
	} emad;
	struct mlxsw_core_pcpu_stats __percpu *pcpu_stats;
	struct dentry *dbg_dir;
	struct {
		struct debugfs_blob_wrapper vsd_blob;
		struct debugfs_blob_wrapper psid_blob;
	} dbg;
	struct {
		u8 *mapping; /* lag_id+port_index to local_port mapping */
	} lag;
	struct mlxsw_hwmon *hwmon;
	unsigned long driver_priv[0];
	/* driver_priv has to be always the last item */
};

struct mlxsw_rx_listener_item {
	struct list_head list;
	struct mlxsw_rx_listener rxl;
	void *priv;
};

struct mlxsw_event_listener_item {
	struct list_head list;
	struct mlxsw_event_listener el;
	void *priv;
};

/* emad_eth_hdr_dmac
 * Destination MAC in EMAD's Ethernet header.
 * Must be set to 01:02:c9:00:00:01
 */
MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);

/* emad_eth_hdr_smac
 * Source MAC in EMAD's Ethernet header.
 * Must be set to 00:02:c9:01:02:03
 */
MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);

/* emad_eth_hdr_ethertype
 * Ethertype in EMAD's Ethernet header.
 * Must be set to 0x8932
 */
MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);

/* emad_eth_hdr_mlx_proto
 * Mellanox protocol.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);

/* emad_eth_hdr_ver
 * Mellanox protocol version.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);

/* emad_op_tlv_type
 * Type of the TLV.
 * Must be set to 0x1 (operation TLV).
 */
MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);

/* emad_op_tlv_len
 * Length of the operation TLV in u32.
 * Must be set to 0x4.
 */
MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);

/* emad_op_tlv_dr
 * Direct route bit. Setting to 1 indicates the EMAD is a direct route
 * EMAD. DR TLV must follow.
 *
 * Note: Currently not supported and must not be set.
 */
MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);

/* emad_op_tlv_status
 * Returned status in case of EMAD response. Must be set to 0 in case
 * of an EMAD request.
 * 0x0 - success
 * 0x1 - device is busy. Requester should retry
 * 0x2 - Mellanox protocol version not supported
 * 0x3 - unknown TLV
 * 0x4 - register not supported
 * 0x5 - operation class not supported
 * 0x6 - EMAD method not supported
 * 0x7 - bad parameter (e.g. port out of range)
 * 0x8 - resource not available
 * 0x9 - message receipt acknowledgment. Requester should retry
 * 0x70 - internal error
 */
MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);

/* emad_op_tlv_register_id
 * Register ID of register within register TLV.
 */
MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);

/* emad_op_tlv_r
 * Response bit. Setting to 1 indicates Response, otherwise request.
 */
MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);

/* emad_op_tlv_method
 * EMAD method type.
 * 0x1 - query
 * 0x2 - write
 * 0x3 - send (currently not supported)
 * 0x4 - event
 */
MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);

/* emad_op_tlv_class
 * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
 */
MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);

/* emad_op_tlv_tid
 * EMAD transaction ID. Used for pairing request and response EMADs.
 */
MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);

/* emad_reg_tlv_type
 * Type of the TLV.
 * Must be set to 0x3 (register TLV).
 */
MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);

/* emad_reg_tlv_len
 * Length of the operation TLV in u32.
 */
MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);

/* emad_end_tlv_type
 * Type of the TLV.
 * Must be set to 0x0 (end TLV).
 */
MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);

/* emad_end_tlv_len
 * Length of the end TLV in u32.
 * Must be set to 0x1.
 */
MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);

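/* Overall EMAD frame layout, summarizing the items defined above as they
 * are assembled by mlxsw_emad_construct() below (a descriptive note added
 * for readability, not part of the device interface definition):
 *
 *   +----------------------+
 *   | EMAD Ethernet header |  dmac, smac, ethertype, mlx_proto, ver
 *   +----------------------+
 *   | Operation TLV        |  type, len, dr, status, register_id, r,
 *   |                      |  method, class, tid
 *   +----------------------+
 *   | Register TLV         |  type, len, then the register payload
 *   +----------------------+
 *   | End TLV              |
 *   +----------------------+
 */
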
enum mlxsw_core_reg_access_type {
	MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
	MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
};

static inline const char *
mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
{
	switch (type) {
	case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
		return "query";
	case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
		return "write";
	}
	BUG();
}

static void mlxsw_emad_pack_end_tlv(char *end_tlv)
{
	mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
	mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
}

static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
				    const struct mlxsw_reg_info *reg,
				    char *payload)
{
	mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
	mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
	memcpy(reg_tlv + sizeof(u32), payload, reg->len);
}

static void mlxsw_emad_pack_op_tlv(char *op_tlv,
				   const struct mlxsw_reg_info *reg,
				   enum mlxsw_core_reg_access_type type,
				   struct mlxsw_core *mlxsw_core)
{
	mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
	mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
	mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
	mlxsw_emad_op_tlv_status_set(op_tlv, 0);
	mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
	mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
	if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
		mlxsw_emad_op_tlv_method_set(op_tlv,
					     MLXSW_EMAD_OP_TLV_METHOD_QUERY);
	else
		mlxsw_emad_op_tlv_method_set(op_tlv,
					     MLXSW_EMAD_OP_TLV_METHOD_WRITE);
	mlxsw_emad_op_tlv_class_set(op_tlv,
				    MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
	mlxsw_emad_op_tlv_tid_set(op_tlv, mlxsw_core->emad.tid);
}

static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
{
	char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);

	mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
	mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
	mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
	mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
	mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);

	skb_reset_mac_header(skb);

	return 0;
}

static void mlxsw_emad_construct(struct sk_buff *skb,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 struct mlxsw_core *mlxsw_core)
{
	char *buf;

	buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_end_tlv(buf);

	buf = skb_push(skb, reg->len + sizeof(u32));
	mlxsw_emad_pack_reg_tlv(buf, reg, payload);

	buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_op_tlv(buf, reg, type, mlxsw_core);

	mlxsw_emad_construct_eth_hdr(skb);
}

static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
{
	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN));
}

static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
{
	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN +
			  MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)));
}

static char *mlxsw_emad_reg_payload(const char *op_tlv)
{
	return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
}

static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return mlxsw_emad_op_tlv_tid_get(op_tlv);
}

static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
}

#define MLXSW_EMAD_TIMEOUT_MS 200

static int __mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
				 struct sk_buff *skb,
				 const struct mlxsw_tx_info *tx_info)
{
	int err;
	int ret;

	mlxsw_core->emad.trans_active = true;

	err = mlxsw_core_skb_transmit(mlxsw_core->driver_priv, skb, tx_info);
	if (err) {
		dev_err(mlxsw_core->bus_info->dev, "Failed to transmit EMAD (tid=%llx)\n",
			mlxsw_core->emad.tid);
		dev_kfree_skb(skb);
		goto trans_inactive_out;
	}

	ret = wait_event_timeout(mlxsw_core->emad.wait,
				 !(mlxsw_core->emad.trans_active),
				 msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS));
	if (!ret) {
		dev_warn(mlxsw_core->bus_info->dev, "EMAD timed-out (tid=%llx)\n",
			 mlxsw_core->emad.tid);
		err = -EIO;
		goto trans_inactive_out;
	}

	return 0;

trans_inactive_out:
	mlxsw_core->emad.trans_active = false;
	return err;
}

static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core,
				     char *op_tlv)
{
	enum mlxsw_emad_op_tlv_status status;
	u64 tid;

	status = mlxsw_emad_op_tlv_status_get(op_tlv);
	tid = mlxsw_emad_op_tlv_tid_get(op_tlv);

	switch (status) {
	case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
		return 0;
	case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
	case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
		dev_warn(mlxsw_core->bus_info->dev, "Reg access status again (tid=%llx,status=%x(%s))\n",
			 tid, status, mlxsw_emad_op_tlv_status_str(status));
		return -EAGAIN;
	case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
	case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
	case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
	case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
	default:
		dev_err(mlxsw_core->bus_info->dev, "Reg access status failed (tid=%llx,status=%x(%s))\n",
			tid, status, mlxsw_emad_op_tlv_status_str(status));
		return -EIO;
	}
}

static int mlxsw_emad_process_status_skb(struct mlxsw_core *mlxsw_core,
					 const struct sk_buff *skb)
{
	return mlxsw_emad_process_status(mlxsw_core, mlxsw_emad_op_tlv(skb));
}

static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
			       struct sk_buff *skb,
			       const struct mlxsw_tx_info *tx_info)
{
	struct sk_buff *trans_skb;
	int n_retry = 0;
	int err;

retry:
	/* We copy the EMAD to a new skb, since we might need
	 * to retransmit it in case of failure.
	 */
	trans_skb = skb_copy(skb, GFP_KERNEL);
	if (!trans_skb) {
		err = -ENOMEM;
		goto out;
	}

	err = __mlxsw_emad_transmit(mlxsw_core, trans_skb, tx_info);
	if (!err) {
		struct sk_buff *resp_skb = mlxsw_core->emad.resp_skb;

		err = mlxsw_emad_process_status_skb(mlxsw_core, resp_skb);
		if (err)
			dev_kfree_skb(resp_skb);
		if (!err || err != -EAGAIN)
			goto out;
	}
	if (n_retry++ < MLXSW_EMAD_MAX_RETRY)
		goto retry;

out:
	dev_kfree_skb(skb);
	mlxsw_core->emad.tid++;
	return err;
}

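/* Note (descriptive, added for readability): mlxsw_emad_transmit() is a
 * simple synchronous transaction. The EMAD is copied so the original can be
 * retransmitted, sent via __mlxsw_emad_transmit(), and the caller sleeps on
 * emad.wait until the RX listener below matches the response by transaction
 * ID or the MLXSW_EMAD_TIMEOUT_MS timeout expires. Busy/receipt-ack statuses
 * (-EAGAIN) are retried up to MLXSW_EMAD_MAX_RETRY times.
 */
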
static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
					void *priv)
{
	struct mlxsw_core *mlxsw_core = priv;

	if (mlxsw_emad_is_resp(skb) &&
	    mlxsw_core->emad.trans_active &&
	    mlxsw_emad_get_tid(skb) == mlxsw_core->emad.tid) {
		mlxsw_core->emad.resp_skb = skb;
		mlxsw_core->emad.trans_active = false;
		wake_up(&mlxsw_core->emad.wait);
	} else {
		dev_kfree_skb(skb);
	}
}

static const struct mlxsw_rx_listener mlxsw_emad_rx_listener = {
	.func = mlxsw_emad_rx_listener_func,
	.local_port = MLXSW_PORT_DONT_CARE,
	.trap_id = MLXSW_TRAP_ID_ETHEMAD,
};

static int mlxsw_emad_traps_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
			    MLXSW_TRAP_ID_ETHEMAD);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
}

static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
{
	int err;

	/* Set the upper 32 bits of the transaction ID field to a random
	 * number. This allows us to discard EMADs addressed to other
	 * devices.
	 */
	get_random_bytes(&mlxsw_core->emad.tid, 4);
	mlxsw_core->emad.tid = mlxsw_core->emad.tid << 32;

	init_waitqueue_head(&mlxsw_core->emad.wait);
	mlxsw_core->emad.trans_active = false;
	mutex_init(&mlxsw_core->emad.lock);

	err = mlxsw_core_rx_listener_register(mlxsw_core,
					      &mlxsw_emad_rx_listener,
					      mlxsw_core);
	if (err)
		return err;

	err = mlxsw_emad_traps_set(mlxsw_core);
	if (err)
		goto err_emad_trap_set;

	mlxsw_core->emad.use_emad = true;

	return 0;

err_emad_trap_set:
	mlxsw_core_rx_listener_unregister(mlxsw_core,
					  &mlxsw_emad_rx_listener,
					  mlxsw_core);
	return err;
}

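/* The transaction ID is thus split in two: the upper 32 bits are random per
 * device (set above) and the lower 32 bits are incremented after every
 * completed transaction, which is what lets the RX listener pair a response
 * with the currently pending request. (Descriptive note added for clarity.)
 */
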
static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];

	mlxsw_core->emad.use_emad = false;
	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
			    MLXSW_TRAP_ID_ETHEMAD);
	mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);

	mlxsw_core_rx_listener_unregister(mlxsw_core,
					  &mlxsw_emad_rx_listener,
					  mlxsw_core);
}

static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
					u16 reg_len)
{
	struct sk_buff *skb;
	u16 emad_len;

	emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
		    (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
		    sizeof(u32) + mlxsw_core->driver->txhdr_len);
	if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
		return NULL;

	skb = netdev_alloc_skb(NULL, emad_len);
	if (!skb)
		return NULL;
	memset(skb->data, 0, emad_len);
	skb_reserve(skb, emad_len);

	return skb;
}

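/* mlxsw_emad_alloc() reserves the full frame length as headroom because the
 * EMAD is built back-to-front with skb_push(): end TLV first, then register
 * TLV, operation TLV, the Ethernet header, and finally the bus-specific TX
 * header added by the driver's txhdr_construct(). (Descriptive note added
 * for clarity.)
 */
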
static int mlxsw_core_rx_stats_dbg_read(struct seq_file *file, void *data)
{
	struct mlxsw_core *mlxsw_core = file->private;
	struct mlxsw_core_pcpu_stats *p;
	u64 rx_packets, rx_bytes;
	u64 tmp_rx_packets, tmp_rx_bytes;
	u32 rx_dropped, rx_invalid;
	unsigned int start;
	int i;
	int j;
	static const char hdr[] =
		"     NUM   RX_PACKETS     RX_BYTES  RX_DROPPED\n";

	seq_printf(file, hdr);
	for (i = 0; i < MLXSW_TRAP_ID_MAX; i++) {
		rx_packets = 0;
		rx_bytes = 0;
		rx_dropped = 0;
		for_each_possible_cpu(j) {
			p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
			do {
				start = u64_stats_fetch_begin(&p->syncp);
				tmp_rx_packets = p->trap_rx_packets[i];
				tmp_rx_bytes = p->trap_rx_bytes[i];
			} while (u64_stats_fetch_retry(&p->syncp, start));

			rx_packets += tmp_rx_packets;
			rx_bytes += tmp_rx_bytes;
			rx_dropped += p->trap_rx_dropped[i];
		}
		seq_printf(file, "trap %3d %12llu %12llu %10u\n",
			   i, rx_packets, rx_bytes, rx_dropped);
	}
	rx_invalid = 0;
	for_each_possible_cpu(j) {
		p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
		rx_invalid += p->trap_rx_invalid;
	}
	seq_printf(file, "trap INV                           %10u\n",
		   rx_invalid);
	for (i = 0; i < MLXSW_PORT_MAX_PORTS; i++) {
		rx_packets = 0;
		rx_bytes = 0;
		rx_dropped = 0;
		for_each_possible_cpu(j) {
			p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
			do {
				start = u64_stats_fetch_begin(&p->syncp);
				tmp_rx_packets = p->port_rx_packets[i];
				tmp_rx_bytes = p->port_rx_bytes[i];
			} while (u64_stats_fetch_retry(&p->syncp, start));

			rx_packets += tmp_rx_packets;
			rx_bytes += tmp_rx_bytes;
			rx_dropped += p->port_rx_dropped[i];
		}
		seq_printf(file, "port %3d %12llu %12llu %10u\n",
			   i, rx_packets, rx_bytes, rx_dropped);
	}
	rx_invalid = 0;
	for_each_possible_cpu(j) {
		p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
		rx_invalid += p->port_rx_invalid;
	}
	seq_printf(file, "port INV                           %10u\n",
		   rx_invalid);
	return 0;
}

static int mlxsw_core_rx_stats_dbg_open(struct inode *inode, struct file *f)
{
	struct mlxsw_core *mlxsw_core = inode->i_private;

	return single_open(f, mlxsw_core_rx_stats_dbg_read, mlxsw_core);
}

static const struct file_operations mlxsw_core_rx_stats_dbg_ops = {
	.owner = THIS_MODULE,
	.open = mlxsw_core_rx_stats_dbg_open,
	.release = single_release,
	.read = seq_read,
	.llseek = seq_lseek,
};

static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
				    const char *buf, size_t size)
{
	__be32 *m = (__be32 *) buf;
	int i;
	int count = size / sizeof(__be32);

	for (i = count - 1; i >= 0; i--)
		if (m[i])
			break;
	i++;
	count = i;
	for (i = 0; i < count; i += 4)
		dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
			i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
			be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
}

int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
	spin_unlock(&mlxsw_core_driver_list_lock);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_driver_register);

void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_del(&mlxsw_driver->list);
	spin_unlock(&mlxsw_core_driver_list_lock);
}
EXPORT_SYMBOL(mlxsw_core_driver_unregister);

static struct mlxsw_driver *__driver_find(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
		if (strcmp(mlxsw_driver->kind, kind) == 0)
			return mlxsw_driver;
	}
	return NULL;
}

static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	spin_lock(&mlxsw_core_driver_list_lock);
	mlxsw_driver = __driver_find(kind);
	if (!mlxsw_driver) {
		spin_unlock(&mlxsw_core_driver_list_lock);
		request_module(MLXSW_MODULE_ALIAS_PREFIX "%s", kind);
		spin_lock(&mlxsw_core_driver_list_lock);
		mlxsw_driver = __driver_find(kind);
	}
	if (mlxsw_driver) {
		if (!try_module_get(mlxsw_driver->owner))
			mlxsw_driver = NULL;
	}
	spin_unlock(&mlxsw_core_driver_list_lock);
	return mlxsw_driver;
}

static void mlxsw_core_driver_put(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	spin_lock(&mlxsw_core_driver_list_lock);
	mlxsw_driver = __driver_find(kind);
	spin_unlock(&mlxsw_core_driver_list_lock);
	if (!mlxsw_driver)
		return;
	module_put(mlxsw_driver->owner);
}

static int mlxsw_core_debugfs_init(struct mlxsw_core *mlxsw_core)
{
	const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info;

	mlxsw_core->dbg_dir = debugfs_create_dir(bus_info->device_name,
						 mlxsw_core_dbg_root);
	if (!mlxsw_core->dbg_dir)
		return -ENOMEM;
	debugfs_create_file("rx_stats", S_IRUGO, mlxsw_core->dbg_dir,
			    mlxsw_core, &mlxsw_core_rx_stats_dbg_ops);
	mlxsw_core->dbg.vsd_blob.data = (void *) &bus_info->vsd;
	mlxsw_core->dbg.vsd_blob.size = sizeof(bus_info->vsd);
	debugfs_create_blob("vsd", S_IRUGO, mlxsw_core->dbg_dir,
			    &mlxsw_core->dbg.vsd_blob);
	mlxsw_core->dbg.psid_blob.data = (void *) &bus_info->psid;
	mlxsw_core->dbg.psid_blob.size = sizeof(bus_info->psid);
	debugfs_create_blob("psid", S_IRUGO, mlxsw_core->dbg_dir,
			    &mlxsw_core->dbg.psid_blob);
	return 0;
}

static void mlxsw_core_debugfs_fini(struct mlxsw_core *mlxsw_core)
{
	debugfs_remove_recursive(mlxsw_core->dbg_dir);
}

static int mlxsw_devlink_port_split(struct devlink *devlink,
				    unsigned int port_index,
				    unsigned int count)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	if (port_index >= MLXSW_PORT_MAX_PORTS)
		return -EINVAL;
	if (!mlxsw_core->driver->port_split)
		return -EOPNOTSUPP;
	return mlxsw_core->driver->port_split(mlxsw_core->driver_priv,
					      port_index, count);
}

static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
				      unsigned int port_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	if (port_index >= MLXSW_PORT_MAX_PORTS)
		return -EINVAL;
	if (!mlxsw_core->driver->port_unsplit)
		return -EOPNOTSUPP;
	return mlxsw_core->driver->port_unsplit(mlxsw_core->driver_priv,
						port_index);
}

static const struct devlink_ops mlxsw_devlink_ops = {
	.port_split	= mlxsw_devlink_port_split,
	.port_unsplit	= mlxsw_devlink_port_unsplit,
};

int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
				   const struct mlxsw_bus *mlxsw_bus,
				   void *bus_priv)
{
	const char *device_kind = mlxsw_bus_info->device_kind;
	struct mlxsw_core *mlxsw_core;
	struct mlxsw_driver *mlxsw_driver;
	struct devlink *devlink;
	size_t alloc_size;
	int err;

	mlxsw_driver = mlxsw_core_driver_get(device_kind);
	if (!mlxsw_driver)
		return -EINVAL;
	alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
	devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
	if (!devlink) {
		err = -ENOMEM;
		goto err_devlink_alloc;
	}

	mlxsw_core = devlink_priv(devlink);
	INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
	INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
	mlxsw_core->driver = mlxsw_driver;
	mlxsw_core->bus = mlxsw_bus;
	mlxsw_core->bus_priv = bus_priv;
	mlxsw_core->bus_info = mlxsw_bus_info;

	mlxsw_core->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_core_pcpu_stats);
	if (!mlxsw_core->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	if (mlxsw_driver->profile->used_max_lag &&
	    mlxsw_driver->profile->used_max_port_per_lag) {
		alloc_size = sizeof(u8) * mlxsw_driver->profile->max_lag *
			     mlxsw_driver->profile->max_port_per_lag;
		mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
		if (!mlxsw_core->lag.mapping) {
			err = -ENOMEM;
			goto err_alloc_lag_mapping;
		}
	}

	err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile);
	if (err)
		goto err_bus_init;

	err = mlxsw_emad_init(mlxsw_core);
	if (err)
		goto err_emad_init;

	err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
	if (err)
		goto err_hwmon_init;

	err = devlink_register(devlink, mlxsw_bus_info->dev);
	if (err)
		goto err_devlink_register;

	err = mlxsw_driver->init(mlxsw_core->driver_priv, mlxsw_core,
				 mlxsw_bus_info);
	if (err)
		goto err_driver_init;

	err = mlxsw_core_debugfs_init(mlxsw_core);
	if (err)
		goto err_debugfs_init;

	return 0;

err_debugfs_init:
	mlxsw_core->driver->fini(mlxsw_core->driver_priv);
err_driver_init:
	devlink_unregister(devlink);
err_devlink_register:
err_hwmon_init:
	mlxsw_emad_fini(mlxsw_core);
err_emad_init:
	mlxsw_bus->fini(bus_priv);
err_bus_init:
	kfree(mlxsw_core->lag.mapping);
err_alloc_lag_mapping:
	free_percpu(mlxsw_core->pcpu_stats);
err_alloc_stats:
	devlink_free(devlink);
err_devlink_alloc:
	mlxsw_core_driver_put(device_kind);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_bus_device_register);

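/* Registration order above (descriptive note added for clarity): get the
 * driver module, allocate the devlink instance that embeds struct mlxsw_core
 * and the driver's private area, allocate per-CPU stats and the optional LAG
 * mapping, then bring up the bus, EMAD, hwmon, devlink, the driver itself and
 * finally debugfs. mlxsw_core_bus_device_unregister() below tears this down
 * in reverse order.
 */
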
void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
{
	const char *device_kind = mlxsw_core->bus_info->device_kind;
	struct devlink *devlink = priv_to_devlink(mlxsw_core);

	mlxsw_core_debugfs_fini(mlxsw_core);
	mlxsw_core->driver->fini(mlxsw_core->driver_priv);
	devlink_unregister(devlink);
	mlxsw_emad_fini(mlxsw_core);
	mlxsw_core->bus->fini(mlxsw_core->bus_priv);
	kfree(mlxsw_core->lag.mapping);
	free_percpu(mlxsw_core->pcpu_stats);
	devlink_free(devlink);
	mlxsw_core_driver_put(device_kind);
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);

static struct mlxsw_core *__mlxsw_core_get(void *driver_priv)
{
	return container_of(driver_priv, struct mlxsw_core, driver_priv);
}

bool mlxsw_core_skb_transmit_busy(void *driver_priv,
				  const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv);

	return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
						  tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);

int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb,
			    const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv);

	return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
					     tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit);

static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
				   const struct mlxsw_rx_listener *rxl_b)
{
	return (rxl_a->func == rxl_b->func &&
		rxl_a->local_port == rxl_b->local_port &&
		rxl_a->trap_id == rxl_b->trap_id);
}

static struct mlxsw_rx_listener_item *
__find_rx_listener_item(struct mlxsw_core *mlxsw_core,
			const struct mlxsw_rx_listener *rxl,
			void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
		if (__is_rx_listener_equal(&rxl_item->rxl, rxl) &&
		    rxl_item->priv == priv)
			return rxl_item;
	}
	return NULL;
}

int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
				    const struct mlxsw_rx_listener *rxl,
				    void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (rxl_item)
		return -EEXIST;
	rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
	if (!rxl_item)
		return -ENOMEM;
	rxl_item->rxl = *rxl;
	rxl_item->priv = priv;

	list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_register);

void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_rx_listener *rxl,
				       void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (!rxl_item)
		return;
	list_del_rcu(&rxl_item->list);
	synchronize_rcu();
	kfree(rxl_item);
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);

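/* RX listeners are added and removed with the RCU list helpers because
 * mlxsw_core_skb_receive() below walks rx_listener_list with
 * list_for_each_entry_rcu() from the RX path without taking a lock.
 * (Descriptive note added for clarity.)
 */
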
static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
					   void *priv)
{
	struct mlxsw_event_listener_item *event_listener_item = priv;
	struct mlxsw_reg_info reg;
	char *payload;
	char *op_tlv = mlxsw_emad_op_tlv(skb);
	char *reg_tlv = mlxsw_emad_reg_tlv(skb);

	reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
	reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
	payload = mlxsw_emad_reg_payload(op_tlv);
	event_listener_item->el.func(&reg, payload, event_listener_item->priv);
	dev_kfree_skb(skb);
}

static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
				      const struct mlxsw_event_listener *el_b)
{
	return (el_a->func == el_b->func &&
		el_a->trap_id == el_b->trap_id);
}

static struct mlxsw_event_listener_item *
__find_event_listener_item(struct mlxsw_core *mlxsw_core,
			   const struct mlxsw_event_listener *el,
			   void *priv)
{
	struct mlxsw_event_listener_item *el_item;

	list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
		if (__is_event_listener_equal(&el_item->el, el) &&
		    el_item->priv == priv)
			return el_item;
	}
	return NULL;
}

int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_event_listener *el,
				       void *priv)
{
	int err;
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (el_item)
		return -EEXIST;
	el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
	if (!el_item)
		return -ENOMEM;
	el_item->el = *el;
	el_item->priv = priv;

	err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item);
	if (err)
		goto err_rx_listener_register;

	/* No reason to save item if we did not manage to register an RX
	 * listener for it.
	 */
	list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);

	return 0;

err_rx_listener_register:
	kfree(el_item);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_event_listener_register);

void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
					  const struct mlxsw_event_listener *el,
					  void *priv)
{
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (!el_item)
		return;
	mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl, el_item);
	list_del(&el_item->list);
	kfree(el_item);
}
EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);

static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
				      const struct mlxsw_reg_info *reg,
				      char *payload,
				      enum mlxsw_core_reg_access_type type)
{
	int err;
	char *op_tlv;
	struct sk_buff *skb;
	struct mlxsw_tx_info tx_info = {
		.local_port = MLXSW_PORT_CPU_PORT,
		.is_emad = true,
	};

	skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
	if (!skb)
		return -ENOMEM;

	mlxsw_emad_construct(skb, reg, payload, type, mlxsw_core);
	mlxsw_core->driver->txhdr_construct(skb, &tx_info);

	dev_dbg(mlxsw_core->bus_info->dev, "EMAD send (tid=%llx)\n",
		mlxsw_core->emad.tid);
	mlxsw_core_buf_dump_dbg(mlxsw_core, skb->data, skb->len);

	err = mlxsw_emad_transmit(mlxsw_core, skb, &tx_info);
	if (!err) {
		op_tlv = mlxsw_emad_op_tlv(mlxsw_core->emad.resp_skb);
		memcpy(payload, mlxsw_emad_reg_payload(op_tlv),
		       reg->len);

		dev_dbg(mlxsw_core->bus_info->dev, "EMAD recv (tid=%llx)\n",
			mlxsw_core->emad.tid - 1);
		mlxsw_core_buf_dump_dbg(mlxsw_core,
					mlxsw_core->emad.resp_skb->data,
					mlxsw_core->emad.resp_skb->len);

		dev_kfree_skb(mlxsw_core->emad.resp_skb);
	}

	return err;
}

static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
				     const struct mlxsw_reg_info *reg,
				     char *payload,
				     enum mlxsw_core_reg_access_type type)
{
	int err, n_retry;
	char *in_mbox, *out_mbox, *tmp;

	in_mbox = mlxsw_cmd_mbox_alloc();
	if (!in_mbox)
		return -ENOMEM;

	out_mbox = mlxsw_cmd_mbox_alloc();
	if (!out_mbox) {
		err = -ENOMEM;
		goto free_in_mbox;
	}

	mlxsw_emad_pack_op_tlv(in_mbox, reg, type, mlxsw_core);
	tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
	mlxsw_emad_pack_reg_tlv(tmp, reg, payload);

	n_retry = 0;
retry:
	err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox);
	if (!err) {
		err = mlxsw_emad_process_status(mlxsw_core, out_mbox);
		if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
			goto retry;
	}

	if (!err)
		memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
		       reg->len);

	mlxsw_core->emad.tid++;
	mlxsw_cmd_mbox_free(out_mbox);
free_in_mbox:
	mlxsw_cmd_mbox_free(in_mbox);
	return err;
}

static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type)
{
	u64 cur_tid;
	int err;

	if (mutex_lock_interruptible(&mlxsw_core->emad.lock)) {
		dev_err(mlxsw_core->bus_info->dev, "Reg access interrupted (reg_id=%x(%s),type=%s)\n",
			reg->id, mlxsw_reg_id_str(reg->id),
			mlxsw_core_reg_access_type_str(type));
		return -EINTR;
	}

	cur_tid = mlxsw_core->emad.tid;
	dev_dbg(mlxsw_core->bus_info->dev, "Reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
		cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	/* During initialization EMAD interface is not available to us,
	 * so we default to command interface. We switch to EMAD interface
	 * after setting the appropriate traps.
	 */
	if (!mlxsw_core->emad.use_emad)
		err = mlxsw_core_reg_access_cmd(mlxsw_core, reg,
						payload, type);
	else
		err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
						 payload, type);
	if (err)
		dev_err(mlxsw_core->bus_info->dev, "Reg access failed (tid=%llx,reg_id=%x(%s),type=%s)\n",
			cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
			mlxsw_core_reg_access_type_str(type));

	mutex_unlock(&mlxsw_core->emad.lock);
	return err;
}

int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
}
EXPORT_SYMBOL(mlxsw_reg_query);

int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
}
EXPORT_SYMBOL(mlxsw_reg_write);

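/* Typical usage from a switch driver (illustrative sketch only; the PMLP
 * register, its pack helper and its length macro are just one example of a
 * register defined in reg.h and stand in for whatever register the caller
 * actually needs):
 *
 *	char pmlp_pl[MLXSW_REG_PMLP_LEN];
 *	int err;
 *
 *	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
 *	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmlp), pmlp_pl);
 *	if (err)
 *		return err;
 *	// read fields out of pmlp_pl with the register's getters
 */
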
void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
			    struct mlxsw_rx_info *rx_info)
{
	struct mlxsw_rx_listener_item *rxl_item;
	const struct mlxsw_rx_listener *rxl;
	struct mlxsw_core_pcpu_stats *pcpu_stats;
	u8 local_port;
	bool found = false;

	if (rx_info->is_lag) {
		dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: lag_id = %d, lag_port_index = 0x%x\n",
				    __func__, rx_info->u.lag_id,
				    rx_info->lag_port_index);
		/* Upper layer does not care if the skb came from LAG or not,
		 * so just get the local_port for the lag port and push it up.
		 */
		local_port = mlxsw_core_lag_mapping_get(mlxsw_core,
							rx_info->u.lag_id,
							rx_info->lag_port_index);
	} else {
		local_port = rx_info->u.sys_port;
	}

	dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: local_port = %d, trap_id = 0x%x\n",
			    __func__, local_port, rx_info->trap_id);

	if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
	    (local_port >= MLXSW_PORT_MAX_PORTS))
		goto drop;

	list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
		rxl = &rxl_item->rxl;
		if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
		     rxl->local_port == local_port) &&
		    rxl->trap_id == rx_info->trap_id) {
			found = true;
			break;
		}
	}
	if (!found)
		goto drop;

	pcpu_stats = this_cpu_ptr(mlxsw_core->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->port_rx_packets[local_port]++;
	pcpu_stats->port_rx_bytes[local_port] += skb->len;
	pcpu_stats->trap_rx_packets[rx_info->trap_id]++;
	pcpu_stats->trap_rx_bytes[rx_info->trap_id] += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	rxl->func(skb, local_port, rxl_item->priv);
	return;

drop:
	if (rx_info->trap_id >= MLXSW_TRAP_ID_MAX)
		this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_invalid);
	else
		this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_dropped[rx_info->trap_id]);
	if (local_port >= MLXSW_PORT_MAX_PORTS)
		this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_invalid);
	else
		this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_dropped[local_port]);
	dev_kfree_skb(skb);
}
EXPORT_SYMBOL(mlxsw_core_skb_receive);

static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
					u16 lag_id, u8 port_index)
{
	return mlxsw_core->driver->profile->max_port_per_lag * lag_id +
	       port_index;
}

void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
				u16 lag_id, u8 port_index, u8 local_port)
{
	int index = mlxsw_core_lag_mapping_index(mlxsw_core,
						 lag_id, port_index);

	mlxsw_core->lag.mapping[index] = local_port;
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_set);

u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
			      u16 lag_id, u8 port_index)
{
	int index = mlxsw_core_lag_mapping_index(mlxsw_core,
						 lag_id, port_index);

	return mlxsw_core->lag.mapping[index];
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_get);

void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
				  u16 lag_id, u8 local_port)
{
	int i;

	for (i = 0; i < mlxsw_core->driver->profile->max_port_per_lag; i++) {
		int index = mlxsw_core_lag_mapping_index(mlxsw_core,
							 lag_id, i);

		if (mlxsw_core->lag.mapping[index] == local_port)
			mlxsw_core->lag.mapping[index] = 0;
	}
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear);

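/* The LAG mapping is a flat array indexed as
 * lag_id * max_port_per_lag + port_index (see
 * mlxsw_core_lag_mapping_index() above), holding the local port currently
 * occupying each LAG slot; mlxsw_core_lag_mapping_clear() resets matching
 * slots back to 0. (Descriptive note added for clarity.)
 */
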
int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
		   u32 in_mod, bool out_mbox_direct,
		   char *in_mbox, size_t in_mbox_size,
		   char *out_mbox, size_t out_mbox_size)
{
	u8 status;
	int err;

	BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
	if (!mlxsw_core->bus->cmd_exec)
		return -EOPNOTSUPP;

	dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
		opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
	if (in_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
	}

	err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
					opcode_mod, in_mod, out_mbox_direct,
					in_mbox, in_mbox_size,
					out_mbox, out_mbox_size, &status);

	if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod, status, mlxsw_cmd_status_str(status));
	} else if (err == -ETIMEDOUT) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod);
	}

	if (!err && out_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
	}

	return err;
}
EXPORT_SYMBOL(mlxsw_cmd_exec);

static int __init mlxsw_core_module_init(void)
{
	mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
	if (!mlxsw_core_dbg_root)
		return -ENOMEM;
	return 0;
}

static void __exit mlxsw_core_module_exit(void)
{
	debugfs_remove_recursive(mlxsw_core_dbg_root);
}

module_init(mlxsw_core_module_init);
module_exit(mlxsw_core_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch device core driver");