mlxsw: core: Implement temperature hwmon interface
/*
 * drivers/net/ethernet/mellanox/mlxsw/core.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/if_link.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/u64_stats_sync.h>
#include <linux/netdevice.h>
#include <linux/wait.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <asm/byteorder.h>

#include "core.h"
#include "item.h"
#include "cmd.h"
#include "port.h"
#include "trap.h"
#include "emad.h"
#include "reg.h"

static LIST_HEAD(mlxsw_core_driver_list);
static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);

static const char mlxsw_core_driver_name[] = "mlxsw_core";

static struct dentry *mlxsw_core_dbg_root;

struct mlxsw_core_pcpu_stats {
        u64                     trap_rx_packets[MLXSW_TRAP_ID_MAX];
        u64                     trap_rx_bytes[MLXSW_TRAP_ID_MAX];
        u64                     port_rx_packets[MLXSW_PORT_MAX_PORTS];
        u64                     port_rx_bytes[MLXSW_PORT_MAX_PORTS];
        struct u64_stats_sync   syncp;
        u32                     trap_rx_dropped[MLXSW_TRAP_ID_MAX];
        u32                     port_rx_dropped[MLXSW_PORT_MAX_PORTS];
        u32                     trap_rx_invalid;
        u32                     port_rx_invalid;
};

struct mlxsw_core {
        struct mlxsw_driver *driver;
        const struct mlxsw_bus *bus;
        void *bus_priv;
        const struct mlxsw_bus_info *bus_info;
        struct list_head rx_listener_list;
        struct list_head event_listener_list;
        struct {
                struct sk_buff *resp_skb;
                u64 tid;
                wait_queue_head_t wait;
                bool trans_active;
                struct mutex lock; /* One EMAD transaction at a time. */
                bool use_emad;
        } emad;
        struct mlxsw_core_pcpu_stats __percpu *pcpu_stats;
        struct dentry *dbg_dir;
        struct {
                struct debugfs_blob_wrapper vsd_blob;
                struct debugfs_blob_wrapper psid_blob;
        } dbg;
        struct mlxsw_hwmon *hwmon;
        unsigned long driver_priv[0];
        /* driver_priv must always be the last member */
};

struct mlxsw_rx_listener_item {
        struct list_head list;
        struct mlxsw_rx_listener rxl;
        void *priv;
};

struct mlxsw_event_listener_item {
        struct list_head list;
        struct mlxsw_event_listener el;
        void *priv;
};

/******************
 * EMAD processing
 ******************/
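
/* An EMAD frame is built back-to-front by mlxsw_emad_construct() below and
 * is laid out on the wire as:
 *
 *      Ethernet header (MLXSW_EMAD_ETH_HDR_LEN bytes)
 *      Operation TLV   (MLXSW_EMAD_OP_TLV_LEN 32-bit words)
 *      Register TLV    (one 32-bit word of header plus the register payload)
 *      End TLV         (MLXSW_EMAD_END_TLV_LEN 32-bit words)
 *
 * The item helpers below access the individual fields of these parts.
 */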

/* emad_eth_hdr_dmac
 * Destination MAC in EMAD's Ethernet header.
 * Must be set to 01:02:c9:00:00:01
 */
MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);

/* emad_eth_hdr_smac
 * Source MAC in EMAD's Ethernet header.
 * Must be set to 00:02:c9:01:02:03
 */
MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);

/* emad_eth_hdr_ethertype
 * Ethertype in EMAD's Ethernet header.
 * Must be set to 0x8932
 */
MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);

/* emad_eth_hdr_mlx_proto
 * Mellanox protocol.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);

/* emad_eth_hdr_ver
 * Mellanox protocol version.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);

/* emad_op_tlv_type
 * Type of the TLV.
 * Must be set to 0x1 (operation TLV).
 */
MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);

/* emad_op_tlv_len
 * Length of the operation TLV in u32.
 * Must be set to 0x4.
 */
MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);

/* emad_op_tlv_dr
 * Direct route bit. Setting to 1 indicates the EMAD is a direct route
 * EMAD. DR TLV must follow.
 *
 * Note: Currently not supported and must not be set.
 */
MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);

/* emad_op_tlv_status
 * Returned status in case of EMAD response. Must be set to 0 in case
 * of EMAD request.
 * 0x0 - success
 * 0x1 - device is busy. Requester should retry
 * 0x2 - Mellanox protocol version not supported
 * 0x3 - unknown TLV
 * 0x4 - register not supported
 * 0x5 - operation class not supported
 * 0x6 - EMAD method not supported
 * 0x7 - bad parameter (e.g. port out of range)
 * 0x8 - resource not available
 * 0x9 - message receipt acknowledgment. Requester should retry
 * 0x70 - internal error
 */
MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);

/* emad_op_tlv_register_id
 * Register ID of register within register TLV.
 */
MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);

/* emad_op_tlv_r
 * Response bit. Setting to 1 indicates Response, otherwise request.
 */
MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);

/* emad_op_tlv_method
 * EMAD method type.
 * 0x1 - query
 * 0x2 - write
 * 0x3 - send (currently not supported)
 * 0x4 - event
 */
MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);

/* emad_op_tlv_class
 * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
 */
MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);

/* emad_op_tlv_tid
 * EMAD transaction ID. Used for pairing request and response EMADs.
 */
MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);

/* emad_reg_tlv_type
 * Type of the TLV.
 * Must be set to 0x3 (register TLV).
 */
MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);

/* emad_reg_tlv_len
 * Length of the register TLV in u32.
 */
MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);

/* emad_end_tlv_type
 * Type of the TLV.
 * Must be set to 0x0 (end TLV).
 */
MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);

/* emad_end_tlv_len
 * Length of the end TLV in u32.
 * Must be set to 1.
 */
MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);

enum mlxsw_core_reg_access_type {
        MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
        MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
};

static inline const char *
mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
{
        switch (type) {
        case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
                return "query";
        case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
                return "write";
        }
        BUG();
}

static void mlxsw_emad_pack_end_tlv(char *end_tlv)
{
        mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
        mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
}

static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
                                    const struct mlxsw_reg_info *reg,
                                    char *payload)
{
        mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
        mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
        memcpy(reg_tlv + sizeof(u32), payload, reg->len);
}

static void mlxsw_emad_pack_op_tlv(char *op_tlv,
                                   const struct mlxsw_reg_info *reg,
                                   enum mlxsw_core_reg_access_type type,
                                   struct mlxsw_core *mlxsw_core)
{
        mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
        mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
        mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
        mlxsw_emad_op_tlv_status_set(op_tlv, 0);
        mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
        mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
        if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
                mlxsw_emad_op_tlv_method_set(op_tlv,
                                             MLXSW_EMAD_OP_TLV_METHOD_QUERY);
        else
                mlxsw_emad_op_tlv_method_set(op_tlv,
                                             MLXSW_EMAD_OP_TLV_METHOD_WRITE);
        mlxsw_emad_op_tlv_class_set(op_tlv,
                                    MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
        mlxsw_emad_op_tlv_tid_set(op_tlv, mlxsw_core->emad.tid);
}

static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
{
        char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);

        mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
        mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
        mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
        mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
        mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);

        skb_reset_mac_header(skb);

        return 0;
}

static void mlxsw_emad_construct(struct sk_buff *skb,
                                 const struct mlxsw_reg_info *reg,
                                 char *payload,
                                 enum mlxsw_core_reg_access_type type,
                                 struct mlxsw_core *mlxsw_core)
{
        char *buf;

        buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
        mlxsw_emad_pack_end_tlv(buf);

        buf = skb_push(skb, reg->len + sizeof(u32));
        mlxsw_emad_pack_reg_tlv(buf, reg, payload);

        buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
        mlxsw_emad_pack_op_tlv(buf, reg, type, mlxsw_core);

        mlxsw_emad_construct_eth_hdr(skb);
}

static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
{
        return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN));
}

static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
{
        return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN +
                          MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)));
}

static char *mlxsw_emad_reg_payload(const char *op_tlv)
{
        return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
}

static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
{
        char *op_tlv;

        op_tlv = mlxsw_emad_op_tlv(skb);
        return mlxsw_emad_op_tlv_tid_get(op_tlv);
}

static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
{
        char *op_tlv;

        op_tlv = mlxsw_emad_op_tlv(skb);
        return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
}

#define MLXSW_EMAD_TIMEOUT_MS 200

static int __mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
                                 struct sk_buff *skb,
                                 const struct mlxsw_tx_info *tx_info)
{
        int err;
        int ret;

        mlxsw_core->emad.trans_active = true;

        err = mlxsw_core_skb_transmit(mlxsw_core->driver_priv, skb, tx_info);
        if (err) {
                dev_err(mlxsw_core->bus_info->dev, "Failed to transmit EMAD (tid=%llx)\n",
                        mlxsw_core->emad.tid);
                dev_kfree_skb(skb);
                goto trans_inactive_out;
        }

        ret = wait_event_timeout(mlxsw_core->emad.wait,
                                 !(mlxsw_core->emad.trans_active),
                                 msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS));
        if (!ret) {
                dev_warn(mlxsw_core->bus_info->dev, "EMAD timed-out (tid=%llx)\n",
                         mlxsw_core->emad.tid);
                err = -EIO;
                goto trans_inactive_out;
        }

        return 0;

trans_inactive_out:
        mlxsw_core->emad.trans_active = false;
        return err;
}

static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core,
                                     char *op_tlv)
{
        enum mlxsw_emad_op_tlv_status status;
        u64 tid;

        status = mlxsw_emad_op_tlv_status_get(op_tlv);
        tid = mlxsw_emad_op_tlv_tid_get(op_tlv);

        switch (status) {
        case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
                return 0;
        case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
        case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
                dev_warn(mlxsw_core->bus_info->dev, "Reg access status again (tid=%llx,status=%x(%s))\n",
                         tid, status, mlxsw_emad_op_tlv_status_str(status));
                return -EAGAIN;
        case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
        case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
        case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
        case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
        case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
        case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
        case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
        case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
        default:
                dev_err(mlxsw_core->bus_info->dev, "Reg access status failed (tid=%llx,status=%x(%s))\n",
                        tid, status, mlxsw_emad_op_tlv_status_str(status));
                return -EIO;
        }
}

static int mlxsw_emad_process_status_skb(struct mlxsw_core *mlxsw_core,
                                         struct sk_buff *skb)
{
        return mlxsw_emad_process_status(mlxsw_core, mlxsw_emad_op_tlv(skb));
}

static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
                               struct sk_buff *skb,
                               const struct mlxsw_tx_info *tx_info)
{
        struct sk_buff *trans_skb;
        int n_retry;
        int err;

        n_retry = 0;
retry:
        /* We copy the EMAD to a new skb, since we might need
         * to retransmit it in case of failure.
         */
        trans_skb = skb_copy(skb, GFP_KERNEL);
        if (!trans_skb) {
                err = -ENOMEM;
                goto out;
        }

        err = __mlxsw_emad_transmit(mlxsw_core, trans_skb, tx_info);
        if (!err) {
                struct sk_buff *resp_skb = mlxsw_core->emad.resp_skb;

                err = mlxsw_emad_process_status_skb(mlxsw_core, resp_skb);
                if (err)
                        dev_kfree_skb(resp_skb);
                if (!err || err != -EAGAIN)
                        goto out;
        }
        if (n_retry++ < MLXSW_EMAD_MAX_RETRY)
                goto retry;

out:
        dev_kfree_skb(skb);
        mlxsw_core->emad.tid++;
        return err;
}

static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
                                        void *priv)
{
        struct mlxsw_core *mlxsw_core = priv;

        if (mlxsw_emad_is_resp(skb) &&
            mlxsw_core->emad.trans_active &&
            mlxsw_emad_get_tid(skb) == mlxsw_core->emad.tid) {
                mlxsw_core->emad.resp_skb = skb;
                mlxsw_core->emad.trans_active = false;
                wake_up(&mlxsw_core->emad.wait);
        } else {
                dev_kfree_skb(skb);
        }
}

static const struct mlxsw_rx_listener mlxsw_emad_rx_listener = {
        .func = mlxsw_emad_rx_listener_func,
        .local_port = MLXSW_PORT_DONT_CARE,
        .trap_id = MLXSW_TRAP_ID_ETHEMAD,
};

static int mlxsw_emad_traps_set(struct mlxsw_core *mlxsw_core)
{
        char htgt_pl[MLXSW_REG_HTGT_LEN];
        char hpkt_pl[MLXSW_REG_HPKT_LEN];
        int err;

        mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD);
        err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
        if (err)
                return err;

        mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
                            MLXSW_TRAP_ID_ETHEMAD);
        return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
}

static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
{
        int err;

        /* Set the upper 32 bits of the transaction ID field to a random
         * number. This allows us to discard EMADs addressed to other
         * devices.
         */
        get_random_bytes(&mlxsw_core->emad.tid, 4);
        mlxsw_core->emad.tid = mlxsw_core->emad.tid << 32;

        init_waitqueue_head(&mlxsw_core->emad.wait);
        mlxsw_core->emad.trans_active = false;
        mutex_init(&mlxsw_core->emad.lock);

        err = mlxsw_core_rx_listener_register(mlxsw_core,
                                              &mlxsw_emad_rx_listener,
                                              mlxsw_core);
        if (err)
                return err;

        err = mlxsw_emad_traps_set(mlxsw_core);
        if (err)
                goto err_emad_trap_set;

        mlxsw_core->emad.use_emad = true;

        return 0;

err_emad_trap_set:
        mlxsw_core_rx_listener_unregister(mlxsw_core,
                                          &mlxsw_emad_rx_listener,
                                          mlxsw_core);
        return err;
}

static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
{
        char hpkt_pl[MLXSW_REG_HPKT_LEN];

        mlxsw_core->emad.use_emad = false;
        mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
                            MLXSW_TRAP_ID_ETHEMAD);
        mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);

        mlxsw_core_rx_listener_unregister(mlxsw_core,
                                          &mlxsw_emad_rx_listener,
                                          mlxsw_core);
}

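/* Allocate an skb whose entire size is reserved as headroom, so that
 * mlxsw_emad_construct() and the driver's txhdr_construct() callback can
 * build the frame back-to-front with successive skb_push() calls.
 */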
static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
                                        u16 reg_len)
{
        struct sk_buff *skb;
        u16 emad_len;

        emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
                    (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
                    sizeof(u32) + mlxsw_core->driver->txhdr_len);
        if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
                return NULL;

        skb = netdev_alloc_skb(NULL, emad_len);
        if (!skb)
                return NULL;
        memset(skb->data, 0, emad_len);
        skb_reserve(skb, emad_len);

        return skb;
}

/*****************
 * Core functions
 *****************/

static int mlxsw_core_rx_stats_dbg_read(struct seq_file *file, void *data)
{
        struct mlxsw_core *mlxsw_core = file->private;
        struct mlxsw_core_pcpu_stats *p;
        u64 rx_packets, rx_bytes;
        u64 tmp_rx_packets, tmp_rx_bytes;
        u32 rx_dropped, rx_invalid;
        unsigned int start;
        int i;
        int j;
        static const char hdr[] =
                "     NUM   RX_PACKETS     RX_BYTES RX_DROPPED\n";

        seq_printf(file, hdr);
        for (i = 0; i < MLXSW_TRAP_ID_MAX; i++) {
                rx_packets = 0;
                rx_bytes = 0;
                rx_dropped = 0;
                for_each_possible_cpu(j) {
                        p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
                        do {
                                start = u64_stats_fetch_begin(&p->syncp);
                                tmp_rx_packets = p->trap_rx_packets[i];
                                tmp_rx_bytes = p->trap_rx_bytes[i];
                        } while (u64_stats_fetch_retry(&p->syncp, start));

                        rx_packets += tmp_rx_packets;
                        rx_bytes += tmp_rx_bytes;
                        rx_dropped += p->trap_rx_dropped[i];
                }
                seq_printf(file, "trap %3d %12llu %12llu %10u\n",
                           i, rx_packets, rx_bytes, rx_dropped);
        }
        rx_invalid = 0;
        for_each_possible_cpu(j) {
                p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
                rx_invalid += p->trap_rx_invalid;
        }
        seq_printf(file, "trap INV                           %10u\n",
                   rx_invalid);

        for (i = 0; i < MLXSW_PORT_MAX_PORTS; i++) {
                rx_packets = 0;
                rx_bytes = 0;
                rx_dropped = 0;
                for_each_possible_cpu(j) {
                        p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
                        do {
                                start = u64_stats_fetch_begin(&p->syncp);
                                tmp_rx_packets = p->port_rx_packets[i];
                                tmp_rx_bytes = p->port_rx_bytes[i];
                        } while (u64_stats_fetch_retry(&p->syncp, start));

                        rx_packets += tmp_rx_packets;
                        rx_bytes += tmp_rx_bytes;
                        rx_dropped += p->port_rx_dropped[i];
                }
                seq_printf(file, "port %3d %12llu %12llu %10u\n",
                           i, rx_packets, rx_bytes, rx_dropped);
        }
        rx_invalid = 0;
        for_each_possible_cpu(j) {
                p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
                rx_invalid += p->port_rx_invalid;
        }
        seq_printf(file, "port INV                           %10u\n",
                   rx_invalid);
        return 0;
}

static int mlxsw_core_rx_stats_dbg_open(struct inode *inode, struct file *f)
{
        struct mlxsw_core *mlxsw_core = inode->i_private;

        return single_open(f, mlxsw_core_rx_stats_dbg_read, mlxsw_core);
}

static const struct file_operations mlxsw_core_rx_stats_dbg_ops = {
        .owner = THIS_MODULE,
        .open = mlxsw_core_rx_stats_dbg_open,
        .release = single_release,
        .read = seq_read,
        .llseek = seq_lseek
};

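/* Dump a buffer to the debug log as rows of four big-endian 32-bit words,
 * prefixed with the byte offset. Trailing all-zero words are skipped, but
 * at least one row is always printed.
 */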
static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
                                    const char *buf, size_t size)
{
        __be32 *m = (__be32 *) buf;
        int i;
        int count = size / sizeof(__be32);

        for (i = count - 1; i >= 0; i--)
                if (m[i])
                        break;
        i++;
        count = i ? i : 1;
        for (i = 0; i < count; i += 4)
                dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
                        i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
                        be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
}

int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
{
        spin_lock(&mlxsw_core_driver_list_lock);
        list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
        spin_unlock(&mlxsw_core_driver_list_lock);
        return 0;
}
EXPORT_SYMBOL(mlxsw_core_driver_register);

void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
{
        spin_lock(&mlxsw_core_driver_list_lock);
        list_del(&mlxsw_driver->list);
        spin_unlock(&mlxsw_core_driver_list_lock);
}
EXPORT_SYMBOL(mlxsw_core_driver_unregister);

static struct mlxsw_driver *__driver_find(const char *kind)
{
        struct mlxsw_driver *mlxsw_driver;

        list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
                if (strcmp(mlxsw_driver->kind, kind) == 0)
                        return mlxsw_driver;
        }
        return NULL;
}

static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
{
        struct mlxsw_driver *mlxsw_driver;

        spin_lock(&mlxsw_core_driver_list_lock);
        mlxsw_driver = __driver_find(kind);
        if (!mlxsw_driver) {
                spin_unlock(&mlxsw_core_driver_list_lock);
                request_module(MLXSW_MODULE_ALIAS_PREFIX "%s", kind);
                spin_lock(&mlxsw_core_driver_list_lock);
                mlxsw_driver = __driver_find(kind);
        }
        if (mlxsw_driver) {
                if (!try_module_get(mlxsw_driver->owner))
                        mlxsw_driver = NULL;
        }

        spin_unlock(&mlxsw_core_driver_list_lock);
        return mlxsw_driver;
}

static void mlxsw_core_driver_put(const char *kind)
{
        struct mlxsw_driver *mlxsw_driver;

        spin_lock(&mlxsw_core_driver_list_lock);
        mlxsw_driver = __driver_find(kind);
        spin_unlock(&mlxsw_core_driver_list_lock);
        if (!mlxsw_driver)
                return;
        module_put(mlxsw_driver->owner);
}

static int mlxsw_core_debugfs_init(struct mlxsw_core *mlxsw_core)
{
        const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info;

        mlxsw_core->dbg_dir = debugfs_create_dir(bus_info->device_name,
                                                 mlxsw_core_dbg_root);
        if (!mlxsw_core->dbg_dir)
                return -ENOMEM;
        debugfs_create_file("rx_stats", S_IRUGO, mlxsw_core->dbg_dir,
                            mlxsw_core, &mlxsw_core_rx_stats_dbg_ops);
        mlxsw_core->dbg.vsd_blob.data = (void *) &bus_info->vsd;
        mlxsw_core->dbg.vsd_blob.size = sizeof(bus_info->vsd);
        debugfs_create_blob("vsd", S_IRUGO, mlxsw_core->dbg_dir,
                            &mlxsw_core->dbg.vsd_blob);
        mlxsw_core->dbg.psid_blob.data = (void *) &bus_info->psid;
        mlxsw_core->dbg.psid_blob.size = sizeof(bus_info->psid);
        debugfs_create_blob("psid", S_IRUGO, mlxsw_core->dbg_dir,
                            &mlxsw_core->dbg.psid_blob);
        return 0;
}

static void mlxsw_core_debugfs_fini(struct mlxsw_core *mlxsw_core)
{
        debugfs_remove_recursive(mlxsw_core->dbg_dir);
}

int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
                                   const struct mlxsw_bus *mlxsw_bus,
                                   void *bus_priv)
{
        const char *device_kind = mlxsw_bus_info->device_kind;
        struct mlxsw_core *mlxsw_core;
        struct mlxsw_driver *mlxsw_driver;
        size_t alloc_size;
        int err;

        mlxsw_driver = mlxsw_core_driver_get(device_kind);
        if (!mlxsw_driver)
                return -EINVAL;
        alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
        mlxsw_core = kzalloc(alloc_size, GFP_KERNEL);
        if (!mlxsw_core) {
                err = -ENOMEM;
                goto err_core_alloc;
        }

        INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
        INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
        mlxsw_core->driver = mlxsw_driver;
        mlxsw_core->bus = mlxsw_bus;
        mlxsw_core->bus_priv = bus_priv;
        mlxsw_core->bus_info = mlxsw_bus_info;

        mlxsw_core->pcpu_stats =
                netdev_alloc_pcpu_stats(struct mlxsw_core_pcpu_stats);
        if (!mlxsw_core->pcpu_stats) {
                err = -ENOMEM;
                goto err_alloc_stats;
        }

        err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile);
        if (err)
                goto err_bus_init;

        err = mlxsw_emad_init(mlxsw_core);
        if (err)
                goto err_emad_init;

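        /* Expose the device's temperature sensors through the hwmon
         * interface.
         */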
        err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
        if (err)
                goto err_hwmon_init;

        err = mlxsw_driver->init(mlxsw_core->driver_priv, mlxsw_core,
                                 mlxsw_bus_info);
        if (err)
                goto err_driver_init;

        err = mlxsw_core_debugfs_init(mlxsw_core);
        if (err)
                goto err_debugfs_init;

        return 0;

err_debugfs_init:
        mlxsw_core->driver->fini(mlxsw_core->driver_priv);
err_driver_init:
        mlxsw_hwmon_fini(mlxsw_core->hwmon);
err_hwmon_init:
        mlxsw_emad_fini(mlxsw_core);
err_emad_init:
        mlxsw_bus->fini(bus_priv);
err_bus_init:
        free_percpu(mlxsw_core->pcpu_stats);
err_alloc_stats:
        kfree(mlxsw_core);
err_core_alloc:
        mlxsw_core_driver_put(device_kind);
        return err;
}
EXPORT_SYMBOL(mlxsw_core_bus_device_register);

void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
{
        const char *device_kind = mlxsw_core->bus_info->device_kind;

        mlxsw_core_debugfs_fini(mlxsw_core);
        mlxsw_core->driver->fini(mlxsw_core->driver_priv);
        mlxsw_hwmon_fini(mlxsw_core->hwmon);
        mlxsw_emad_fini(mlxsw_core);
        mlxsw_core->bus->fini(mlxsw_core->bus_priv);
        free_percpu(mlxsw_core->pcpu_stats);
        kfree(mlxsw_core);
        mlxsw_core_driver_put(device_kind);
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);

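/* driver_priv is the flexible array member at the end of struct mlxsw_core,
 * so container_of() recovers the owning core instance from the driver's
 * private data pointer.
 */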
static struct mlxsw_core *__mlxsw_core_get(void *driver_priv)
{
        return container_of(driver_priv, struct mlxsw_core, driver_priv);
}

bool mlxsw_core_skb_transmit_busy(void *driver_priv,
                                  const struct mlxsw_tx_info *tx_info)
{
        struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv);

        return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
                                                  tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);

int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb,
                            const struct mlxsw_tx_info *tx_info)
{
        struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv);

        return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
                                             tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit);

static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
                                   const struct mlxsw_rx_listener *rxl_b)
{
        return (rxl_a->func == rxl_b->func &&
                rxl_a->local_port == rxl_b->local_port &&
                rxl_a->trap_id == rxl_b->trap_id);
}

static struct mlxsw_rx_listener_item *
__find_rx_listener_item(struct mlxsw_core *mlxsw_core,
                        const struct mlxsw_rx_listener *rxl,
                        void *priv)
{
        struct mlxsw_rx_listener_item *rxl_item;

        list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
                if (__is_rx_listener_equal(&rxl_item->rxl, rxl) &&
                    rxl_item->priv == priv)
                        return rxl_item;
        }
        return NULL;
}

int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
                                    const struct mlxsw_rx_listener *rxl,
                                    void *priv)
{
        struct mlxsw_rx_listener_item *rxl_item;

        rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
        if (rxl_item)
                return -EEXIST;
        rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
        if (!rxl_item)
                return -ENOMEM;
        rxl_item->rxl = *rxl;
        rxl_item->priv = priv;

        list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
        return 0;
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_register);

void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
                                       const struct mlxsw_rx_listener *rxl,
                                       void *priv)
{
        struct mlxsw_rx_listener_item *rxl_item;

        rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
        if (!rxl_item)
                return;
        list_del_rcu(&rxl_item->list);
        synchronize_rcu();
        kfree(rxl_item);
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);

static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
                                           void *priv)
{
        struct mlxsw_event_listener_item *event_listener_item = priv;
        struct mlxsw_reg_info reg;
        char *payload;
        char *op_tlv = mlxsw_emad_op_tlv(skb);
        char *reg_tlv = mlxsw_emad_reg_tlv(skb);

        reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
        reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
        payload = mlxsw_emad_reg_payload(op_tlv);
        event_listener_item->el.func(&reg, payload, event_listener_item->priv);
        dev_kfree_skb(skb);
}

static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
                                      const struct mlxsw_event_listener *el_b)
{
        return (el_a->func == el_b->func &&
                el_a->trap_id == el_b->trap_id);
}

static struct mlxsw_event_listener_item *
__find_event_listener_item(struct mlxsw_core *mlxsw_core,
                           const struct mlxsw_event_listener *el,
                           void *priv)
{
        struct mlxsw_event_listener_item *el_item;

        list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
                if (__is_event_listener_equal(&el_item->el, el) &&
                    el_item->priv == priv)
                        return el_item;
        }
        return NULL;
}

int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
                                       const struct mlxsw_event_listener *el,
                                       void *priv)
{
        int err;
        struct mlxsw_event_listener_item *el_item;
        const struct mlxsw_rx_listener rxl = {
                .func = mlxsw_core_event_listener_func,
                .local_port = MLXSW_PORT_DONT_CARE,
                .trap_id = el->trap_id,
        };

        el_item = __find_event_listener_item(mlxsw_core, el, priv);
        if (el_item)
                return -EEXIST;
        el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
        if (!el_item)
                return -ENOMEM;
        el_item->el = *el;
        el_item->priv = priv;

        err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item);
        if (err)
                goto err_rx_listener_register;

        /* No reason to save item if we did not manage to register an RX
         * listener for it.
         */
        list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);

        return 0;

err_rx_listener_register:
        kfree(el_item);
        return err;
}
EXPORT_SYMBOL(mlxsw_core_event_listener_register);
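
/* Usage sketch (illustrative only; PUDE is just an example of a trap a
 * driver might listen for, and my_pude_func/my_pude_el are hypothetical
 * names). The callback signature matches the call made from
 * mlxsw_core_event_listener_func() above:
 *
 *      static void my_pude_func(const struct mlxsw_reg_info *reg,
 *                               char *pude_pl, void *priv) { ... }
 *
 *      static const struct mlxsw_event_listener my_pude_el = {
 *              .func = my_pude_func,
 *              .trap_id = MLXSW_TRAP_ID_PUDE,
 *      };
 *      err = mlxsw_core_event_listener_register(mlxsw_core, &my_pude_el,
 *                                               priv);
 */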

void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
                                          const struct mlxsw_event_listener *el,
                                          void *priv)
{
        struct mlxsw_event_listener_item *el_item;
        const struct mlxsw_rx_listener rxl = {
                .func = mlxsw_core_event_listener_func,
                .local_port = MLXSW_PORT_DONT_CARE,
                .trap_id = el->trap_id,
        };

        el_item = __find_event_listener_item(mlxsw_core, el, priv);
        if (!el_item)
                return;
        mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl, el_item);
        list_del(&el_item->list);
        kfree(el_item);
}
EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);

static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
                                      const struct mlxsw_reg_info *reg,
                                      char *payload,
                                      enum mlxsw_core_reg_access_type type)
{
        int err;
        char *op_tlv;
        struct sk_buff *skb;
        struct mlxsw_tx_info tx_info = {
                .local_port = MLXSW_PORT_CPU_PORT,
                .is_emad = true,
        };

        skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
        if (!skb)
                return -ENOMEM;

        mlxsw_emad_construct(skb, reg, payload, type, mlxsw_core);
        mlxsw_core->driver->txhdr_construct(skb, &tx_info);

        dev_dbg(mlxsw_core->bus_info->dev, "EMAD send (tid=%llx)\n",
                mlxsw_core->emad.tid);
        mlxsw_core_buf_dump_dbg(mlxsw_core, skb->data, skb->len);

        err = mlxsw_emad_transmit(mlxsw_core, skb, &tx_info);
        if (!err) {
                op_tlv = mlxsw_emad_op_tlv(mlxsw_core->emad.resp_skb);
                memcpy(payload, mlxsw_emad_reg_payload(op_tlv),
                       reg->len);

                dev_dbg(mlxsw_core->bus_info->dev, "EMAD recv (tid=%llx)\n",
                        mlxsw_core->emad.tid - 1);
                mlxsw_core_buf_dump_dbg(mlxsw_core,
                                        mlxsw_core->emad.resp_skb->data,
                                        mlxsw_core->emad.resp_skb->len);

                dev_kfree_skb(mlxsw_core->emad.resp_skb);
        }

        return err;
}

static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
                                     const struct mlxsw_reg_info *reg,
                                     char *payload,
                                     enum mlxsw_core_reg_access_type type)
{
        int err, n_retry;
        char *in_mbox, *out_mbox, *tmp;

        in_mbox = mlxsw_cmd_mbox_alloc();
        if (!in_mbox)
                return -ENOMEM;

        out_mbox = mlxsw_cmd_mbox_alloc();
        if (!out_mbox) {
                err = -ENOMEM;
                goto free_in_mbox;
        }

        mlxsw_emad_pack_op_tlv(in_mbox, reg, type, mlxsw_core);
        tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
        mlxsw_emad_pack_reg_tlv(tmp, reg, payload);

        n_retry = 0;
retry:
        err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox);
        if (!err) {
                err = mlxsw_emad_process_status(mlxsw_core, out_mbox);
                if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
                        goto retry;
        }

        if (!err)
                memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
                       reg->len);

        mlxsw_core->emad.tid++;
        mlxsw_cmd_mbox_free(out_mbox);
free_in_mbox:
        mlxsw_cmd_mbox_free(in_mbox);
        return err;
}

static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
                                 const struct mlxsw_reg_info *reg,
                                 char *payload,
                                 enum mlxsw_core_reg_access_type type)
{
        u64 cur_tid;
        int err;

        if (mutex_lock_interruptible(&mlxsw_core->emad.lock)) {
                dev_err(mlxsw_core->bus_info->dev, "Reg access interrupted (reg_id=%x(%s),type=%s)\n",
                        reg->id, mlxsw_reg_id_str(reg->id),
                        mlxsw_core_reg_access_type_str(type));
                return -EINTR;
        }

        cur_tid = mlxsw_core->emad.tid;
        dev_dbg(mlxsw_core->bus_info->dev, "Reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
                cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
                mlxsw_core_reg_access_type_str(type));

        /* During initialization EMAD interface is not available to us,
         * so we default to command interface. We switch to EMAD interface
         * after setting the appropriate traps.
         */
        if (!mlxsw_core->emad.use_emad)
                err = mlxsw_core_reg_access_cmd(mlxsw_core, reg,
                                                payload, type);
        else
                err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
                                                 payload, type);

        if (err)
                dev_err(mlxsw_core->bus_info->dev, "Reg access failed (tid=%llx,reg_id=%x(%s),type=%s)\n",
                        cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
                        mlxsw_core_reg_access_type_str(type));

        mutex_unlock(&mlxsw_core->emad.lock);
        return err;
}

int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
                    const struct mlxsw_reg_info *reg, char *payload)
{
        return mlxsw_core_reg_access(mlxsw_core, reg, payload,
                                     MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
}
EXPORT_SYMBOL(mlxsw_reg_query);

int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
                    const struct mlxsw_reg_info *reg, char *payload)
{
        return mlxsw_core_reg_access(mlxsw_core, reg, payload,
                                     MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
}
EXPORT_SYMBOL(mlxsw_reg_write);
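
/* Illustrative usage sketch, not taken from this file: a caller packs a
 * register payload with its reg.h helper and issues the access through
 * mlxsw_reg_query()/mlxsw_reg_write(), as mlxsw_emad_traps_set() does above
 * with HTGT/HPKT. Assuming a register "pmlp" whose pack helper takes the
 * local port:
 *
 *      char pmlp_pl[MLXSW_REG_PMLP_LEN];
 *
 *      mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
 *      err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmlp), pmlp_pl);
 */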

void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
                            struct mlxsw_rx_info *rx_info)
{
        struct mlxsw_rx_listener_item *rxl_item;
        const struct mlxsw_rx_listener *rxl;
        struct mlxsw_core_pcpu_stats *pcpu_stats;
        u8 local_port = rx_info->sys_port;
        bool found = false;

        dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: sys_port = %d, trap_id = 0x%x\n",
                            __func__, rx_info->sys_port, rx_info->trap_id);

        if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
            (local_port >= MLXSW_PORT_MAX_PORTS))
                goto drop;

        rcu_read_lock();
        list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
                rxl = &rxl_item->rxl;
                if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
                     rxl->local_port == local_port) &&
                    rxl->trap_id == rx_info->trap_id) {
                        found = true;
                        break;
                }
        }
        rcu_read_unlock();
        if (!found)
                goto drop;

        pcpu_stats = this_cpu_ptr(mlxsw_core->pcpu_stats);
        u64_stats_update_begin(&pcpu_stats->syncp);
        pcpu_stats->port_rx_packets[local_port]++;
        pcpu_stats->port_rx_bytes[local_port] += skb->len;
        pcpu_stats->trap_rx_packets[rx_info->trap_id]++;
        pcpu_stats->trap_rx_bytes[rx_info->trap_id] += skb->len;
        u64_stats_update_end(&pcpu_stats->syncp);

        rxl->func(skb, local_port, rxl_item->priv);
        return;

drop:
        if (rx_info->trap_id >= MLXSW_TRAP_ID_MAX)
                this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_invalid);
        else
                this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_dropped[rx_info->trap_id]);
        if (local_port >= MLXSW_PORT_MAX_PORTS)
                this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_invalid);
        else
                this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_dropped[local_port]);
        dev_kfree_skb(skb);
}
EXPORT_SYMBOL(mlxsw_core_skb_receive);

int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
                   u32 in_mod, bool out_mbox_direct,
                   char *in_mbox, size_t in_mbox_size,
                   char *out_mbox, size_t out_mbox_size)
{
        u8 status;
        int err;

        BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
        if (!mlxsw_core->bus->cmd_exec)
                return -EOPNOTSUPP;

        dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
                opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
        if (in_mbox) {
                dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
                mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
        }

        err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
                                        opcode_mod, in_mod, out_mbox_direct,
                                        in_mbox, in_mbox_size,
                                        out_mbox, out_mbox_size, &status);

        if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
                dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
                        opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
                        in_mod, status, mlxsw_cmd_status_str(status));
        } else if (err == -ETIMEDOUT) {
                dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
                        opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
                        in_mod);
        }

        if (!err && out_mbox) {
                dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
                mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
        }
        return err;
}
EXPORT_SYMBOL(mlxsw_cmd_exec);

static int __init mlxsw_core_module_init(void)
{
        mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
        if (!mlxsw_core_dbg_root)
                return -ENOMEM;
        return 0;
}

static void __exit mlxsw_core_module_exit(void)
{
        debugfs_remove_recursive(mlxsw_core_dbg_root);
}

module_init(mlxsw_core_module_init);
module_exit(mlxsw_core_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch device core driver");