/*
 * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver
 * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/pci.h>
15 #include <linux/interrupt.h>
16 #include <linux/sched.h>
17 #include <linux/wait.h>
18 #include <linux/spinlock.h>
19 #include <linux/hashtable.h>
20 #include <linux/crc32.h>
21 #include <linux/sort.h>
22 #include <linux/random.h>
23 #include <linux/netdevice.h>
24 #include <linux/inetdevice.h>
25 #include <linux/skbuff.h>
26 #include <linux/socket.h>
27 #include <linux/etherdevice.h>
28 #include <linux/ethtool.h>
29 #include <linux/if_ether.h>
30 #include <linux/if_vlan.h>
31 #include <linux/if_bridge.h>
32 #include <linux/bitops.h>
33 #include <linux/ctype.h>
34 #include <net/switchdev.h>
35 #include <net/rtnetlink.h>
36 #include <net/ip_fib.h>
37 #include <net/netevent.h>
39 #include <asm-generic/io-64-nonatomic-lo-hi.h>
40 #include <generated/utsrelease.h>
44 static const char rocker_driver_name
[] = "rocker";
46 static const struct pci_device_id rocker_pci_id_table
[] = {
47 {PCI_VDEVICE(REDHAT
, PCI_DEVICE_ID_REDHAT_ROCKER
), 0},
51 struct rocker_flow_tbl_key
{
53 enum rocker_of_dpa_table_id tbl_id
;
58 enum rocker_of_dpa_table_id goto_tbl
;
64 enum rocker_of_dpa_table_id goto_tbl
;
73 u8 eth_dst_mask
[ETH_ALEN
];
76 enum rocker_of_dpa_table_id goto_tbl
;
83 enum rocker_of_dpa_table_id goto_tbl
;
88 u8 eth_dst_mask
[ETH_ALEN
];
93 enum rocker_of_dpa_table_id goto_tbl
;
100 u8 eth_src
[ETH_ALEN
];
101 u8 eth_src_mask
[ETH_ALEN
];
102 u8 eth_dst
[ETH_ALEN
];
103 u8 eth_dst_mask
[ETH_ALEN
];
116 struct rocker_flow_tbl_entry
{
117 struct hlist_node entry
;
120 struct rocker_flow_tbl_key key
;
122 u32 key_crc32
; /* key */
125 struct rocker_group_tbl_entry
{
126 struct hlist_node entry
;
128 u32 group_id
; /* key */
136 u8 eth_src
[ETH_ALEN
];
137 u8 eth_dst
[ETH_ALEN
];
142 u8 eth_src
[ETH_ALEN
];
143 u8 eth_dst
[ETH_ALEN
];
151 struct rocker_fdb_tbl_entry
{
152 struct hlist_node entry
;
153 u32 key_crc32
; /* key */
155 struct rocker_fdb_tbl_key
{
162 struct rocker_internal_vlan_tbl_entry
{
163 struct hlist_node entry
;
164 int ifindex
; /* key */
169 struct rocker_neigh_tbl_entry
{
170 struct hlist_node entry
;
171 __be32 ip_addr
; /* key */
172 struct net_device
*dev
;
175 u8 eth_dst
[ETH_ALEN
];
179 struct rocker_desc_info
{
180 char *data
; /* mapped */
183 struct rocker_desc
*desc
;
187 struct rocker_dma_ring_info
{
191 struct rocker_desc
*desc
; /* mapped */
193 struct rocker_desc_info
*desc_info
;
200 ROCKER_CTRL_LINK_LOCAL_MCAST
,
201 ROCKER_CTRL_LOCAL_ARP
,
202 ROCKER_CTRL_IPV4_MCAST
,
203 ROCKER_CTRL_IPV6_MCAST
,
204 ROCKER_CTRL_DFLT_BRIDGING
,
205 ROCKER_CTRL_DFLT_OVS
,
209 #define ROCKER_INTERNAL_VLAN_ID_BASE 0x0f00
210 #define ROCKER_N_INTERNAL_VLANS 255
211 #define ROCKER_VLAN_BITMAP_LEN BITS_TO_LONGS(VLAN_N_VID)
212 #define ROCKER_INTERNAL_VLAN_BITMAP_LEN BITS_TO_LONGS(ROCKER_N_INTERNAL_VLANS)
215 struct net_device
*dev
;
216 struct net_device
*bridge_dev
;
217 struct rocker
*rocker
;
218 unsigned int port_number
;
220 __be16 internal_vlan_id
;
223 bool ctrls
[ROCKER_CTRL_MAX
];
224 unsigned long vlan_bitmap
[ROCKER_VLAN_BITMAP_LEN
];
225 struct napi_struct napi_tx
;
226 struct napi_struct napi_rx
;
227 struct rocker_dma_ring_info tx_ring
;
228 struct rocker_dma_ring_info rx_ring
;
229 struct list_head trans_mem
;
233 struct pci_dev
*pdev
;
235 struct msix_entry
*msix_entries
;
236 unsigned int port_count
;
237 struct rocker_port
**ports
;
241 spinlock_t cmd_ring_lock
; /* for cmd ring accesses */
242 struct rocker_dma_ring_info cmd_ring
;
243 struct rocker_dma_ring_info event_ring
;
244 DECLARE_HASHTABLE(flow_tbl
, 16);
245 spinlock_t flow_tbl_lock
; /* for flow tbl accesses */
246 u64 flow_tbl_next_cookie
;
247 DECLARE_HASHTABLE(group_tbl
, 16);
248 spinlock_t group_tbl_lock
; /* for group tbl accesses */
249 DECLARE_HASHTABLE(fdb_tbl
, 16);
250 spinlock_t fdb_tbl_lock
; /* for fdb tbl accesses */
251 unsigned long internal_vlan_bitmap
[ROCKER_INTERNAL_VLAN_BITMAP_LEN
];
252 DECLARE_HASHTABLE(internal_vlan_tbl
, 8);
253 spinlock_t internal_vlan_tbl_lock
; /* for vlan tbl accesses */
254 DECLARE_HASHTABLE(neigh_tbl
, 16);
255 spinlock_t neigh_tbl_lock
; /* for neigh tbl accesses */
256 u32 neigh_tbl_next_index
;
259 static const u8 zero_mac
[ETH_ALEN
] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
260 static const u8 ff_mac
[ETH_ALEN
] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
261 static const u8 ll_mac
[ETH_ALEN
] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
262 static const u8 ll_mask
[ETH_ALEN
] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
263 static const u8 mcast_mac
[ETH_ALEN
] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
264 static const u8 ipv4_mcast
[ETH_ALEN
] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
265 static const u8 ipv4_mask
[ETH_ALEN
] = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
266 static const u8 ipv6_mcast
[ETH_ALEN
] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
267 static const u8 ipv6_mask
[ETH_ALEN
] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
269 /* Rocker priority levels for flow table entries. Higher
270 * priority match takes precedence over lower priority match.
274 ROCKER_PRIORITY_UNKNOWN
= 0,
275 ROCKER_PRIORITY_IG_PORT
= 1,
276 ROCKER_PRIORITY_VLAN
= 1,
277 ROCKER_PRIORITY_TERM_MAC_UCAST
= 0,
278 ROCKER_PRIORITY_TERM_MAC_MCAST
= 1,
279 ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT
= 1,
280 ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD
= 2,
281 ROCKER_PRIORITY_BRIDGING_VLAN
= 3,
282 ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT
= 1,
283 ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD
= 2,
284 ROCKER_PRIORITY_BRIDGING_TENANT
= 3,
285 ROCKER_PRIORITY_ACL_CTRL
= 3,
286 ROCKER_PRIORITY_ACL_NORMAL
= 2,
287 ROCKER_PRIORITY_ACL_DFLT
= 1,
290 static bool rocker_vlan_id_is_internal(__be16 vlan_id
)
292 u16 start
= ROCKER_INTERNAL_VLAN_ID_BASE
;
294 u16 _vlan_id
= ntohs(vlan_id
);
296 return (_vlan_id
>= start
&& _vlan_id
<= end
);
299 static __be16
rocker_port_vid_to_vlan(const struct rocker_port
*rocker_port
,
300 u16 vid
, bool *pop_vlan
)
306 vlan_id
= htons(vid
);
308 vlan_id
= rocker_port
->internal_vlan_id
;
316 static u16
rocker_port_vlan_to_vid(const struct rocker_port
*rocker_port
,
319 if (rocker_vlan_id_is_internal(vlan_id
))
322 return ntohs(vlan_id
);
325 static bool rocker_port_is_slave(const struct rocker_port
*rocker_port
,
328 return rocker_port
->bridge_dev
&&
329 !strcmp(rocker_port
->bridge_dev
->rtnl_link_ops
->kind
, kind
);
332 static bool rocker_port_is_bridged(const struct rocker_port
*rocker_port
)
334 return rocker_port_is_slave(rocker_port
, "bridge");
337 static bool rocker_port_is_ovsed(const struct rocker_port
*rocker_port
)
339 return rocker_port_is_slave(rocker_port
, "openvswitch");
342 #define ROCKER_OP_FLAG_REMOVE BIT(0)
343 #define ROCKER_OP_FLAG_NOWAIT BIT(1)
344 #define ROCKER_OP_FLAG_LEARNED BIT(2)
345 #define ROCKER_OP_FLAG_REFRESH BIT(3)
347 static void *__rocker_port_mem_alloc(struct rocker_port
*rocker_port
,
348 enum switchdev_trans trans
, int flags
,
351 struct list_head
*elem
= NULL
;
352 gfp_t gfp_flags
= (flags
& ROCKER_OP_FLAG_NOWAIT
) ?
353 GFP_ATOMIC
: GFP_KERNEL
;
355 /* If in transaction prepare phase, allocate the memory
356 * and enqueue it on a per-port list. If in transaction
357 * commit phase, dequeue the memory from the per-port list
358 * rather than re-allocating the memory. The idea is the
359 * driver code paths for prepare and commit are identical
360 * so the memory allocated in the prepare phase is the
361 * memory used in the commit phase.
365 case SWITCHDEV_TRANS_PREPARE
:
366 elem
= kzalloc(size
+ sizeof(*elem
), gfp_flags
);
369 list_add_tail(elem
, &rocker_port
->trans_mem
);
371 case SWITCHDEV_TRANS_COMMIT
:
372 BUG_ON(list_empty(&rocker_port
->trans_mem
));
373 elem
= rocker_port
->trans_mem
.next
;
376 case SWITCHDEV_TRANS_NONE
:
377 elem
= kzalloc(size
+ sizeof(*elem
), gfp_flags
);
379 INIT_LIST_HEAD(elem
);
385 return elem
? elem
+ 1 : NULL
;
388 static void *rocker_port_kzalloc(struct rocker_port
*rocker_port
,
389 enum switchdev_trans trans
, int flags
,
392 return __rocker_port_mem_alloc(rocker_port
, trans
, flags
, size
);
395 static void *rocker_port_kcalloc(struct rocker_port
*rocker_port
,
396 enum switchdev_trans trans
, int flags
,
397 size_t n
, size_t size
)
399 return __rocker_port_mem_alloc(rocker_port
, trans
, flags
, n
* size
);
402 static void rocker_port_kfree(enum switchdev_trans trans
, const void *mem
)
404 struct list_head
*elem
;
406 /* Frees are ignored if in transaction prepare phase. The
407 * memory remains on the per-port list until freed in the
411 if (trans
== SWITCHDEV_TRANS_PREPARE
)
414 elem
= (struct list_head
*)mem
- 1;
415 BUG_ON(!list_empty(elem
));
420 wait_queue_head_t wait
;
425 static void rocker_wait_reset(struct rocker_wait
*wait
)
428 wait
->nowait
= false;
431 static void rocker_wait_init(struct rocker_wait
*wait
)
433 init_waitqueue_head(&wait
->wait
);
434 rocker_wait_reset(wait
);
437 static struct rocker_wait
*rocker_wait_create(struct rocker_port
*rocker_port
,
438 enum switchdev_trans trans
,
441 struct rocker_wait
*wait
;
443 wait
= rocker_port_kzalloc(rocker_port
, trans
, flags
, sizeof(*wait
));
446 rocker_wait_init(wait
);
450 static void rocker_wait_destroy(enum switchdev_trans trans
,
451 struct rocker_wait
*wait
)
453 rocker_port_kfree(trans
, wait
);
456 static bool rocker_wait_event_timeout(struct rocker_wait
*wait
,
457 unsigned long timeout
)
459 wait_event_timeout(wait
->wait
, wait
->done
, HZ
/ 10);
465 static void rocker_wait_wake_up(struct rocker_wait
*wait
)
468 wake_up(&wait
->wait
);
471 static u32
rocker_msix_vector(const struct rocker
*rocker
, unsigned int vector
)
473 return rocker
->msix_entries
[vector
].vector
;
476 static u32
rocker_msix_tx_vector(const struct rocker_port
*rocker_port
)
478 return rocker_msix_vector(rocker_port
->rocker
,
479 ROCKER_MSIX_VEC_TX(rocker_port
->port_number
));
482 static u32
rocker_msix_rx_vector(const struct rocker_port
*rocker_port
)
484 return rocker_msix_vector(rocker_port
->rocker
,
485 ROCKER_MSIX_VEC_RX(rocker_port
->port_number
));
488 #define rocker_write32(rocker, reg, val) \
489 writel((val), (rocker)->hw_addr + (ROCKER_ ## reg))
490 #define rocker_read32(rocker, reg) \
491 readl((rocker)->hw_addr + (ROCKER_ ## reg))
492 #define rocker_write64(rocker, reg, val) \
493 writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg))
494 #define rocker_read64(rocker, reg) \
495 readq((rocker)->hw_addr + (ROCKER_ ## reg))
497 /*****************************
498 * HW basic testing functions
499 *****************************/
501 static int rocker_reg_test(const struct rocker
*rocker
)
503 const struct pci_dev
*pdev
= rocker
->pdev
;
509 rocker_write32(rocker
, TEST_REG
, rnd
);
510 test_reg
= rocker_read32(rocker
, TEST_REG
);
511 if (test_reg
!= rnd
* 2) {
512 dev_err(&pdev
->dev
, "unexpected 32bit register value %08llx, expected %08llx\n",
519 rnd
|= prandom_u32();
520 rocker_write64(rocker
, TEST_REG64
, rnd
);
521 test_reg
= rocker_read64(rocker
, TEST_REG64
);
522 if (test_reg
!= rnd
* 2) {
523 dev_err(&pdev
->dev
, "unexpected 64bit register value %16llx, expected %16llx\n",
531 static int rocker_dma_test_one(const struct rocker
*rocker
,
532 struct rocker_wait
*wait
, u32 test_type
,
533 dma_addr_t dma_handle
, const unsigned char *buf
,
534 const unsigned char *expect
, size_t size
)
536 const struct pci_dev
*pdev
= rocker
->pdev
;
539 rocker_wait_reset(wait
);
540 rocker_write32(rocker
, TEST_DMA_CTRL
, test_type
);
542 if (!rocker_wait_event_timeout(wait
, HZ
/ 10)) {
543 dev_err(&pdev
->dev
, "no interrupt received within a timeout\n");
547 for (i
= 0; i
< size
; i
++) {
548 if (buf
[i
] != expect
[i
]) {
549 dev_err(&pdev
->dev
, "unexpected memory content %02x at byte %x\n, %02x expected",
550 buf
[i
], i
, expect
[i
]);
557 #define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
558 #define ROCKER_TEST_DMA_FILL_PATTERN 0x96
560 static int rocker_dma_test_offset(const struct rocker
*rocker
,
561 struct rocker_wait
*wait
, int offset
)
563 struct pci_dev
*pdev
= rocker
->pdev
;
564 unsigned char *alloc
;
566 unsigned char *expect
;
567 dma_addr_t dma_handle
;
571 alloc
= kzalloc(ROCKER_TEST_DMA_BUF_SIZE
* 2 + offset
,
572 GFP_KERNEL
| GFP_DMA
);
575 buf
= alloc
+ offset
;
576 expect
= buf
+ ROCKER_TEST_DMA_BUF_SIZE
;
578 dma_handle
= pci_map_single(pdev
, buf
, ROCKER_TEST_DMA_BUF_SIZE
,
579 PCI_DMA_BIDIRECTIONAL
);
580 if (pci_dma_mapping_error(pdev
, dma_handle
)) {
585 rocker_write64(rocker
, TEST_DMA_ADDR
, dma_handle
);
586 rocker_write32(rocker
, TEST_DMA_SIZE
, ROCKER_TEST_DMA_BUF_SIZE
);
588 memset(expect
, ROCKER_TEST_DMA_FILL_PATTERN
, ROCKER_TEST_DMA_BUF_SIZE
);
589 err
= rocker_dma_test_one(rocker
, wait
, ROCKER_TEST_DMA_CTRL_FILL
,
590 dma_handle
, buf
, expect
,
591 ROCKER_TEST_DMA_BUF_SIZE
);
595 memset(expect
, 0, ROCKER_TEST_DMA_BUF_SIZE
);
596 err
= rocker_dma_test_one(rocker
, wait
, ROCKER_TEST_DMA_CTRL_CLEAR
,
597 dma_handle
, buf
, expect
,
598 ROCKER_TEST_DMA_BUF_SIZE
);
602 prandom_bytes(buf
, ROCKER_TEST_DMA_BUF_SIZE
);
603 for (i
= 0; i
< ROCKER_TEST_DMA_BUF_SIZE
; i
++)
605 err
= rocker_dma_test_one(rocker
, wait
, ROCKER_TEST_DMA_CTRL_INVERT
,
606 dma_handle
, buf
, expect
,
607 ROCKER_TEST_DMA_BUF_SIZE
);
612 pci_unmap_single(pdev
, dma_handle
, ROCKER_TEST_DMA_BUF_SIZE
,
613 PCI_DMA_BIDIRECTIONAL
);
620 static int rocker_dma_test(const struct rocker
*rocker
,
621 struct rocker_wait
*wait
)
626 for (i
= 0; i
< 8; i
++) {
627 err
= rocker_dma_test_offset(rocker
, wait
, i
);
634 static irqreturn_t
rocker_test_irq_handler(int irq
, void *dev_id
)
636 struct rocker_wait
*wait
= dev_id
;
638 rocker_wait_wake_up(wait
);
643 static int rocker_basic_hw_test(const struct rocker
*rocker
)
645 const struct pci_dev
*pdev
= rocker
->pdev
;
646 struct rocker_wait wait
;
649 err
= rocker_reg_test(rocker
);
651 dev_err(&pdev
->dev
, "reg test failed\n");
655 err
= request_irq(rocker_msix_vector(rocker
, ROCKER_MSIX_VEC_TEST
),
656 rocker_test_irq_handler
, 0,
657 rocker_driver_name
, &wait
);
659 dev_err(&pdev
->dev
, "cannot assign test irq\n");
663 rocker_wait_init(&wait
);
664 rocker_write32(rocker
, TEST_IRQ
, ROCKER_MSIX_VEC_TEST
);
666 if (!rocker_wait_event_timeout(&wait
, HZ
/ 10)) {
667 dev_err(&pdev
->dev
, "no interrupt received within a timeout\n");
672 err
= rocker_dma_test(rocker
, &wait
);
674 dev_err(&pdev
->dev
, "dma test failed\n");
677 free_irq(rocker_msix_vector(rocker
, ROCKER_MSIX_VEC_TEST
), &wait
);
685 #define ROCKER_TLV_ALIGNTO 8U
686 #define ROCKER_TLV_ALIGN(len) \
687 (((len) + ROCKER_TLV_ALIGNTO - 1) & ~(ROCKER_TLV_ALIGNTO - 1))
688 #define ROCKER_TLV_HDRLEN ROCKER_TLV_ALIGN(sizeof(struct rocker_tlv))
690 /* <------- ROCKER_TLV_HDRLEN -------> <--- ROCKER_TLV_ALIGN(payload) --->
691 * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
692 * | Header | Pad | Payload | Pad |
693 * | (struct rocker_tlv) | ing | | ing |
694 * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
695 * <--------------------------- tlv->len -------------------------->
698 static struct rocker_tlv
*rocker_tlv_next(const struct rocker_tlv
*tlv
,
701 int totlen
= ROCKER_TLV_ALIGN(tlv
->len
);
703 *remaining
-= totlen
;
704 return (struct rocker_tlv
*) ((char *) tlv
+ totlen
);
707 static int rocker_tlv_ok(const struct rocker_tlv
*tlv
, int remaining
)
709 return remaining
>= (int) ROCKER_TLV_HDRLEN
&&
710 tlv
->len
>= ROCKER_TLV_HDRLEN
&&
711 tlv
->len
<= remaining
;
714 #define rocker_tlv_for_each(pos, head, len, rem) \
715 for (pos = head, rem = len; \
716 rocker_tlv_ok(pos, rem); \
717 pos = rocker_tlv_next(pos, &(rem)))
719 #define rocker_tlv_for_each_nested(pos, tlv, rem) \
720 rocker_tlv_for_each(pos, rocker_tlv_data(tlv), \
721 rocker_tlv_len(tlv), rem)
723 static int rocker_tlv_attr_size(int payload
)
725 return ROCKER_TLV_HDRLEN
+ payload
;
/* Bytes a TLV occupies in a buffer: header + payload, aligned up. */
static int rocker_tlv_total_size(int payload)
{
	return ROCKER_TLV_ALIGN(rocker_tlv_attr_size(payload));
}
/* Number of trailing pad bytes after a TLV of the given payload size. */
static int rocker_tlv_padlen(int payload)
{
	return rocker_tlv_total_size(payload) - rocker_tlv_attr_size(payload);
}
738 static int rocker_tlv_type(const struct rocker_tlv
*tlv
)
743 static void *rocker_tlv_data(const struct rocker_tlv
*tlv
)
745 return (char *) tlv
+ ROCKER_TLV_HDRLEN
;
748 static int rocker_tlv_len(const struct rocker_tlv
*tlv
)
750 return tlv
->len
- ROCKER_TLV_HDRLEN
;
753 static u8
rocker_tlv_get_u8(const struct rocker_tlv
*tlv
)
755 return *(u8
*) rocker_tlv_data(tlv
);
758 static u16
rocker_tlv_get_u16(const struct rocker_tlv
*tlv
)
760 return *(u16
*) rocker_tlv_data(tlv
);
763 static __be16
rocker_tlv_get_be16(const struct rocker_tlv
*tlv
)
765 return *(__be16
*) rocker_tlv_data(tlv
);
768 static u32
rocker_tlv_get_u32(const struct rocker_tlv
*tlv
)
770 return *(u32
*) rocker_tlv_data(tlv
);
773 static u64
rocker_tlv_get_u64(const struct rocker_tlv
*tlv
)
775 return *(u64
*) rocker_tlv_data(tlv
);
778 static void rocker_tlv_parse(const struct rocker_tlv
**tb
, int maxtype
,
779 const char *buf
, int buf_len
)
781 const struct rocker_tlv
*tlv
;
782 const struct rocker_tlv
*head
= (const struct rocker_tlv
*) buf
;
785 memset(tb
, 0, sizeof(struct rocker_tlv
*) * (maxtype
+ 1));
787 rocker_tlv_for_each(tlv
, head
, buf_len
, rem
) {
788 u32 type
= rocker_tlv_type(tlv
);
790 if (type
> 0 && type
<= maxtype
)
/* Parse the TLVs nested inside @tlv's payload into @tb. */
static void rocker_tlv_parse_nested(const struct rocker_tlv **tb, int maxtype,
				    const struct rocker_tlv *tlv)
{
	rocker_tlv_parse(tb, maxtype, rocker_tlv_data(tlv),
			 rocker_tlv_len(tlv));
}
802 static void rocker_tlv_parse_desc(const struct rocker_tlv
**tb
, int maxtype
,
803 const struct rocker_desc_info
*desc_info
)
805 rocker_tlv_parse(tb
, maxtype
, desc_info
->data
,
806 desc_info
->desc
->tlv_size
);
809 static struct rocker_tlv
*rocker_tlv_start(struct rocker_desc_info
*desc_info
)
811 return (struct rocker_tlv
*) ((char *) desc_info
->data
+
812 desc_info
->tlv_size
);
815 static int rocker_tlv_put(struct rocker_desc_info
*desc_info
,
816 int attrtype
, int attrlen
, const void *data
)
818 int tail_room
= desc_info
->data_size
- desc_info
->tlv_size
;
819 int total_size
= rocker_tlv_total_size(attrlen
);
820 struct rocker_tlv
*tlv
;
822 if (unlikely(tail_room
< total_size
))
825 tlv
= rocker_tlv_start(desc_info
);
826 desc_info
->tlv_size
+= total_size
;
827 tlv
->type
= attrtype
;
828 tlv
->len
= rocker_tlv_attr_size(attrlen
);
829 memcpy(rocker_tlv_data(tlv
), data
, attrlen
);
830 memset((char *) tlv
+ tlv
->len
, 0, rocker_tlv_padlen(attrlen
));
834 static int rocker_tlv_put_u8(struct rocker_desc_info
*desc_info
,
835 int attrtype
, u8 value
)
837 return rocker_tlv_put(desc_info
, attrtype
, sizeof(u8
), &value
);
840 static int rocker_tlv_put_u16(struct rocker_desc_info
*desc_info
,
841 int attrtype
, u16 value
)
843 return rocker_tlv_put(desc_info
, attrtype
, sizeof(u16
), &value
);
846 static int rocker_tlv_put_be16(struct rocker_desc_info
*desc_info
,
847 int attrtype
, __be16 value
)
849 return rocker_tlv_put(desc_info
, attrtype
, sizeof(__be16
), &value
);
852 static int rocker_tlv_put_u32(struct rocker_desc_info
*desc_info
,
853 int attrtype
, u32 value
)
855 return rocker_tlv_put(desc_info
, attrtype
, sizeof(u32
), &value
);
858 static int rocker_tlv_put_be32(struct rocker_desc_info
*desc_info
,
859 int attrtype
, __be32 value
)
861 return rocker_tlv_put(desc_info
, attrtype
, sizeof(__be32
), &value
);
864 static int rocker_tlv_put_u64(struct rocker_desc_info
*desc_info
,
865 int attrtype
, u64 value
)
867 return rocker_tlv_put(desc_info
, attrtype
, sizeof(u64
), &value
);
870 static struct rocker_tlv
*
871 rocker_tlv_nest_start(struct rocker_desc_info
*desc_info
, int attrtype
)
873 struct rocker_tlv
*start
= rocker_tlv_start(desc_info
);
875 if (rocker_tlv_put(desc_info
, attrtype
, 0, NULL
) < 0)
881 static void rocker_tlv_nest_end(struct rocker_desc_info
*desc_info
,
882 struct rocker_tlv
*start
)
884 start
->len
= (char *) rocker_tlv_start(desc_info
) - (char *) start
;
887 static void rocker_tlv_nest_cancel(struct rocker_desc_info
*desc_info
,
888 const struct rocker_tlv
*start
)
890 desc_info
->tlv_size
= (const char *) start
- desc_info
->data
;
893 /******************************************
894 * DMA rings and descriptors manipulations
895 ******************************************/
897 static u32
__pos_inc(u32 pos
, size_t limit
)
899 return ++pos
== limit
? 0 : pos
;
902 static int rocker_desc_err(const struct rocker_desc_info
*desc_info
)
904 int err
= desc_info
->desc
->comp_err
& ~ROCKER_DMA_DESC_COMP_ERR_GEN
;
919 case -ROCKER_EMSGSIZE
:
921 case -ROCKER_ENOTSUP
:
923 case -ROCKER_ENOBUFS
:
930 static void rocker_desc_gen_clear(const struct rocker_desc_info
*desc_info
)
932 desc_info
->desc
->comp_err
&= ~ROCKER_DMA_DESC_COMP_ERR_GEN
;
935 static bool rocker_desc_gen(const struct rocker_desc_info
*desc_info
)
937 u32 comp_err
= desc_info
->desc
->comp_err
;
939 return comp_err
& ROCKER_DMA_DESC_COMP_ERR_GEN
? true : false;
942 static void *rocker_desc_cookie_ptr_get(const struct rocker_desc_info
*desc_info
)
944 return (void *)(uintptr_t)desc_info
->desc
->cookie
;
947 static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info
*desc_info
,
950 desc_info
->desc
->cookie
= (uintptr_t) ptr
;
953 static struct rocker_desc_info
*
954 rocker_desc_head_get(const struct rocker_dma_ring_info
*info
)
956 static struct rocker_desc_info
*desc_info
;
957 u32 head
= __pos_inc(info
->head
, info
->size
);
959 desc_info
= &info
->desc_info
[info
->head
];
960 if (head
== info
->tail
)
961 return NULL
; /* ring full */
962 desc_info
->tlv_size
= 0;
966 static void rocker_desc_commit(const struct rocker_desc_info
*desc_info
)
968 desc_info
->desc
->buf_size
= desc_info
->data_size
;
969 desc_info
->desc
->tlv_size
= desc_info
->tlv_size
;
972 static void rocker_desc_head_set(const struct rocker
*rocker
,
973 struct rocker_dma_ring_info
*info
,
974 const struct rocker_desc_info
*desc_info
)
976 u32 head
= __pos_inc(info
->head
, info
->size
);
978 BUG_ON(head
== info
->tail
);
979 rocker_desc_commit(desc_info
);
981 rocker_write32(rocker
, DMA_DESC_HEAD(info
->type
), head
);
984 static struct rocker_desc_info
*
985 rocker_desc_tail_get(struct rocker_dma_ring_info
*info
)
987 static struct rocker_desc_info
*desc_info
;
989 if (info
->tail
== info
->head
)
990 return NULL
; /* nothing to be done between head and tail */
991 desc_info
= &info
->desc_info
[info
->tail
];
992 if (!rocker_desc_gen(desc_info
))
993 return NULL
; /* gen bit not set, desc is not ready yet */
994 info
->tail
= __pos_inc(info
->tail
, info
->size
);
995 desc_info
->tlv_size
= desc_info
->desc
->tlv_size
;
999 static void rocker_dma_ring_credits_set(const struct rocker
*rocker
,
1000 const struct rocker_dma_ring_info
*info
,
1004 rocker_write32(rocker
, DMA_DESC_CREDITS(info
->type
), credits
);
1007 static unsigned long rocker_dma_ring_size_fix(size_t size
)
1009 return max(ROCKER_DMA_SIZE_MIN
,
1010 min(roundup_pow_of_two(size
), ROCKER_DMA_SIZE_MAX
));
1013 static int rocker_dma_ring_create(const struct rocker
*rocker
,
1016 struct rocker_dma_ring_info
*info
)
1020 BUG_ON(size
!= rocker_dma_ring_size_fix(size
));
1025 info
->desc_info
= kcalloc(info
->size
, sizeof(*info
->desc_info
),
1027 if (!info
->desc_info
)
1030 info
->desc
= pci_alloc_consistent(rocker
->pdev
,
1031 info
->size
* sizeof(*info
->desc
),
1034 kfree(info
->desc_info
);
1038 for (i
= 0; i
< info
->size
; i
++)
1039 info
->desc_info
[i
].desc
= &info
->desc
[i
];
1041 rocker_write32(rocker
, DMA_DESC_CTRL(info
->type
),
1042 ROCKER_DMA_DESC_CTRL_RESET
);
1043 rocker_write64(rocker
, DMA_DESC_ADDR(info
->type
), info
->mapaddr
);
1044 rocker_write32(rocker
, DMA_DESC_SIZE(info
->type
), info
->size
);
1049 static void rocker_dma_ring_destroy(const struct rocker
*rocker
,
1050 const struct rocker_dma_ring_info
*info
)
1052 rocker_write64(rocker
, DMA_DESC_ADDR(info
->type
), 0);
1054 pci_free_consistent(rocker
->pdev
,
1055 info
->size
* sizeof(struct rocker_desc
),
1056 info
->desc
, info
->mapaddr
);
1057 kfree(info
->desc_info
);
1060 static void rocker_dma_ring_pass_to_producer(const struct rocker
*rocker
,
1061 struct rocker_dma_ring_info
*info
)
1065 BUG_ON(info
->head
|| info
->tail
);
1067 /* When ring is consumer, we need to advance head for each desc.
1068 * That tells hw that the desc is ready to be used by it.
1070 for (i
= 0; i
< info
->size
- 1; i
++)
1071 rocker_desc_head_set(rocker
, info
, &info
->desc_info
[i
]);
1072 rocker_desc_commit(&info
->desc_info
[i
]);
1075 static int rocker_dma_ring_bufs_alloc(const struct rocker
*rocker
,
1076 const struct rocker_dma_ring_info
*info
,
1077 int direction
, size_t buf_size
)
1079 struct pci_dev
*pdev
= rocker
->pdev
;
1083 for (i
= 0; i
< info
->size
; i
++) {
1084 struct rocker_desc_info
*desc_info
= &info
->desc_info
[i
];
1085 struct rocker_desc
*desc
= &info
->desc
[i
];
1086 dma_addr_t dma_handle
;
1089 buf
= kzalloc(buf_size
, GFP_KERNEL
| GFP_DMA
);
1095 dma_handle
= pci_map_single(pdev
, buf
, buf_size
, direction
);
1096 if (pci_dma_mapping_error(pdev
, dma_handle
)) {
1102 desc_info
->data
= buf
;
1103 desc_info
->data_size
= buf_size
;
1104 dma_unmap_addr_set(desc_info
, mapaddr
, dma_handle
);
1106 desc
->buf_addr
= dma_handle
;
1107 desc
->buf_size
= buf_size
;
1112 for (i
--; i
>= 0; i
--) {
1113 const struct rocker_desc_info
*desc_info
= &info
->desc_info
[i
];
1115 pci_unmap_single(pdev
, dma_unmap_addr(desc_info
, mapaddr
),
1116 desc_info
->data_size
, direction
);
1117 kfree(desc_info
->data
);
1122 static void rocker_dma_ring_bufs_free(const struct rocker
*rocker
,
1123 const struct rocker_dma_ring_info
*info
,
1126 struct pci_dev
*pdev
= rocker
->pdev
;
1129 for (i
= 0; i
< info
->size
; i
++) {
1130 const struct rocker_desc_info
*desc_info
= &info
->desc_info
[i
];
1131 struct rocker_desc
*desc
= &info
->desc
[i
];
1135 pci_unmap_single(pdev
, dma_unmap_addr(desc_info
, mapaddr
),
1136 desc_info
->data_size
, direction
);
1137 kfree(desc_info
->data
);
1141 static int rocker_dma_rings_init(struct rocker
*rocker
)
1143 const struct pci_dev
*pdev
= rocker
->pdev
;
1146 err
= rocker_dma_ring_create(rocker
, ROCKER_DMA_CMD
,
1147 ROCKER_DMA_CMD_DEFAULT_SIZE
,
1150 dev_err(&pdev
->dev
, "failed to create command dma ring\n");
1154 spin_lock_init(&rocker
->cmd_ring_lock
);
1156 err
= rocker_dma_ring_bufs_alloc(rocker
, &rocker
->cmd_ring
,
1157 PCI_DMA_BIDIRECTIONAL
, PAGE_SIZE
);
1159 dev_err(&pdev
->dev
, "failed to alloc command dma ring buffers\n");
1160 goto err_dma_cmd_ring_bufs_alloc
;
1163 err
= rocker_dma_ring_create(rocker
, ROCKER_DMA_EVENT
,
1164 ROCKER_DMA_EVENT_DEFAULT_SIZE
,
1165 &rocker
->event_ring
);
1167 dev_err(&pdev
->dev
, "failed to create event dma ring\n");
1168 goto err_dma_event_ring_create
;
1171 err
= rocker_dma_ring_bufs_alloc(rocker
, &rocker
->event_ring
,
1172 PCI_DMA_FROMDEVICE
, PAGE_SIZE
);
1174 dev_err(&pdev
->dev
, "failed to alloc event dma ring buffers\n");
1175 goto err_dma_event_ring_bufs_alloc
;
1177 rocker_dma_ring_pass_to_producer(rocker
, &rocker
->event_ring
);
1180 err_dma_event_ring_bufs_alloc
:
1181 rocker_dma_ring_destroy(rocker
, &rocker
->event_ring
);
1182 err_dma_event_ring_create
:
1183 rocker_dma_ring_bufs_free(rocker
, &rocker
->cmd_ring
,
1184 PCI_DMA_BIDIRECTIONAL
);
1185 err_dma_cmd_ring_bufs_alloc
:
1186 rocker_dma_ring_destroy(rocker
, &rocker
->cmd_ring
);
1190 static void rocker_dma_rings_fini(struct rocker
*rocker
)
1192 rocker_dma_ring_bufs_free(rocker
, &rocker
->event_ring
,
1193 PCI_DMA_BIDIRECTIONAL
);
1194 rocker_dma_ring_destroy(rocker
, &rocker
->event_ring
);
1195 rocker_dma_ring_bufs_free(rocker
, &rocker
->cmd_ring
,
1196 PCI_DMA_BIDIRECTIONAL
);
1197 rocker_dma_ring_destroy(rocker
, &rocker
->cmd_ring
);
1200 static int rocker_dma_rx_ring_skb_map(const struct rocker_port
*rocker_port
,
1201 struct rocker_desc_info
*desc_info
,
1202 struct sk_buff
*skb
, size_t buf_len
)
1204 const struct rocker
*rocker
= rocker_port
->rocker
;
1205 struct pci_dev
*pdev
= rocker
->pdev
;
1206 dma_addr_t dma_handle
;
1208 dma_handle
= pci_map_single(pdev
, skb
->data
, buf_len
,
1209 PCI_DMA_FROMDEVICE
);
1210 if (pci_dma_mapping_error(pdev
, dma_handle
))
1212 if (rocker_tlv_put_u64(desc_info
, ROCKER_TLV_RX_FRAG_ADDR
, dma_handle
))
1213 goto tlv_put_failure
;
1214 if (rocker_tlv_put_u16(desc_info
, ROCKER_TLV_RX_FRAG_MAX_LEN
, buf_len
))
1215 goto tlv_put_failure
;
1219 pci_unmap_single(pdev
, dma_handle
, buf_len
, PCI_DMA_FROMDEVICE
);
1220 desc_info
->tlv_size
= 0;
1224 static size_t rocker_port_rx_buf_len(const struct rocker_port
*rocker_port
)
1226 return rocker_port
->dev
->mtu
+ ETH_HLEN
+ ETH_FCS_LEN
+ VLAN_HLEN
;
1229 static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port
*rocker_port
,
1230 struct rocker_desc_info
*desc_info
)
1232 struct net_device
*dev
= rocker_port
->dev
;
1233 struct sk_buff
*skb
;
1234 size_t buf_len
= rocker_port_rx_buf_len(rocker_port
);
1237 /* Ensure that hw will see tlv_size zero in case of an error.
1238 * That tells hw to use another descriptor.
1240 rocker_desc_cookie_ptr_set(desc_info
, NULL
);
1241 desc_info
->tlv_size
= 0;
1243 skb
= netdev_alloc_skb_ip_align(dev
, buf_len
);
1246 err
= rocker_dma_rx_ring_skb_map(rocker_port
, desc_info
, skb
, buf_len
);
1248 dev_kfree_skb_any(skb
);
1251 rocker_desc_cookie_ptr_set(desc_info
, skb
);
1255 static void rocker_dma_rx_ring_skb_unmap(const struct rocker
*rocker
,
1256 const struct rocker_tlv
**attrs
)
1258 struct pci_dev
*pdev
= rocker
->pdev
;
1259 dma_addr_t dma_handle
;
1262 if (!attrs
[ROCKER_TLV_RX_FRAG_ADDR
] ||
1263 !attrs
[ROCKER_TLV_RX_FRAG_MAX_LEN
])
1265 dma_handle
= rocker_tlv_get_u64(attrs
[ROCKER_TLV_RX_FRAG_ADDR
]);
1266 len
= rocker_tlv_get_u16(attrs
[ROCKER_TLV_RX_FRAG_MAX_LEN
]);
1267 pci_unmap_single(pdev
, dma_handle
, len
, PCI_DMA_FROMDEVICE
);
1270 static void rocker_dma_rx_ring_skb_free(const struct rocker
*rocker
,
1271 const struct rocker_desc_info
*desc_info
)
1273 const struct rocker_tlv
*attrs
[ROCKER_TLV_RX_MAX
+ 1];
1274 struct sk_buff
*skb
= rocker_desc_cookie_ptr_get(desc_info
);
1278 rocker_tlv_parse_desc(attrs
, ROCKER_TLV_RX_MAX
, desc_info
);
1279 rocker_dma_rx_ring_skb_unmap(rocker
, attrs
);
1280 dev_kfree_skb_any(skb
);
1283 static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port
*rocker_port
)
1285 const struct rocker_dma_ring_info
*rx_ring
= &rocker_port
->rx_ring
;
1286 const struct rocker
*rocker
= rocker_port
->rocker
;
1290 for (i
= 0; i
< rx_ring
->size
; i
++) {
1291 err
= rocker_dma_rx_ring_skb_alloc(rocker_port
,
1292 &rx_ring
->desc_info
[i
]);
1299 for (i
--; i
>= 0; i
--)
1300 rocker_dma_rx_ring_skb_free(rocker
, &rx_ring
->desc_info
[i
]);
1304 static void rocker_dma_rx_ring_skbs_free(const struct rocker_port
*rocker_port
)
1306 const struct rocker_dma_ring_info
*rx_ring
= &rocker_port
->rx_ring
;
1307 const struct rocker
*rocker
= rocker_port
->rocker
;
1310 for (i
= 0; i
< rx_ring
->size
; i
++)
1311 rocker_dma_rx_ring_skb_free(rocker
, &rx_ring
->desc_info
[i
]);
1314 static int rocker_port_dma_rings_init(struct rocker_port
*rocker_port
)
1316 struct rocker
*rocker
= rocker_port
->rocker
;
1319 err
= rocker_dma_ring_create(rocker
,
1320 ROCKER_DMA_TX(rocker_port
->port_number
),
1321 ROCKER_DMA_TX_DEFAULT_SIZE
,
1322 &rocker_port
->tx_ring
);
1324 netdev_err(rocker_port
->dev
, "failed to create tx dma ring\n");
1328 err
= rocker_dma_ring_bufs_alloc(rocker
, &rocker_port
->tx_ring
,
1330 ROCKER_DMA_TX_DESC_SIZE
);
1332 netdev_err(rocker_port
->dev
, "failed to alloc tx dma ring buffers\n");
1333 goto err_dma_tx_ring_bufs_alloc
;
1336 err
= rocker_dma_ring_create(rocker
,
1337 ROCKER_DMA_RX(rocker_port
->port_number
),
1338 ROCKER_DMA_RX_DEFAULT_SIZE
,
1339 &rocker_port
->rx_ring
);
1341 netdev_err(rocker_port
->dev
, "failed to create rx dma ring\n");
1342 goto err_dma_rx_ring_create
;
1345 err
= rocker_dma_ring_bufs_alloc(rocker
, &rocker_port
->rx_ring
,
1346 PCI_DMA_BIDIRECTIONAL
,
1347 ROCKER_DMA_RX_DESC_SIZE
);
1349 netdev_err(rocker_port
->dev
, "failed to alloc rx dma ring buffers\n");
1350 goto err_dma_rx_ring_bufs_alloc
;
1353 err
= rocker_dma_rx_ring_skbs_alloc(rocker_port
);
1355 netdev_err(rocker_port
->dev
, "failed to alloc rx dma ring skbs\n");
1356 goto err_dma_rx_ring_skbs_alloc
;
1358 rocker_dma_ring_pass_to_producer(rocker
, &rocker_port
->rx_ring
);
1362 err_dma_rx_ring_skbs_alloc
:
1363 rocker_dma_ring_bufs_free(rocker
, &rocker_port
->rx_ring
,
1364 PCI_DMA_BIDIRECTIONAL
);
1365 err_dma_rx_ring_bufs_alloc
:
1366 rocker_dma_ring_destroy(rocker
, &rocker_port
->rx_ring
);
1367 err_dma_rx_ring_create
:
1368 rocker_dma_ring_bufs_free(rocker
, &rocker_port
->tx_ring
,
1370 err_dma_tx_ring_bufs_alloc
:
1371 rocker_dma_ring_destroy(rocker
, &rocker_port
->tx_ring
);
1375 static void rocker_port_dma_rings_fini(struct rocker_port
*rocker_port
)
1377 struct rocker
*rocker
= rocker_port
->rocker
;
1379 rocker_dma_rx_ring_skbs_free(rocker_port
);
1380 rocker_dma_ring_bufs_free(rocker
, &rocker_port
->rx_ring
,
1381 PCI_DMA_BIDIRECTIONAL
);
1382 rocker_dma_ring_destroy(rocker
, &rocker_port
->rx_ring
);
1383 rocker_dma_ring_bufs_free(rocker
, &rocker_port
->tx_ring
,
1385 rocker_dma_ring_destroy(rocker
, &rocker_port
->tx_ring
);
1388 static void rocker_port_set_enable(const struct rocker_port
*rocker_port
,
1391 u64 val
= rocker_read64(rocker_port
->rocker
, PORT_PHYS_ENABLE
);
1394 val
|= 1ULL << rocker_port
->pport
;
1396 val
&= ~(1ULL << rocker_port
->pport
);
1397 rocker_write64(rocker_port
->rocker
, PORT_PHYS_ENABLE
, val
);
1400 /********************************
1401 * Interrupt handler and helpers
1402 ********************************/
1404 static irqreturn_t
rocker_cmd_irq_handler(int irq
, void *dev_id
)
1406 struct rocker
*rocker
= dev_id
;
1407 const struct rocker_desc_info
*desc_info
;
1408 struct rocker_wait
*wait
;
1411 spin_lock(&rocker
->cmd_ring_lock
);
1412 while ((desc_info
= rocker_desc_tail_get(&rocker
->cmd_ring
))) {
1413 wait
= rocker_desc_cookie_ptr_get(desc_info
);
1415 rocker_desc_gen_clear(desc_info
);
1416 rocker_wait_destroy(SWITCHDEV_TRANS_NONE
, wait
);
1418 rocker_wait_wake_up(wait
);
1422 spin_unlock(&rocker
->cmd_ring_lock
);
1423 rocker_dma_ring_credits_set(rocker
, &rocker
->cmd_ring
, credits
);
1428 static void rocker_port_link_up(const struct rocker_port
*rocker_port
)
1430 netif_carrier_on(rocker_port
->dev
);
1431 netdev_info(rocker_port
->dev
, "Link is up\n");
1434 static void rocker_port_link_down(const struct rocker_port
*rocker_port
)
1436 netif_carrier_off(rocker_port
->dev
);
1437 netdev_info(rocker_port
->dev
, "Link is down\n");
1440 static int rocker_event_link_change(const struct rocker
*rocker
,
1441 const struct rocker_tlv
*info
)
1443 const struct rocker_tlv
*attrs
[ROCKER_TLV_EVENT_LINK_CHANGED_MAX
+ 1];
1444 unsigned int port_number
;
1446 struct rocker_port
*rocker_port
;
1448 rocker_tlv_parse_nested(attrs
, ROCKER_TLV_EVENT_LINK_CHANGED_MAX
, info
);
1449 if (!attrs
[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT
] ||
1450 !attrs
[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP
])
1453 rocker_tlv_get_u32(attrs
[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT
]) - 1;
1454 link_up
= rocker_tlv_get_u8(attrs
[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP
]);
1456 if (port_number
>= rocker
->port_count
)
1459 rocker_port
= rocker
->ports
[port_number
];
1460 if (netif_carrier_ok(rocker_port
->dev
) != link_up
) {
1462 rocker_port_link_up(rocker_port
);
1464 rocker_port_link_down(rocker_port
);
1470 static int rocker_port_fdb(struct rocker_port
*rocker_port
,
1471 enum switchdev_trans trans
,
1472 const unsigned char *addr
,
1473 __be16 vlan_id
, int flags
);
1475 static int rocker_event_mac_vlan_seen(const struct rocker
*rocker
,
1476 const struct rocker_tlv
*info
)
1478 const struct rocker_tlv
*attrs
[ROCKER_TLV_EVENT_MAC_VLAN_MAX
+ 1];
1479 unsigned int port_number
;
1480 struct rocker_port
*rocker_port
;
1481 const unsigned char *addr
;
1482 int flags
= ROCKER_OP_FLAG_NOWAIT
| ROCKER_OP_FLAG_LEARNED
;
1485 rocker_tlv_parse_nested(attrs
, ROCKER_TLV_EVENT_MAC_VLAN_MAX
, info
);
1486 if (!attrs
[ROCKER_TLV_EVENT_MAC_VLAN_PPORT
] ||
1487 !attrs
[ROCKER_TLV_EVENT_MAC_VLAN_MAC
] ||
1488 !attrs
[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID
])
1491 rocker_tlv_get_u32(attrs
[ROCKER_TLV_EVENT_MAC_VLAN_PPORT
]) - 1;
1492 addr
= rocker_tlv_data(attrs
[ROCKER_TLV_EVENT_MAC_VLAN_MAC
]);
1493 vlan_id
= rocker_tlv_get_be16(attrs
[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID
]);
1495 if (port_number
>= rocker
->port_count
)
1498 rocker_port
= rocker
->ports
[port_number
];
1500 if (rocker_port
->stp_state
!= BR_STATE_LEARNING
&&
1501 rocker_port
->stp_state
!= BR_STATE_FORWARDING
)
1504 return rocker_port_fdb(rocker_port
, SWITCHDEV_TRANS_NONE
,
1505 addr
, vlan_id
, flags
);
1508 static int rocker_event_process(const struct rocker
*rocker
,
1509 const struct rocker_desc_info
*desc_info
)
1511 const struct rocker_tlv
*attrs
[ROCKER_TLV_EVENT_MAX
+ 1];
1512 const struct rocker_tlv
*info
;
1515 rocker_tlv_parse_desc(attrs
, ROCKER_TLV_EVENT_MAX
, desc_info
);
1516 if (!attrs
[ROCKER_TLV_EVENT_TYPE
] ||
1517 !attrs
[ROCKER_TLV_EVENT_INFO
])
1520 type
= rocker_tlv_get_u16(attrs
[ROCKER_TLV_EVENT_TYPE
]);
1521 info
= attrs
[ROCKER_TLV_EVENT_INFO
];
1524 case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED
:
1525 return rocker_event_link_change(rocker
, info
);
1526 case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN
:
1527 return rocker_event_mac_vlan_seen(rocker
, info
);
1533 static irqreturn_t
rocker_event_irq_handler(int irq
, void *dev_id
)
1535 struct rocker
*rocker
= dev_id
;
1536 const struct pci_dev
*pdev
= rocker
->pdev
;
1537 const struct rocker_desc_info
*desc_info
;
1541 while ((desc_info
= rocker_desc_tail_get(&rocker
->event_ring
))) {
1542 err
= rocker_desc_err(desc_info
);
1544 dev_err(&pdev
->dev
, "event desc received with err %d\n",
1547 err
= rocker_event_process(rocker
, desc_info
);
1549 dev_err(&pdev
->dev
, "event processing failed with err %d\n",
1552 rocker_desc_gen_clear(desc_info
);
1553 rocker_desc_head_set(rocker
, &rocker
->event_ring
, desc_info
);
1556 rocker_dma_ring_credits_set(rocker
, &rocker
->event_ring
, credits
);
1561 static irqreturn_t
rocker_tx_irq_handler(int irq
, void *dev_id
)
1563 struct rocker_port
*rocker_port
= dev_id
;
1565 napi_schedule(&rocker_port
->napi_tx
);
1569 static irqreturn_t
rocker_rx_irq_handler(int irq
, void *dev_id
)
1571 struct rocker_port
*rocker_port
= dev_id
;
1573 napi_schedule(&rocker_port
->napi_rx
);
/********************
 * Command interface
 ********************/
1581 typedef int (*rocker_cmd_prep_cb_t
)(const struct rocker_port
*rocker_port
,
1582 struct rocker_desc_info
*desc_info
,
1585 typedef int (*rocker_cmd_proc_cb_t
)(const struct rocker_port
*rocker_port
,
1586 const struct rocker_desc_info
*desc_info
,
1589 static int rocker_cmd_exec(struct rocker_port
*rocker_port
,
1590 enum switchdev_trans trans
, int flags
,
1591 rocker_cmd_prep_cb_t prepare
, void *prepare_priv
,
1592 rocker_cmd_proc_cb_t process
, void *process_priv
)
1594 struct rocker
*rocker
= rocker_port
->rocker
;
1595 struct rocker_desc_info
*desc_info
;
1596 struct rocker_wait
*wait
;
1597 bool nowait
= !!(flags
& ROCKER_OP_FLAG_NOWAIT
);
1598 unsigned long lock_flags
;
1601 wait
= rocker_wait_create(rocker_port
, trans
, flags
);
1604 wait
->nowait
= nowait
;
1606 spin_lock_irqsave(&rocker
->cmd_ring_lock
, lock_flags
);
1608 desc_info
= rocker_desc_head_get(&rocker
->cmd_ring
);
1610 spin_unlock_irqrestore(&rocker
->cmd_ring_lock
, lock_flags
);
1615 err
= prepare(rocker_port
, desc_info
, prepare_priv
);
1617 spin_unlock_irqrestore(&rocker
->cmd_ring_lock
, lock_flags
);
1621 rocker_desc_cookie_ptr_set(desc_info
, wait
);
1623 if (trans
!= SWITCHDEV_TRANS_PREPARE
)
1624 rocker_desc_head_set(rocker
, &rocker
->cmd_ring
, desc_info
);
1626 spin_unlock_irqrestore(&rocker
->cmd_ring_lock
, lock_flags
);
1631 if (trans
!= SWITCHDEV_TRANS_PREPARE
)
1632 if (!rocker_wait_event_timeout(wait
, HZ
/ 10))
1635 err
= rocker_desc_err(desc_info
);
1640 err
= process(rocker_port
, desc_info
, process_priv
);
1642 rocker_desc_gen_clear(desc_info
);
1644 rocker_wait_destroy(trans
, wait
);
1649 rocker_cmd_get_port_settings_prep(const struct rocker_port
*rocker_port
,
1650 struct rocker_desc_info
*desc_info
,
1653 struct rocker_tlv
*cmd_info
;
1655 if (rocker_tlv_put_u16(desc_info
, ROCKER_TLV_CMD_TYPE
,
1656 ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS
))
1658 cmd_info
= rocker_tlv_nest_start(desc_info
, ROCKER_TLV_CMD_INFO
);
1661 if (rocker_tlv_put_u32(desc_info
, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT
,
1662 rocker_port
->pport
))
1664 rocker_tlv_nest_end(desc_info
, cmd_info
);
1669 rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port
*rocker_port
,
1670 const struct rocker_desc_info
*desc_info
,
1673 struct ethtool_cmd
*ecmd
= priv
;
1674 const struct rocker_tlv
*attrs
[ROCKER_TLV_CMD_MAX
+ 1];
1675 const struct rocker_tlv
*info_attrs
[ROCKER_TLV_CMD_PORT_SETTINGS_MAX
+ 1];
1680 rocker_tlv_parse_desc(attrs
, ROCKER_TLV_CMD_MAX
, desc_info
);
1681 if (!attrs
[ROCKER_TLV_CMD_INFO
])
1684 rocker_tlv_parse_nested(info_attrs
, ROCKER_TLV_CMD_PORT_SETTINGS_MAX
,
1685 attrs
[ROCKER_TLV_CMD_INFO
]);
1686 if (!info_attrs
[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED
] ||
1687 !info_attrs
[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX
] ||
1688 !info_attrs
[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG
])
1691 speed
= rocker_tlv_get_u32(info_attrs
[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED
]);
1692 duplex
= rocker_tlv_get_u8(info_attrs
[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX
]);
1693 autoneg
= rocker_tlv_get_u8(info_attrs
[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG
]);
1695 ecmd
->transceiver
= XCVR_INTERNAL
;
1696 ecmd
->supported
= SUPPORTED_TP
;
1697 ecmd
->phy_address
= 0xff;
1698 ecmd
->port
= PORT_TP
;
1699 ethtool_cmd_speed_set(ecmd
, speed
);
1700 ecmd
->duplex
= duplex
? DUPLEX_FULL
: DUPLEX_HALF
;
1701 ecmd
->autoneg
= autoneg
? AUTONEG_ENABLE
: AUTONEG_DISABLE
;
1707 rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port
*rocker_port
,
1708 const struct rocker_desc_info
*desc_info
,
1711 unsigned char *macaddr
= priv
;
1712 const struct rocker_tlv
*attrs
[ROCKER_TLV_CMD_MAX
+ 1];
1713 const struct rocker_tlv
*info_attrs
[ROCKER_TLV_CMD_PORT_SETTINGS_MAX
+ 1];
1714 const struct rocker_tlv
*attr
;
1716 rocker_tlv_parse_desc(attrs
, ROCKER_TLV_CMD_MAX
, desc_info
);
1717 if (!attrs
[ROCKER_TLV_CMD_INFO
])
1720 rocker_tlv_parse_nested(info_attrs
, ROCKER_TLV_CMD_PORT_SETTINGS_MAX
,
1721 attrs
[ROCKER_TLV_CMD_INFO
]);
1722 attr
= info_attrs
[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR
];
1726 if (rocker_tlv_len(attr
) != ETH_ALEN
)
1729 ether_addr_copy(macaddr
, rocker_tlv_data(attr
));
1739 rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port
*rocker_port
,
1740 const struct rocker_desc_info
*desc_info
,
1743 const struct rocker_tlv
*info_attrs
[ROCKER_TLV_CMD_PORT_SETTINGS_MAX
+ 1];
1744 const struct rocker_tlv
*attrs
[ROCKER_TLV_CMD_MAX
+ 1];
1745 struct port_name
*name
= priv
;
1746 const struct rocker_tlv
*attr
;
1750 rocker_tlv_parse_desc(attrs
, ROCKER_TLV_CMD_MAX
, desc_info
);
1751 if (!attrs
[ROCKER_TLV_CMD_INFO
])
1754 rocker_tlv_parse_nested(info_attrs
, ROCKER_TLV_CMD_PORT_SETTINGS_MAX
,
1755 attrs
[ROCKER_TLV_CMD_INFO
]);
1756 attr
= info_attrs
[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME
];
1760 len
= min_t(size_t, rocker_tlv_len(attr
), name
->len
);
1761 str
= rocker_tlv_data(attr
);
1763 /* make sure name only contains alphanumeric characters */
1764 for (i
= j
= 0; i
< len
; ++i
) {
1765 if (isalnum(str
[i
])) {
1766 name
->buf
[j
] = str
[i
];
1774 name
->buf
[j
] = '\0';
1780 rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port
*rocker_port
,
1781 struct rocker_desc_info
*desc_info
,
1784 struct ethtool_cmd
*ecmd
= priv
;
1785 struct rocker_tlv
*cmd_info
;
1787 if (rocker_tlv_put_u16(desc_info
, ROCKER_TLV_CMD_TYPE
,
1788 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS
))
1790 cmd_info
= rocker_tlv_nest_start(desc_info
, ROCKER_TLV_CMD_INFO
);
1793 if (rocker_tlv_put_u32(desc_info
, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT
,
1794 rocker_port
->pport
))
1796 if (rocker_tlv_put_u32(desc_info
, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED
,
1797 ethtool_cmd_speed(ecmd
)))
1799 if (rocker_tlv_put_u8(desc_info
, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX
,
1802 if (rocker_tlv_put_u8(desc_info
, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG
,
1805 rocker_tlv_nest_end(desc_info
, cmd_info
);
1810 rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port
*rocker_port
,
1811 struct rocker_desc_info
*desc_info
,
1814 const unsigned char *macaddr
= priv
;
1815 struct rocker_tlv
*cmd_info
;
1817 if (rocker_tlv_put_u16(desc_info
, ROCKER_TLV_CMD_TYPE
,
1818 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS
))
1820 cmd_info
= rocker_tlv_nest_start(desc_info
, ROCKER_TLV_CMD_INFO
);
1823 if (rocker_tlv_put_u32(desc_info
, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT
,
1824 rocker_port
->pport
))
1826 if (rocker_tlv_put(desc_info
, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR
,
1829 rocker_tlv_nest_end(desc_info
, cmd_info
);
1834 rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port
*rocker_port
,
1835 struct rocker_desc_info
*desc_info
,
1838 int mtu
= *(int *)priv
;
1839 struct rocker_tlv
*cmd_info
;
1841 if (rocker_tlv_put_u16(desc_info
, ROCKER_TLV_CMD_TYPE
,
1842 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS
))
1844 cmd_info
= rocker_tlv_nest_start(desc_info
, ROCKER_TLV_CMD_INFO
);
1847 if (rocker_tlv_put_u32(desc_info
, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT
,
1848 rocker_port
->pport
))
1850 if (rocker_tlv_put_u16(desc_info
, ROCKER_TLV_CMD_PORT_SETTINGS_MTU
,
1853 rocker_tlv_nest_end(desc_info
, cmd_info
);
1858 rocker_cmd_set_port_learning_prep(const struct rocker_port
*rocker_port
,
1859 struct rocker_desc_info
*desc_info
,
1862 struct rocker_tlv
*cmd_info
;
1864 if (rocker_tlv_put_u16(desc_info
, ROCKER_TLV_CMD_TYPE
,
1865 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS
))
1867 cmd_info
= rocker_tlv_nest_start(desc_info
, ROCKER_TLV_CMD_INFO
);
1870 if (rocker_tlv_put_u32(desc_info
, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT
,
1871 rocker_port
->pport
))
1873 if (rocker_tlv_put_u8(desc_info
, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING
,
1874 !!(rocker_port
->brport_flags
& BR_LEARNING
)))
1876 rocker_tlv_nest_end(desc_info
, cmd_info
);
1880 static int rocker_cmd_get_port_settings_ethtool(struct rocker_port
*rocker_port
,
1881 struct ethtool_cmd
*ecmd
)
1883 return rocker_cmd_exec(rocker_port
, SWITCHDEV_TRANS_NONE
, 0,
1884 rocker_cmd_get_port_settings_prep
, NULL
,
1885 rocker_cmd_get_port_settings_ethtool_proc
,
1889 static int rocker_cmd_get_port_settings_macaddr(struct rocker_port
*rocker_port
,
1890 unsigned char *macaddr
)
1892 return rocker_cmd_exec(rocker_port
, SWITCHDEV_TRANS_NONE
, 0,
1893 rocker_cmd_get_port_settings_prep
, NULL
,
1894 rocker_cmd_get_port_settings_macaddr_proc
,
1898 static int rocker_cmd_set_port_settings_ethtool(struct rocker_port
*rocker_port
,
1899 struct ethtool_cmd
*ecmd
)
1901 return rocker_cmd_exec(rocker_port
, SWITCHDEV_TRANS_NONE
, 0,
1902 rocker_cmd_set_port_settings_ethtool_prep
,
1906 static int rocker_cmd_set_port_settings_macaddr(struct rocker_port
*rocker_port
,
1907 unsigned char *macaddr
)
1909 return rocker_cmd_exec(rocker_port
, SWITCHDEV_TRANS_NONE
, 0,
1910 rocker_cmd_set_port_settings_macaddr_prep
,
1911 macaddr
, NULL
, NULL
);
1914 static int rocker_cmd_set_port_settings_mtu(struct rocker_port
*rocker_port
,
1917 return rocker_cmd_exec(rocker_port
, SWITCHDEV_TRANS_NONE
, 0,
1918 rocker_cmd_set_port_settings_mtu_prep
,
1922 static int rocker_port_set_learning(struct rocker_port
*rocker_port
,
1923 enum switchdev_trans trans
)
1925 return rocker_cmd_exec(rocker_port
, trans
, 0,
1926 rocker_cmd_set_port_learning_prep
,
1931 rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info
*desc_info
,
1932 const struct rocker_flow_tbl_entry
*entry
)
1934 if (rocker_tlv_put_u32(desc_info
, ROCKER_TLV_OF_DPA_IN_PPORT
,
1935 entry
->key
.ig_port
.in_pport
))
1937 if (rocker_tlv_put_u32(desc_info
, ROCKER_TLV_OF_DPA_IN_PPORT_MASK
,
1938 entry
->key
.ig_port
.in_pport_mask
))
1940 if (rocker_tlv_put_u16(desc_info
, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID
,
1941 entry
->key
.ig_port
.goto_tbl
))
1948 rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info
*desc_info
,
1949 const struct rocker_flow_tbl_entry
*entry
)
1951 if (rocker_tlv_put_u32(desc_info
, ROCKER_TLV_OF_DPA_IN_PPORT
,
1952 entry
->key
.vlan
.in_pport
))
1954 if (rocker_tlv_put_be16(desc_info
, ROCKER_TLV_OF_DPA_VLAN_ID
,
1955 entry
->key
.vlan
.vlan_id
))
1957 if (rocker_tlv_put_be16(desc_info
, ROCKER_TLV_OF_DPA_VLAN_ID_MASK
,
1958 entry
->key
.vlan
.vlan_id_mask
))
1960 if (rocker_tlv_put_u16(desc_info
, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID
,
1961 entry
->key
.vlan
.goto_tbl
))
1963 if (entry
->key
.vlan
.untagged
&&
1964 rocker_tlv_put_be16(desc_info
, ROCKER_TLV_OF_DPA_NEW_VLAN_ID
,
1965 entry
->key
.vlan
.new_vlan_id
))
1972 rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info
*desc_info
,
1973 const struct rocker_flow_tbl_entry
*entry
)
1975 if (rocker_tlv_put_u32(desc_info
, ROCKER_TLV_OF_DPA_IN_PPORT
,
1976 entry
->key
.term_mac
.in_pport
))
1978 if (rocker_tlv_put_u32(desc_info
, ROCKER_TLV_OF_DPA_IN_PPORT_MASK
,
1979 entry
->key
.term_mac
.in_pport_mask
))
1981 if (rocker_tlv_put_be16(desc_info
, ROCKER_TLV_OF_DPA_ETHERTYPE
,
1982 entry
->key
.term_mac
.eth_type
))
1984 if (rocker_tlv_put(desc_info
, ROCKER_TLV_OF_DPA_DST_MAC
,
1985 ETH_ALEN
, entry
->key
.term_mac
.eth_dst
))
1987 if (rocker_tlv_put(desc_info
, ROCKER_TLV_OF_DPA_DST_MAC_MASK
,
1988 ETH_ALEN
, entry
->key
.term_mac
.eth_dst_mask
))
1990 if (rocker_tlv_put_be16(desc_info
, ROCKER_TLV_OF_DPA_VLAN_ID
,
1991 entry
->key
.term_mac
.vlan_id
))
1993 if (rocker_tlv_put_be16(desc_info
, ROCKER_TLV_OF_DPA_VLAN_ID_MASK
,
1994 entry
->key
.term_mac
.vlan_id_mask
))
1996 if (rocker_tlv_put_u16(desc_info
, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID
,
1997 entry
->key
.term_mac
.goto_tbl
))
1999 if (entry
->key
.term_mac
.copy_to_cpu
&&
2000 rocker_tlv_put_u8(desc_info
, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION
,
2001 entry
->key
.term_mac
.copy_to_cpu
))
2008 rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info
*desc_info
,
2009 const struct rocker_flow_tbl_entry
*entry
)
2011 if (rocker_tlv_put_be16(desc_info
, ROCKER_TLV_OF_DPA_ETHERTYPE
,
2012 entry
->key
.ucast_routing
.eth_type
))
2014 if (rocker_tlv_put_be32(desc_info
, ROCKER_TLV_OF_DPA_DST_IP
,
2015 entry
->key
.ucast_routing
.dst4
))
2017 if (rocker_tlv_put_be32(desc_info
, ROCKER_TLV_OF_DPA_DST_IP_MASK
,
2018 entry
->key
.ucast_routing
.dst4_mask
))
2020 if (rocker_tlv_put_u16(desc_info
, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID
,
2021 entry
->key
.ucast_routing
.goto_tbl
))
2023 if (rocker_tlv_put_u32(desc_info
, ROCKER_TLV_OF_DPA_GROUP_ID
,
2024 entry
->key
.ucast_routing
.group_id
))
2031 rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info
*desc_info
,
2032 const struct rocker_flow_tbl_entry
*entry
)
2034 if (entry
->key
.bridge
.has_eth_dst
&&
2035 rocker_tlv_put(desc_info
, ROCKER_TLV_OF_DPA_DST_MAC
,
2036 ETH_ALEN
, entry
->key
.bridge
.eth_dst
))
2038 if (entry
->key
.bridge
.has_eth_dst_mask
&&
2039 rocker_tlv_put(desc_info
, ROCKER_TLV_OF_DPA_DST_MAC_MASK
,
2040 ETH_ALEN
, entry
->key
.bridge
.eth_dst_mask
))
2042 if (entry
->key
.bridge
.vlan_id
&&
2043 rocker_tlv_put_be16(desc_info
, ROCKER_TLV_OF_DPA_VLAN_ID
,
2044 entry
->key
.bridge
.vlan_id
))
2046 if (entry
->key
.bridge
.tunnel_id
&&
2047 rocker_tlv_put_u32(desc_info
, ROCKER_TLV_OF_DPA_TUNNEL_ID
,
2048 entry
->key
.bridge
.tunnel_id
))
2050 if (rocker_tlv_put_u16(desc_info
, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID
,
2051 entry
->key
.bridge
.goto_tbl
))
2053 if (rocker_tlv_put_u32(desc_info
, ROCKER_TLV_OF_DPA_GROUP_ID
,
2054 entry
->key
.bridge
.group_id
))
2056 if (entry
->key
.bridge
.copy_to_cpu
&&
2057 rocker_tlv_put_u8(desc_info
, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION
,
2058 entry
->key
.bridge
.copy_to_cpu
))
2065 rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info
*desc_info
,
2066 const struct rocker_flow_tbl_entry
*entry
)
2068 if (rocker_tlv_put_u32(desc_info
, ROCKER_TLV_OF_DPA_IN_PPORT
,
2069 entry
->key
.acl
.in_pport
))
2071 if (rocker_tlv_put_u32(desc_info
, ROCKER_TLV_OF_DPA_IN_PPORT_MASK
,
2072 entry
->key
.acl
.in_pport_mask
))
2074 if (rocker_tlv_put(desc_info
, ROCKER_TLV_OF_DPA_SRC_MAC
,
2075 ETH_ALEN
, entry
->key
.acl
.eth_src
))
2077 if (rocker_tlv_put(desc_info
, ROCKER_TLV_OF_DPA_SRC_MAC_MASK
,
2078 ETH_ALEN
, entry
->key
.acl
.eth_src_mask
))
2080 if (rocker_tlv_put(desc_info
, ROCKER_TLV_OF_DPA_DST_MAC
,
2081 ETH_ALEN
, entry
->key
.acl
.eth_dst
))
2083 if (rocker_tlv_put(desc_info
, ROCKER_TLV_OF_DPA_DST_MAC_MASK
,
2084 ETH_ALEN
, entry
->key
.acl
.eth_dst_mask
))
2086 if (rocker_tlv_put_be16(desc_info
, ROCKER_TLV_OF_DPA_ETHERTYPE
,
2087 entry
->key
.acl
.eth_type
))
2089 if (rocker_tlv_put_be16(desc_info
, ROCKER_TLV_OF_DPA_VLAN_ID
,
2090 entry
->key
.acl
.vlan_id
))
2092 if (rocker_tlv_put_be16(desc_info
, ROCKER_TLV_OF_DPA_VLAN_ID_MASK
,
2093 entry
->key
.acl
.vlan_id_mask
))
2096 switch (ntohs(entry
->key
.acl
.eth_type
)) {
2099 if (rocker_tlv_put_u8(desc_info
, ROCKER_TLV_OF_DPA_IP_PROTO
,
2100 entry
->key
.acl
.ip_proto
))
2102 if (rocker_tlv_put_u8(desc_info
,
2103 ROCKER_TLV_OF_DPA_IP_PROTO_MASK
,
2104 entry
->key
.acl
.ip_proto_mask
))
2106 if (rocker_tlv_put_u8(desc_info
, ROCKER_TLV_OF_DPA_IP_DSCP
,
2107 entry
->key
.acl
.ip_tos
& 0x3f))
2109 if (rocker_tlv_put_u8(desc_info
,
2110 ROCKER_TLV_OF_DPA_IP_DSCP_MASK
,
2111 entry
->key
.acl
.ip_tos_mask
& 0x3f))
2113 if (rocker_tlv_put_u8(desc_info
, ROCKER_TLV_OF_DPA_IP_ECN
,
2114 (entry
->key
.acl
.ip_tos
& 0xc0) >> 6))
2116 if (rocker_tlv_put_u8(desc_info
,
2117 ROCKER_TLV_OF_DPA_IP_ECN_MASK
,
2118 (entry
->key
.acl
.ip_tos_mask
& 0xc0) >> 6))
2123 if (entry
->key
.acl
.group_id
!= ROCKER_GROUP_NONE
&&
2124 rocker_tlv_put_u32(desc_info
, ROCKER_TLV_OF_DPA_GROUP_ID
,
2125 entry
->key
.acl
.group_id
))
2131 static int rocker_cmd_flow_tbl_add(const struct rocker_port
*rocker_port
,
2132 struct rocker_desc_info
*desc_info
,
2135 const struct rocker_flow_tbl_entry
*entry
= priv
;
2136 struct rocker_tlv
*cmd_info
;
2139 if (rocker_tlv_put_u16(desc_info
, ROCKER_TLV_CMD_TYPE
, entry
->cmd
))
2141 cmd_info
= rocker_tlv_nest_start(desc_info
, ROCKER_TLV_CMD_INFO
);
2144 if (rocker_tlv_put_u16(desc_info
, ROCKER_TLV_OF_DPA_TABLE_ID
,
2147 if (rocker_tlv_put_u32(desc_info
, ROCKER_TLV_OF_DPA_PRIORITY
,
2148 entry
->key
.priority
))
2150 if (rocker_tlv_put_u32(desc_info
, ROCKER_TLV_OF_DPA_HARDTIME
, 0))
2152 if (rocker_tlv_put_u64(desc_info
, ROCKER_TLV_OF_DPA_COOKIE
,
2156 switch (entry
->key
.tbl_id
) {
2157 case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT
:
2158 err
= rocker_cmd_flow_tbl_add_ig_port(desc_info
, entry
);
2160 case ROCKER_OF_DPA_TABLE_ID_VLAN
:
2161 err
= rocker_cmd_flow_tbl_add_vlan(desc_info
, entry
);
2163 case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC
:
2164 err
= rocker_cmd_flow_tbl_add_term_mac(desc_info
, entry
);
2166 case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING
:
2167 err
= rocker_cmd_flow_tbl_add_ucast_routing(desc_info
, entry
);
2169 case ROCKER_OF_DPA_TABLE_ID_BRIDGING
:
2170 err
= rocker_cmd_flow_tbl_add_bridge(desc_info
, entry
);
2172 case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY
:
2173 err
= rocker_cmd_flow_tbl_add_acl(desc_info
, entry
);
2183 rocker_tlv_nest_end(desc_info
, cmd_info
);
2188 static int rocker_cmd_flow_tbl_del(const struct rocker_port
*rocker_port
,
2189 struct rocker_desc_info
*desc_info
,
2192 const struct rocker_flow_tbl_entry
*entry
= priv
;
2193 struct rocker_tlv
*cmd_info
;
2195 if (rocker_tlv_put_u16(desc_info
, ROCKER_TLV_CMD_TYPE
, entry
->cmd
))
2197 cmd_info
= rocker_tlv_nest_start(desc_info
, ROCKER_TLV_CMD_INFO
);
2200 if (rocker_tlv_put_u64(desc_info
, ROCKER_TLV_OF_DPA_COOKIE
,
2203 rocker_tlv_nest_end(desc_info
, cmd_info
);
2209 rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info
*desc_info
,
2210 struct rocker_group_tbl_entry
*entry
)
2212 if (rocker_tlv_put_u32(desc_info
, ROCKER_TLV_OF_DPA_OUT_PPORT
,
2213 ROCKER_GROUP_PORT_GET(entry
->group_id
)))
2215 if (rocker_tlv_put_u8(desc_info
, ROCKER_TLV_OF_DPA_POP_VLAN
,
2216 entry
->l2_interface
.pop_vlan
))
2223 rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info
*desc_info
,
2224 const struct rocker_group_tbl_entry
*entry
)
2226 if (rocker_tlv_put_u32(desc_info
, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER
,
2227 entry
->l2_rewrite
.group_id
))
2229 if (!is_zero_ether_addr(entry
->l2_rewrite
.eth_src
) &&
2230 rocker_tlv_put(desc_info
, ROCKER_TLV_OF_DPA_SRC_MAC
,
2231 ETH_ALEN
, entry
->l2_rewrite
.eth_src
))
2233 if (!is_zero_ether_addr(entry
->l2_rewrite
.eth_dst
) &&
2234 rocker_tlv_put(desc_info
, ROCKER_TLV_OF_DPA_DST_MAC
,
2235 ETH_ALEN
, entry
->l2_rewrite
.eth_dst
))
2237 if (entry
->l2_rewrite
.vlan_id
&&
2238 rocker_tlv_put_be16(desc_info
, ROCKER_TLV_OF_DPA_VLAN_ID
,
2239 entry
->l2_rewrite
.vlan_id
))
2246 rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info
*desc_info
,
2247 const struct rocker_group_tbl_entry
*entry
)
2250 struct rocker_tlv
*group_ids
;
2252 if (rocker_tlv_put_u16(desc_info
, ROCKER_TLV_OF_DPA_GROUP_COUNT
,
2253 entry
->group_count
))
2256 group_ids
= rocker_tlv_nest_start(desc_info
,
2257 ROCKER_TLV_OF_DPA_GROUP_IDS
);
2261 for (i
= 0; i
< entry
->group_count
; i
++)
2262 /* Note TLV array is 1-based */
2263 if (rocker_tlv_put_u32(desc_info
, i
+ 1, entry
->group_ids
[i
]))
2266 rocker_tlv_nest_end(desc_info
, group_ids
);
2272 rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info
*desc_info
,
2273 const struct rocker_group_tbl_entry
*entry
)
2275 if (!is_zero_ether_addr(entry
->l3_unicast
.eth_src
) &&
2276 rocker_tlv_put(desc_info
, ROCKER_TLV_OF_DPA_SRC_MAC
,
2277 ETH_ALEN
, entry
->l3_unicast
.eth_src
))
2279 if (!is_zero_ether_addr(entry
->l3_unicast
.eth_dst
) &&
2280 rocker_tlv_put(desc_info
, ROCKER_TLV_OF_DPA_DST_MAC
,
2281 ETH_ALEN
, entry
->l3_unicast
.eth_dst
))
2283 if (entry
->l3_unicast
.vlan_id
&&
2284 rocker_tlv_put_be16(desc_info
, ROCKER_TLV_OF_DPA_VLAN_ID
,
2285 entry
->l3_unicast
.vlan_id
))
2287 if (rocker_tlv_put_u8(desc_info
, ROCKER_TLV_OF_DPA_TTL_CHECK
,
2288 entry
->l3_unicast
.ttl_check
))
2290 if (rocker_tlv_put_u32(desc_info
, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER
,
2291 entry
->l3_unicast
.group_id
))
2297 static int rocker_cmd_group_tbl_add(const struct rocker_port
*rocker_port
,
2298 struct rocker_desc_info
*desc_info
,
2301 struct rocker_group_tbl_entry
*entry
= priv
;
2302 struct rocker_tlv
*cmd_info
;
2305 if (rocker_tlv_put_u16(desc_info
, ROCKER_TLV_CMD_TYPE
, entry
->cmd
))
2307 cmd_info
= rocker_tlv_nest_start(desc_info
, ROCKER_TLV_CMD_INFO
);
2311 if (rocker_tlv_put_u32(desc_info
, ROCKER_TLV_OF_DPA_GROUP_ID
,
2315 switch (ROCKER_GROUP_TYPE_GET(entry
->group_id
)) {
2316 case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE
:
2317 err
= rocker_cmd_group_tbl_add_l2_interface(desc_info
, entry
);
2319 case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE
:
2320 err
= rocker_cmd_group_tbl_add_l2_rewrite(desc_info
, entry
);
2322 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD
:
2323 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST
:
2324 err
= rocker_cmd_group_tbl_add_group_ids(desc_info
, entry
);
2326 case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST
:
2327 err
= rocker_cmd_group_tbl_add_l3_unicast(desc_info
, entry
);
2337 rocker_tlv_nest_end(desc_info
, cmd_info
);
2342 static int rocker_cmd_group_tbl_del(const struct rocker_port
*rocker_port
,
2343 struct rocker_desc_info
*desc_info
,
2346 const struct rocker_group_tbl_entry
*entry
= priv
;
2347 struct rocker_tlv
*cmd_info
;
2349 if (rocker_tlv_put_u16(desc_info
, ROCKER_TLV_CMD_TYPE
, entry
->cmd
))
2351 cmd_info
= rocker_tlv_nest_start(desc_info
, ROCKER_TLV_CMD_INFO
);
2354 if (rocker_tlv_put_u32(desc_info
, ROCKER_TLV_OF_DPA_GROUP_ID
,
2357 rocker_tlv_nest_end(desc_info
, cmd_info
);
2362 /***************************************************
2363 * Flow, group, FDB, internal VLAN and neigh tables
2364 ***************************************************/
2366 static int rocker_init_tbls(struct rocker
*rocker
)
2368 hash_init(rocker
->flow_tbl
);
2369 spin_lock_init(&rocker
->flow_tbl_lock
);
2371 hash_init(rocker
->group_tbl
);
2372 spin_lock_init(&rocker
->group_tbl_lock
);
2374 hash_init(rocker
->fdb_tbl
);
2375 spin_lock_init(&rocker
->fdb_tbl_lock
);
2377 hash_init(rocker
->internal_vlan_tbl
);
2378 spin_lock_init(&rocker
->internal_vlan_tbl_lock
);
2380 hash_init(rocker
->neigh_tbl
);
2381 spin_lock_init(&rocker
->neigh_tbl_lock
);
2386 static void rocker_free_tbls(struct rocker
*rocker
)
2388 unsigned long flags
;
2389 struct rocker_flow_tbl_entry
*flow_entry
;
2390 struct rocker_group_tbl_entry
*group_entry
;
2391 struct rocker_fdb_tbl_entry
*fdb_entry
;
2392 struct rocker_internal_vlan_tbl_entry
*internal_vlan_entry
;
2393 struct rocker_neigh_tbl_entry
*neigh_entry
;
2394 struct hlist_node
*tmp
;
2397 spin_lock_irqsave(&rocker
->flow_tbl_lock
, flags
);
2398 hash_for_each_safe(rocker
->flow_tbl
, bkt
, tmp
, flow_entry
, entry
)
2399 hash_del(&flow_entry
->entry
);
2400 spin_unlock_irqrestore(&rocker
->flow_tbl_lock
, flags
);
2402 spin_lock_irqsave(&rocker
->group_tbl_lock
, flags
);
2403 hash_for_each_safe(rocker
->group_tbl
, bkt
, tmp
, group_entry
, entry
)
2404 hash_del(&group_entry
->entry
);
2405 spin_unlock_irqrestore(&rocker
->group_tbl_lock
, flags
);
2407 spin_lock_irqsave(&rocker
->fdb_tbl_lock
, flags
);
2408 hash_for_each_safe(rocker
->fdb_tbl
, bkt
, tmp
, fdb_entry
, entry
)
2409 hash_del(&fdb_entry
->entry
);
2410 spin_unlock_irqrestore(&rocker
->fdb_tbl_lock
, flags
);
2412 spin_lock_irqsave(&rocker
->internal_vlan_tbl_lock
, flags
);
2413 hash_for_each_safe(rocker
->internal_vlan_tbl
, bkt
,
2414 tmp
, internal_vlan_entry
, entry
)
2415 hash_del(&internal_vlan_entry
->entry
);
2416 spin_unlock_irqrestore(&rocker
->internal_vlan_tbl_lock
, flags
);
2418 spin_lock_irqsave(&rocker
->neigh_tbl_lock
, flags
);
2419 hash_for_each_safe(rocker
->neigh_tbl
, bkt
, tmp
, neigh_entry
, entry
)
2420 hash_del(&neigh_entry
->entry
);
2421 spin_unlock_irqrestore(&rocker
->neigh_tbl_lock
, flags
);
2424 static struct rocker_flow_tbl_entry
*
2425 rocker_flow_tbl_find(const struct rocker
*rocker
,
2426 const struct rocker_flow_tbl_entry
*match
)
2428 struct rocker_flow_tbl_entry
*found
;
2429 size_t key_len
= match
->key_len
? match
->key_len
: sizeof(found
->key
);
2431 hash_for_each_possible(rocker
->flow_tbl
, found
,
2432 entry
, match
->key_crc32
) {
2433 if (memcmp(&found
->key
, &match
->key
, key_len
) == 0)
2440 static int rocker_flow_tbl_add(struct rocker_port
*rocker_port
,
2441 enum switchdev_trans trans
, int flags
,
2442 struct rocker_flow_tbl_entry
*match
)
2444 struct rocker
*rocker
= rocker_port
->rocker
;
2445 struct rocker_flow_tbl_entry
*found
;
2446 size_t key_len
= match
->key_len
? match
->key_len
: sizeof(found
->key
);
2447 unsigned long lock_flags
;
2449 match
->key_crc32
= crc32(~0, &match
->key
, key_len
);
2451 spin_lock_irqsave(&rocker
->flow_tbl_lock
, lock_flags
);
2453 found
= rocker_flow_tbl_find(rocker
, match
);
2456 match
->cookie
= found
->cookie
;
2457 if (trans
!= SWITCHDEV_TRANS_PREPARE
)
2458 hash_del(&found
->entry
);
2459 rocker_port_kfree(trans
, found
);
2461 found
->cmd
= ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD
;
2464 found
->cookie
= rocker
->flow_tbl_next_cookie
++;
2465 found
->cmd
= ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD
;
2468 if (trans
!= SWITCHDEV_TRANS_PREPARE
)
2469 hash_add(rocker
->flow_tbl
, &found
->entry
, found
->key_crc32
);
2471 spin_unlock_irqrestore(&rocker
->flow_tbl_lock
, lock_flags
);
2473 return rocker_cmd_exec(rocker_port
, trans
, flags
,
2474 rocker_cmd_flow_tbl_add
, found
, NULL
, NULL
);
2477 static int rocker_flow_tbl_del(struct rocker_port
*rocker_port
,
2478 enum switchdev_trans trans
, int flags
,
2479 struct rocker_flow_tbl_entry
*match
)
2481 struct rocker
*rocker
= rocker_port
->rocker
;
2482 struct rocker_flow_tbl_entry
*found
;
2483 size_t key_len
= match
->key_len
? match
->key_len
: sizeof(found
->key
);
2484 unsigned long lock_flags
;
2487 match
->key_crc32
= crc32(~0, &match
->key
, key_len
);
2489 spin_lock_irqsave(&rocker
->flow_tbl_lock
, lock_flags
);
2491 found
= rocker_flow_tbl_find(rocker
, match
);
2494 if (trans
!= SWITCHDEV_TRANS_PREPARE
)
2495 hash_del(&found
->entry
);
2496 found
->cmd
= ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL
;
2499 spin_unlock_irqrestore(&rocker
->flow_tbl_lock
, lock_flags
);
2501 rocker_port_kfree(trans
, match
);
2504 err
= rocker_cmd_exec(rocker_port
, trans
, flags
,
2505 rocker_cmd_flow_tbl_del
,
2507 rocker_port_kfree(trans
, found
);
2513 static int rocker_flow_tbl_do(struct rocker_port
*rocker_port
,
2514 enum switchdev_trans trans
, int flags
,
2515 struct rocker_flow_tbl_entry
*entry
)
2517 if (flags
& ROCKER_OP_FLAG_REMOVE
)
2518 return rocker_flow_tbl_del(rocker_port
, trans
, flags
, entry
);
2520 return rocker_flow_tbl_add(rocker_port
, trans
, flags
, entry
);
2523 static int rocker_flow_tbl_ig_port(struct rocker_port
*rocker_port
,
2524 enum switchdev_trans trans
, int flags
,
2525 u32 in_pport
, u32 in_pport_mask
,
2526 enum rocker_of_dpa_table_id goto_tbl
)
2528 struct rocker_flow_tbl_entry
*entry
;
2530 entry
= rocker_port_kzalloc(rocker_port
, trans
, flags
, sizeof(*entry
));
2534 entry
->key
.priority
= ROCKER_PRIORITY_IG_PORT
;
2535 entry
->key
.tbl_id
= ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT
;
2536 entry
->key
.ig_port
.in_pport
= in_pport
;
2537 entry
->key
.ig_port
.in_pport_mask
= in_pport_mask
;
2538 entry
->key
.ig_port
.goto_tbl
= goto_tbl
;
2540 return rocker_flow_tbl_do(rocker_port
, trans
, flags
, entry
);
2543 static int rocker_flow_tbl_vlan(struct rocker_port
*rocker_port
,
2544 enum switchdev_trans trans
, int flags
,
2545 u32 in_pport
, __be16 vlan_id
,
2546 __be16 vlan_id_mask
,
2547 enum rocker_of_dpa_table_id goto_tbl
,
2548 bool untagged
, __be16 new_vlan_id
)
2550 struct rocker_flow_tbl_entry
*entry
;
2552 entry
= rocker_port_kzalloc(rocker_port
, trans
, flags
, sizeof(*entry
));
2556 entry
->key
.priority
= ROCKER_PRIORITY_VLAN
;
2557 entry
->key
.tbl_id
= ROCKER_OF_DPA_TABLE_ID_VLAN
;
2558 entry
->key
.vlan
.in_pport
= in_pport
;
2559 entry
->key
.vlan
.vlan_id
= vlan_id
;
2560 entry
->key
.vlan
.vlan_id_mask
= vlan_id_mask
;
2561 entry
->key
.vlan
.goto_tbl
= goto_tbl
;
2563 entry
->key
.vlan
.untagged
= untagged
;
2564 entry
->key
.vlan
.new_vlan_id
= new_vlan_id
;
2566 return rocker_flow_tbl_do(rocker_port
, trans
, flags
, entry
);
2569 static int rocker_flow_tbl_term_mac(struct rocker_port
*rocker_port
,
2570 enum switchdev_trans trans
,
2571 u32 in_pport
, u32 in_pport_mask
,
2572 __be16 eth_type
, const u8
*eth_dst
,
2573 const u8
*eth_dst_mask
, __be16 vlan_id
,
2574 __be16 vlan_id_mask
, bool copy_to_cpu
,
2577 struct rocker_flow_tbl_entry
*entry
;
2579 entry
= rocker_port_kzalloc(rocker_port
, trans
, flags
, sizeof(*entry
));
2583 if (is_multicast_ether_addr(eth_dst
)) {
2584 entry
->key
.priority
= ROCKER_PRIORITY_TERM_MAC_MCAST
;
2585 entry
->key
.term_mac
.goto_tbl
=
2586 ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING
;
2588 entry
->key
.priority
= ROCKER_PRIORITY_TERM_MAC_UCAST
;
2589 entry
->key
.term_mac
.goto_tbl
=
2590 ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING
;
2593 entry
->key
.tbl_id
= ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC
;
2594 entry
->key
.term_mac
.in_pport
= in_pport
;
2595 entry
->key
.term_mac
.in_pport_mask
= in_pport_mask
;
2596 entry
->key
.term_mac
.eth_type
= eth_type
;
2597 ether_addr_copy(entry
->key
.term_mac
.eth_dst
, eth_dst
);
2598 ether_addr_copy(entry
->key
.term_mac
.eth_dst_mask
, eth_dst_mask
);
2599 entry
->key
.term_mac
.vlan_id
= vlan_id
;
2600 entry
->key
.term_mac
.vlan_id_mask
= vlan_id_mask
;
2601 entry
->key
.term_mac
.copy_to_cpu
= copy_to_cpu
;
2603 return rocker_flow_tbl_do(rocker_port
, trans
, flags
, entry
);
2606 static int rocker_flow_tbl_bridge(struct rocker_port
*rocker_port
,
2607 enum switchdev_trans trans
, int flags
,
2608 const u8
*eth_dst
, const u8
*eth_dst_mask
,
2609 __be16 vlan_id
, u32 tunnel_id
,
2610 enum rocker_of_dpa_table_id goto_tbl
,
2611 u32 group_id
, bool copy_to_cpu
)
2613 struct rocker_flow_tbl_entry
*entry
;
2615 bool vlan_bridging
= !!vlan_id
;
2616 bool dflt
= !eth_dst
|| (eth_dst
&& eth_dst_mask
);
2619 entry
= rocker_port_kzalloc(rocker_port
, trans
, flags
, sizeof(*entry
));
2623 entry
->key
.tbl_id
= ROCKER_OF_DPA_TABLE_ID_BRIDGING
;
2626 entry
->key
.bridge
.has_eth_dst
= 1;
2627 ether_addr_copy(entry
->key
.bridge
.eth_dst
, eth_dst
);
2630 entry
->key
.bridge
.has_eth_dst_mask
= 1;
2631 ether_addr_copy(entry
->key
.bridge
.eth_dst_mask
, eth_dst_mask
);
2632 if (!ether_addr_equal(eth_dst_mask
, ff_mac
))
2636 priority
= ROCKER_PRIORITY_UNKNOWN
;
2637 if (vlan_bridging
&& dflt
&& wild
)
2638 priority
= ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD
;
2639 else if (vlan_bridging
&& dflt
&& !wild
)
2640 priority
= ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT
;
2641 else if (vlan_bridging
&& !dflt
)
2642 priority
= ROCKER_PRIORITY_BRIDGING_VLAN
;
2643 else if (!vlan_bridging
&& dflt
&& wild
)
2644 priority
= ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD
;
2645 else if (!vlan_bridging
&& dflt
&& !wild
)
2646 priority
= ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT
;
2647 else if (!vlan_bridging
&& !dflt
)
2648 priority
= ROCKER_PRIORITY_BRIDGING_TENANT
;
2650 entry
->key
.priority
= priority
;
2651 entry
->key
.bridge
.vlan_id
= vlan_id
;
2652 entry
->key
.bridge
.tunnel_id
= tunnel_id
;
2653 entry
->key
.bridge
.goto_tbl
= goto_tbl
;
2654 entry
->key
.bridge
.group_id
= group_id
;
2655 entry
->key
.bridge
.copy_to_cpu
= copy_to_cpu
;
2657 return rocker_flow_tbl_do(rocker_port
, trans
, flags
, entry
);
2660 static int rocker_flow_tbl_ucast4_routing(struct rocker_port
*rocker_port
,
2661 enum switchdev_trans trans
,
2662 __be16 eth_type
, __be32 dst
,
2663 __be32 dst_mask
, u32 priority
,
2664 enum rocker_of_dpa_table_id goto_tbl
,
2665 u32 group_id
, int flags
)
2667 struct rocker_flow_tbl_entry
*entry
;
2669 entry
= rocker_port_kzalloc(rocker_port
, trans
, flags
, sizeof(*entry
));
2673 entry
->key
.tbl_id
= ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING
;
2674 entry
->key
.priority
= priority
;
2675 entry
->key
.ucast_routing
.eth_type
= eth_type
;
2676 entry
->key
.ucast_routing
.dst4
= dst
;
2677 entry
->key
.ucast_routing
.dst4_mask
= dst_mask
;
2678 entry
->key
.ucast_routing
.goto_tbl
= goto_tbl
;
2679 entry
->key
.ucast_routing
.group_id
= group_id
;
2680 entry
->key_len
= offsetof(struct rocker_flow_tbl_key
,
2681 ucast_routing
.group_id
);
2683 return rocker_flow_tbl_do(rocker_port
, trans
, flags
, entry
);
2686 static int rocker_flow_tbl_acl(struct rocker_port
*rocker_port
,
2687 enum switchdev_trans trans
, int flags
,
2688 u32 in_pport
, u32 in_pport_mask
,
2689 const u8
*eth_src
, const u8
*eth_src_mask
,
2690 const u8
*eth_dst
, const u8
*eth_dst_mask
,
2691 __be16 eth_type
, __be16 vlan_id
,
2692 __be16 vlan_id_mask
, u8 ip_proto
,
2693 u8 ip_proto_mask
, u8 ip_tos
, u8 ip_tos_mask
,
2697 struct rocker_flow_tbl_entry
*entry
;
2699 entry
= rocker_port_kzalloc(rocker_port
, trans
, flags
, sizeof(*entry
));
2703 priority
= ROCKER_PRIORITY_ACL_NORMAL
;
2704 if (eth_dst
&& eth_dst_mask
) {
2705 if (ether_addr_equal(eth_dst_mask
, mcast_mac
))
2706 priority
= ROCKER_PRIORITY_ACL_DFLT
;
2707 else if (is_link_local_ether_addr(eth_dst
))
2708 priority
= ROCKER_PRIORITY_ACL_CTRL
;
2711 entry
->key
.priority
= priority
;
2712 entry
->key
.tbl_id
= ROCKER_OF_DPA_TABLE_ID_ACL_POLICY
;
2713 entry
->key
.acl
.in_pport
= in_pport
;
2714 entry
->key
.acl
.in_pport_mask
= in_pport_mask
;
2717 ether_addr_copy(entry
->key
.acl
.eth_src
, eth_src
);
2719 ether_addr_copy(entry
->key
.acl
.eth_src_mask
, eth_src_mask
);
2721 ether_addr_copy(entry
->key
.acl
.eth_dst
, eth_dst
);
2723 ether_addr_copy(entry
->key
.acl
.eth_dst_mask
, eth_dst_mask
);
2725 entry
->key
.acl
.eth_type
= eth_type
;
2726 entry
->key
.acl
.vlan_id
= vlan_id
;
2727 entry
->key
.acl
.vlan_id_mask
= vlan_id_mask
;
2728 entry
->key
.acl
.ip_proto
= ip_proto
;
2729 entry
->key
.acl
.ip_proto_mask
= ip_proto_mask
;
2730 entry
->key
.acl
.ip_tos
= ip_tos
;
2731 entry
->key
.acl
.ip_tos_mask
= ip_tos_mask
;
2732 entry
->key
.acl
.group_id
= group_id
;
2734 return rocker_flow_tbl_do(rocker_port
, trans
, flags
, entry
);
2737 static struct rocker_group_tbl_entry
*
2738 rocker_group_tbl_find(const struct rocker
*rocker
,
2739 const struct rocker_group_tbl_entry
*match
)
2741 struct rocker_group_tbl_entry
*found
;
2743 hash_for_each_possible(rocker
->group_tbl
, found
,
2744 entry
, match
->group_id
) {
2745 if (found
->group_id
== match
->group_id
)
2752 static void rocker_group_tbl_entry_free(enum switchdev_trans trans
,
2753 struct rocker_group_tbl_entry
*entry
)
2755 switch (ROCKER_GROUP_TYPE_GET(entry
->group_id
)) {
2756 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD
:
2757 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST
:
2758 rocker_port_kfree(trans
, entry
->group_ids
);
2763 rocker_port_kfree(trans
, entry
);
2766 static int rocker_group_tbl_add(struct rocker_port
*rocker_port
,
2767 enum switchdev_trans trans
, int flags
,
2768 struct rocker_group_tbl_entry
*match
)
2770 struct rocker
*rocker
= rocker_port
->rocker
;
2771 struct rocker_group_tbl_entry
*found
;
2772 unsigned long lock_flags
;
2774 spin_lock_irqsave(&rocker
->group_tbl_lock
, lock_flags
);
2776 found
= rocker_group_tbl_find(rocker
, match
);
2779 if (trans
!= SWITCHDEV_TRANS_PREPARE
)
2780 hash_del(&found
->entry
);
2781 rocker_group_tbl_entry_free(trans
, found
);
2783 found
->cmd
= ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD
;
2786 found
->cmd
= ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD
;
2789 if (trans
!= SWITCHDEV_TRANS_PREPARE
)
2790 hash_add(rocker
->group_tbl
, &found
->entry
, found
->group_id
);
2792 spin_unlock_irqrestore(&rocker
->group_tbl_lock
, lock_flags
);
2794 return rocker_cmd_exec(rocker_port
, trans
, flags
,
2795 rocker_cmd_group_tbl_add
, found
, NULL
, NULL
);
2798 static int rocker_group_tbl_del(struct rocker_port
*rocker_port
,
2799 enum switchdev_trans trans
, int flags
,
2800 struct rocker_group_tbl_entry
*match
)
2802 struct rocker
*rocker
= rocker_port
->rocker
;
2803 struct rocker_group_tbl_entry
*found
;
2804 unsigned long lock_flags
;
2807 spin_lock_irqsave(&rocker
->group_tbl_lock
, lock_flags
);
2809 found
= rocker_group_tbl_find(rocker
, match
);
2812 if (trans
!= SWITCHDEV_TRANS_PREPARE
)
2813 hash_del(&found
->entry
);
2814 found
->cmd
= ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL
;
2817 spin_unlock_irqrestore(&rocker
->group_tbl_lock
, lock_flags
);
2819 rocker_group_tbl_entry_free(trans
, match
);
2822 err
= rocker_cmd_exec(rocker_port
, trans
, flags
,
2823 rocker_cmd_group_tbl_del
,
2825 rocker_group_tbl_entry_free(trans
, found
);
2831 static int rocker_group_tbl_do(struct rocker_port
*rocker_port
,
2832 enum switchdev_trans trans
, int flags
,
2833 struct rocker_group_tbl_entry
*entry
)
2835 if (flags
& ROCKER_OP_FLAG_REMOVE
)
2836 return rocker_group_tbl_del(rocker_port
, trans
, flags
, entry
);
2838 return rocker_group_tbl_add(rocker_port
, trans
, flags
, entry
);
2841 static int rocker_group_l2_interface(struct rocker_port
*rocker_port
,
2842 enum switchdev_trans trans
, int flags
,
2843 __be16 vlan_id
, u32 out_pport
,
2846 struct rocker_group_tbl_entry
*entry
;
2848 entry
= rocker_port_kzalloc(rocker_port
, trans
, flags
, sizeof(*entry
));
2852 entry
->group_id
= ROCKER_GROUP_L2_INTERFACE(vlan_id
, out_pport
);
2853 entry
->l2_interface
.pop_vlan
= pop_vlan
;
2855 return rocker_group_tbl_do(rocker_port
, trans
, flags
, entry
);
2858 static int rocker_group_l2_fan_out(struct rocker_port
*rocker_port
,
2859 enum switchdev_trans trans
,
2860 int flags
, u8 group_count
,
2861 const u32
*group_ids
, u32 group_id
)
2863 struct rocker_group_tbl_entry
*entry
;
2865 entry
= rocker_port_kzalloc(rocker_port
, trans
, flags
, sizeof(*entry
));
2869 entry
->group_id
= group_id
;
2870 entry
->group_count
= group_count
;
2872 entry
->group_ids
= rocker_port_kcalloc(rocker_port
, trans
, flags
,
2873 group_count
, sizeof(u32
));
2874 if (!entry
->group_ids
) {
2875 rocker_port_kfree(trans
, entry
);
2878 memcpy(entry
->group_ids
, group_ids
, group_count
* sizeof(u32
));
2880 return rocker_group_tbl_do(rocker_port
, trans
, flags
, entry
);
2883 static int rocker_group_l2_flood(struct rocker_port
*rocker_port
,
2884 enum switchdev_trans trans
, int flags
,
2885 __be16 vlan_id
, u8 group_count
,
2886 const u32
*group_ids
, u32 group_id
)
2888 return rocker_group_l2_fan_out(rocker_port
, trans
, flags
,
2889 group_count
, group_ids
,
2893 static int rocker_group_l3_unicast(struct rocker_port
*rocker_port
,
2894 enum switchdev_trans trans
, int flags
,
2895 u32 index
, const u8
*src_mac
, const u8
*dst_mac
,
2896 __be16 vlan_id
, bool ttl_check
, u32 pport
)
2898 struct rocker_group_tbl_entry
*entry
;
2900 entry
= rocker_port_kzalloc(rocker_port
, trans
, flags
, sizeof(*entry
));
2904 entry
->group_id
= ROCKER_GROUP_L3_UNICAST(index
);
2906 ether_addr_copy(entry
->l3_unicast
.eth_src
, src_mac
);
2908 ether_addr_copy(entry
->l3_unicast
.eth_dst
, dst_mac
);
2909 entry
->l3_unicast
.vlan_id
= vlan_id
;
2910 entry
->l3_unicast
.ttl_check
= ttl_check
;
2911 entry
->l3_unicast
.group_id
= ROCKER_GROUP_L2_INTERFACE(vlan_id
, pport
);
2913 return rocker_group_tbl_do(rocker_port
, trans
, flags
, entry
);
2916 static struct rocker_neigh_tbl_entry
*
2917 rocker_neigh_tbl_find(const struct rocker
*rocker
, __be32 ip_addr
)
2919 struct rocker_neigh_tbl_entry
*found
;
2921 hash_for_each_possible(rocker
->neigh_tbl
, found
,
2922 entry
, be32_to_cpu(ip_addr
))
2923 if (found
->ip_addr
== ip_addr
)
2929 static void _rocker_neigh_add(struct rocker
*rocker
,
2930 enum switchdev_trans trans
,
2931 struct rocker_neigh_tbl_entry
*entry
)
2933 if (trans
!= SWITCHDEV_TRANS_COMMIT
)
2934 entry
->index
= rocker
->neigh_tbl_next_index
++;
2935 if (trans
== SWITCHDEV_TRANS_PREPARE
)
2938 hash_add(rocker
->neigh_tbl
, &entry
->entry
,
2939 be32_to_cpu(entry
->ip_addr
));
2942 static void _rocker_neigh_del(enum switchdev_trans trans
,
2943 struct rocker_neigh_tbl_entry
*entry
)
2945 if (trans
== SWITCHDEV_TRANS_PREPARE
)
2947 if (--entry
->ref_count
== 0) {
2948 hash_del(&entry
->entry
);
2949 rocker_port_kfree(trans
, entry
);
2953 static void _rocker_neigh_update(struct rocker_neigh_tbl_entry
*entry
,
2954 enum switchdev_trans trans
,
2955 const u8
*eth_dst
, bool ttl_check
)
2958 ether_addr_copy(entry
->eth_dst
, eth_dst
);
2959 entry
->ttl_check
= ttl_check
;
2960 } else if (trans
!= SWITCHDEV_TRANS_PREPARE
) {
2965 static int rocker_port_ipv4_neigh(struct rocker_port
*rocker_port
,
2966 enum switchdev_trans trans
,
2967 int flags
, __be32 ip_addr
, const u8
*eth_dst
)
2969 struct rocker
*rocker
= rocker_port
->rocker
;
2970 struct rocker_neigh_tbl_entry
*entry
;
2971 struct rocker_neigh_tbl_entry
*found
;
2972 unsigned long lock_flags
;
2973 __be16 eth_type
= htons(ETH_P_IP
);
2974 enum rocker_of_dpa_table_id goto_tbl
=
2975 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY
;
2978 bool adding
= !(flags
& ROCKER_OP_FLAG_REMOVE
);
2983 entry
= rocker_port_kzalloc(rocker_port
, trans
, flags
, sizeof(*entry
));
2987 spin_lock_irqsave(&rocker
->neigh_tbl_lock
, lock_flags
);
2989 found
= rocker_neigh_tbl_find(rocker
, ip_addr
);
2991 updating
= found
&& adding
;
2992 removing
= found
&& !adding
;
2993 adding
= !found
&& adding
;
2996 entry
->ip_addr
= ip_addr
;
2997 entry
->dev
= rocker_port
->dev
;
2998 ether_addr_copy(entry
->eth_dst
, eth_dst
);
2999 entry
->ttl_check
= true;
3000 _rocker_neigh_add(rocker
, trans
, entry
);
3001 } else if (removing
) {
3002 memcpy(entry
, found
, sizeof(*entry
));
3003 _rocker_neigh_del(trans
, found
);
3004 } else if (updating
) {
3005 _rocker_neigh_update(found
, trans
, eth_dst
, true);
3006 memcpy(entry
, found
, sizeof(*entry
));
3011 spin_unlock_irqrestore(&rocker
->neigh_tbl_lock
, lock_flags
);
3016 /* For each active neighbor, we have an L3 unicast group and
3017 * a /32 route to the neighbor, which uses the L3 unicast
3018 * group. The L3 unicast group can also be referred to by
3019 * other routes' nexthops.
3022 err
= rocker_group_l3_unicast(rocker_port
, trans
, flags
,
3024 rocker_port
->dev
->dev_addr
,
3026 rocker_port
->internal_vlan_id
,
3028 rocker_port
->pport
);
3030 netdev_err(rocker_port
->dev
,
3031 "Error (%d) L3 unicast group index %d\n",
3036 if (adding
|| removing
) {
3037 group_id
= ROCKER_GROUP_L3_UNICAST(entry
->index
);
3038 err
= rocker_flow_tbl_ucast4_routing(rocker_port
, trans
,
3045 netdev_err(rocker_port
->dev
,
3046 "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
3047 err
, &entry
->ip_addr
, group_id
);
3052 rocker_port_kfree(trans
, entry
);
3057 static int rocker_port_ipv4_resolve(struct rocker_port
*rocker_port
,
3058 enum switchdev_trans trans
, __be32 ip_addr
)
3060 struct net_device
*dev
= rocker_port
->dev
;
3061 struct neighbour
*n
= __ipv4_neigh_lookup(dev
, (__force u32
)ip_addr
);
3065 n
= neigh_create(&arp_tbl
, &ip_addr
, dev
);
3070 /* If the neigh is already resolved, then go ahead and
3071 * install the entry, otherwise start the ARP process to
3072 * resolve the neigh.
3075 if (n
->nud_state
& NUD_VALID
)
3076 err
= rocker_port_ipv4_neigh(rocker_port
, trans
, 0,
3079 neigh_event_send(n
, NULL
);
3085 static int rocker_port_ipv4_nh(struct rocker_port
*rocker_port
,
3086 enum switchdev_trans trans
, int flags
,
3087 __be32 ip_addr
, u32
*index
)
3089 struct rocker
*rocker
= rocker_port
->rocker
;
3090 struct rocker_neigh_tbl_entry
*entry
;
3091 struct rocker_neigh_tbl_entry
*found
;
3092 unsigned long lock_flags
;
3093 bool adding
= !(flags
& ROCKER_OP_FLAG_REMOVE
);
3096 bool resolved
= true;
3099 entry
= rocker_port_kzalloc(rocker_port
, trans
, flags
, sizeof(*entry
));
3103 spin_lock_irqsave(&rocker
->neigh_tbl_lock
, lock_flags
);
3105 found
= rocker_neigh_tbl_find(rocker
, ip_addr
);
3107 *index
= found
->index
;
3109 updating
= found
&& adding
;
3110 removing
= found
&& !adding
;
3111 adding
= !found
&& adding
;
3114 entry
->ip_addr
= ip_addr
;
3115 entry
->dev
= rocker_port
->dev
;
3116 _rocker_neigh_add(rocker
, trans
, entry
);
3117 *index
= entry
->index
;
3119 } else if (removing
) {
3120 _rocker_neigh_del(trans
, found
);
3121 } else if (updating
) {
3122 _rocker_neigh_update(found
, trans
, NULL
, false);
3123 resolved
= !is_zero_ether_addr(found
->eth_dst
);
3128 spin_unlock_irqrestore(&rocker
->neigh_tbl_lock
, lock_flags
);
3131 rocker_port_kfree(trans
, entry
);
3136 /* Resolved means neigh ip_addr is resolved to neigh mac. */
3139 err
= rocker_port_ipv4_resolve(rocker_port
, trans
, ip_addr
);
3144 static int rocker_port_vlan_flood_group(struct rocker_port
*rocker_port
,
3145 enum switchdev_trans trans
,
3146 int flags
, __be16 vlan_id
)
3148 struct rocker_port
*p
;
3149 const struct rocker
*rocker
= rocker_port
->rocker
;
3150 u32 group_id
= ROCKER_GROUP_L2_FLOOD(vlan_id
, 0);
3156 group_ids
= rocker_port_kcalloc(rocker_port
, trans
, flags
,
3157 rocker
->port_count
, sizeof(u32
));
3161 /* Adjust the flood group for this VLAN. The flood group
3162 * references an L2 interface group for each port in this
3166 for (i
= 0; i
< rocker
->port_count
; i
++) {
3167 p
= rocker
->ports
[i
];
3170 if (!rocker_port_is_bridged(p
))
3172 if (test_bit(ntohs(vlan_id
), p
->vlan_bitmap
)) {
3173 group_ids
[group_count
++] =
3174 ROCKER_GROUP_L2_INTERFACE(vlan_id
, p
->pport
);
3178 /* If there are no bridged ports in this VLAN, we're done */
3179 if (group_count
== 0)
3180 goto no_ports_in_vlan
;
3182 err
= rocker_group_l2_flood(rocker_port
, trans
, flags
, vlan_id
,
3183 group_count
, group_ids
, group_id
);
3185 netdev_err(rocker_port
->dev
,
3186 "Error (%d) port VLAN l2 flood group\n", err
);
3189 rocker_port_kfree(trans
, group_ids
);
3193 static int rocker_port_vlan_l2_groups(struct rocker_port
*rocker_port
,
3194 enum switchdev_trans trans
, int flags
,
3195 __be16 vlan_id
, bool pop_vlan
)
3197 const struct rocker
*rocker
= rocker_port
->rocker
;
3198 struct rocker_port
*p
;
3199 bool adding
= !(flags
& ROCKER_OP_FLAG_REMOVE
);
3205 /* An L2 interface group for this port in this VLAN, but
3206 * only when port STP state is LEARNING|FORWARDING.
3209 if (rocker_port
->stp_state
== BR_STATE_LEARNING
||
3210 rocker_port
->stp_state
== BR_STATE_FORWARDING
) {
3211 out_pport
= rocker_port
->pport
;
3212 err
= rocker_group_l2_interface(rocker_port
, trans
, flags
,
3213 vlan_id
, out_pport
, pop_vlan
);
3215 netdev_err(rocker_port
->dev
,
3216 "Error (%d) port VLAN l2 group for pport %d\n",
3222 /* An L2 interface group for this VLAN to CPU port.
3223 * Add when first port joins this VLAN and destroy when
3224 * last port leaves this VLAN.
3227 for (i
= 0; i
< rocker
->port_count
; i
++) {
3228 p
= rocker
->ports
[i
];
3229 if (p
&& test_bit(ntohs(vlan_id
), p
->vlan_bitmap
))
3233 if ((!adding
|| ref
!= 1) && (adding
|| ref
!= 0))
3237 err
= rocker_group_l2_interface(rocker_port
, trans
, flags
,
3238 vlan_id
, out_pport
, pop_vlan
);
3240 netdev_err(rocker_port
->dev
,
3241 "Error (%d) port VLAN l2 group for CPU port\n", err
);
3248 static struct rocker_ctrl
{
3250 const u8
*eth_dst_mask
;
3256 } rocker_ctrls
[] = {
3257 [ROCKER_CTRL_LINK_LOCAL_MCAST
] = {
3258 /* pass link local multicast pkts up to CPU for filtering */
3260 .eth_dst_mask
= ll_mask
,
3263 [ROCKER_CTRL_LOCAL_ARP
] = {
3264 /* pass local ARP pkts up to CPU */
3265 .eth_dst
= zero_mac
,
3266 .eth_dst_mask
= zero_mac
,
3267 .eth_type
= htons(ETH_P_ARP
),
3270 [ROCKER_CTRL_IPV4_MCAST
] = {
3271 /* pass IPv4 mcast pkts up to CPU, RFC 1112 */
3272 .eth_dst
= ipv4_mcast
,
3273 .eth_dst_mask
= ipv4_mask
,
3274 .eth_type
= htons(ETH_P_IP
),
3276 .copy_to_cpu
= true,
3278 [ROCKER_CTRL_IPV6_MCAST
] = {
3279 /* pass IPv6 mcast pkts up to CPU, RFC 2464 */
3280 .eth_dst
= ipv6_mcast
,
3281 .eth_dst_mask
= ipv6_mask
,
3282 .eth_type
= htons(ETH_P_IPV6
),
3284 .copy_to_cpu
= true,
3286 [ROCKER_CTRL_DFLT_BRIDGING
] = {
3287 /* flood any pkts on vlan */
3289 .copy_to_cpu
= true,
3291 [ROCKER_CTRL_DFLT_OVS
] = {
3292 /* pass all pkts up to CPU */
3293 .eth_dst
= zero_mac
,
3294 .eth_dst_mask
= zero_mac
,
3299 static int rocker_port_ctrl_vlan_acl(struct rocker_port
*rocker_port
,
3300 enum switchdev_trans trans
, int flags
,
3301 const struct rocker_ctrl
*ctrl
, __be16 vlan_id
)
3303 u32 in_pport
= rocker_port
->pport
;
3304 u32 in_pport_mask
= 0xffffffff;
3306 const u8
*eth_src
= NULL
;
3307 const u8
*eth_src_mask
= NULL
;
3308 __be16 vlan_id_mask
= htons(0xffff);
3310 u8 ip_proto_mask
= 0;
3313 u32 group_id
= ROCKER_GROUP_L2_INTERFACE(vlan_id
, out_pport
);
3316 err
= rocker_flow_tbl_acl(rocker_port
, trans
, flags
,
3317 in_pport
, in_pport_mask
,
3318 eth_src
, eth_src_mask
,
3319 ctrl
->eth_dst
, ctrl
->eth_dst_mask
,
3321 vlan_id
, vlan_id_mask
,
3322 ip_proto
, ip_proto_mask
,
3323 ip_tos
, ip_tos_mask
,
3327 netdev_err(rocker_port
->dev
, "Error (%d) ctrl ACL\n", err
);
3332 static int rocker_port_ctrl_vlan_bridge(struct rocker_port
*rocker_port
,
3333 enum switchdev_trans trans
, int flags
,
3334 const struct rocker_ctrl
*ctrl
,
3337 enum rocker_of_dpa_table_id goto_tbl
=
3338 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY
;
3339 u32 group_id
= ROCKER_GROUP_L2_FLOOD(vlan_id
, 0);
3343 if (!rocker_port_is_bridged(rocker_port
))
3346 err
= rocker_flow_tbl_bridge(rocker_port
, trans
, flags
,
3347 ctrl
->eth_dst
, ctrl
->eth_dst_mask
,
3349 goto_tbl
, group_id
, ctrl
->copy_to_cpu
);
3352 netdev_err(rocker_port
->dev
, "Error (%d) ctrl FLOOD\n", err
);
3357 static int rocker_port_ctrl_vlan_term(struct rocker_port
*rocker_port
,
3358 enum switchdev_trans trans
, int flags
,
3359 const struct rocker_ctrl
*ctrl
, __be16 vlan_id
)
3361 u32 in_pport_mask
= 0xffffffff;
3362 __be16 vlan_id_mask
= htons(0xffff);
3365 if (ntohs(vlan_id
) == 0)
3366 vlan_id
= rocker_port
->internal_vlan_id
;
3368 err
= rocker_flow_tbl_term_mac(rocker_port
, trans
,
3369 rocker_port
->pport
, in_pport_mask
,
3370 ctrl
->eth_type
, ctrl
->eth_dst
,
3371 ctrl
->eth_dst_mask
, vlan_id
,
3372 vlan_id_mask
, ctrl
->copy_to_cpu
,
3376 netdev_err(rocker_port
->dev
, "Error (%d) ctrl term\n", err
);
3381 static int rocker_port_ctrl_vlan(struct rocker_port
*rocker_port
,
3382 enum switchdev_trans trans
, int flags
,
3383 const struct rocker_ctrl
*ctrl
, __be16 vlan_id
)
3386 return rocker_port_ctrl_vlan_acl(rocker_port
, trans
, flags
,
3389 return rocker_port_ctrl_vlan_bridge(rocker_port
, trans
, flags
,
3393 return rocker_port_ctrl_vlan_term(rocker_port
, trans
, flags
,
3399 static int rocker_port_ctrl_vlan_add(struct rocker_port
*rocker_port
,
3400 enum switchdev_trans trans
, int flags
,
3406 for (i
= 0; i
< ROCKER_CTRL_MAX
; i
++) {
3407 if (rocker_port
->ctrls
[i
]) {
3408 err
= rocker_port_ctrl_vlan(rocker_port
, trans
, flags
,
3409 &rocker_ctrls
[i
], vlan_id
);
3418 static int rocker_port_ctrl(struct rocker_port
*rocker_port
,
3419 enum switchdev_trans trans
, int flags
,
3420 const struct rocker_ctrl
*ctrl
)
3425 for (vid
= 1; vid
< VLAN_N_VID
; vid
++) {
3426 if (!test_bit(vid
, rocker_port
->vlan_bitmap
))
3428 err
= rocker_port_ctrl_vlan(rocker_port
, trans
, flags
,
3437 static int rocker_port_vlan(struct rocker_port
*rocker_port
,
3438 enum switchdev_trans trans
, int flags
, u16 vid
)
3440 enum rocker_of_dpa_table_id goto_tbl
=
3441 ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC
;
3442 u32 in_pport
= rocker_port
->pport
;
3443 __be16 vlan_id
= htons(vid
);
3444 __be16 vlan_id_mask
= htons(0xffff);
3445 __be16 internal_vlan_id
;
3447 bool adding
= !(flags
& ROCKER_OP_FLAG_REMOVE
);
3450 internal_vlan_id
= rocker_port_vid_to_vlan(rocker_port
, vid
, &untagged
);
3452 if (adding
&& test_bit(ntohs(internal_vlan_id
),
3453 rocker_port
->vlan_bitmap
))
3454 return 0; /* already added */
3455 else if (!adding
&& !test_bit(ntohs(internal_vlan_id
),
3456 rocker_port
->vlan_bitmap
))
3457 return 0; /* already removed */
3459 change_bit(ntohs(internal_vlan_id
), rocker_port
->vlan_bitmap
);
3462 err
= rocker_port_ctrl_vlan_add(rocker_port
, trans
, flags
,
3465 netdev_err(rocker_port
->dev
,
3466 "Error (%d) port ctrl vlan add\n", err
);
3471 err
= rocker_port_vlan_l2_groups(rocker_port
, trans
, flags
,
3472 internal_vlan_id
, untagged
);
3474 netdev_err(rocker_port
->dev
,
3475 "Error (%d) port VLAN l2 groups\n", err
);
3479 err
= rocker_port_vlan_flood_group(rocker_port
, trans
, flags
,
3482 netdev_err(rocker_port
->dev
,
3483 "Error (%d) port VLAN l2 flood group\n", err
);
3487 err
= rocker_flow_tbl_vlan(rocker_port
, trans
, flags
,
3488 in_pport
, vlan_id
, vlan_id_mask
,
3489 goto_tbl
, untagged
, internal_vlan_id
);
3491 netdev_err(rocker_port
->dev
,
3492 "Error (%d) port VLAN table\n", err
);
3495 if (trans
== SWITCHDEV_TRANS_PREPARE
)
3496 change_bit(ntohs(internal_vlan_id
), rocker_port
->vlan_bitmap
);
3501 static int rocker_port_ig_tbl(struct rocker_port
*rocker_port
,
3502 enum switchdev_trans trans
, int flags
)
3504 enum rocker_of_dpa_table_id goto_tbl
;
3509 /* Normal Ethernet Frames. Matches pkts from any local physical
3510 * ports. Goto VLAN tbl.
3514 in_pport_mask
= 0xffff0000;
3515 goto_tbl
= ROCKER_OF_DPA_TABLE_ID_VLAN
;
3517 err
= rocker_flow_tbl_ig_port(rocker_port
, trans
, flags
,
3518 in_pport
, in_pport_mask
,
3521 netdev_err(rocker_port
->dev
,
3522 "Error (%d) ingress port table entry\n", err
);
3527 struct rocker_fdb_learn_work
{
3528 struct work_struct work
;
3529 struct rocker_port
*rocker_port
;
3530 enum switchdev_trans trans
;
3536 static void rocker_port_fdb_learn_work(struct work_struct
*work
)
3538 const struct rocker_fdb_learn_work
*lw
=
3539 container_of(work
, struct rocker_fdb_learn_work
, work
);
3540 bool removing
= (lw
->flags
& ROCKER_OP_FLAG_REMOVE
);
3541 bool learned
= (lw
->flags
& ROCKER_OP_FLAG_LEARNED
);
3542 struct switchdev_notifier_fdb_info info
;
3544 info
.addr
= lw
->addr
;
3547 if (learned
&& removing
)
3548 call_switchdev_notifiers(SWITCHDEV_FDB_DEL
,
3549 lw
->rocker_port
->dev
, &info
.info
);
3550 else if (learned
&& !removing
)
3551 call_switchdev_notifiers(SWITCHDEV_FDB_ADD
,
3552 lw
->rocker_port
->dev
, &info
.info
);
3554 rocker_port_kfree(lw
->trans
, work
);
3557 static int rocker_port_fdb_learn(struct rocker_port
*rocker_port
,
3558 enum switchdev_trans trans
, int flags
,
3559 const u8
*addr
, __be16 vlan_id
)
3561 struct rocker_fdb_learn_work
*lw
;
3562 enum rocker_of_dpa_table_id goto_tbl
=
3563 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY
;
3564 u32 out_pport
= rocker_port
->pport
;
3566 u32 group_id
= ROCKER_GROUP_NONE
;
3567 bool syncing
= !!(rocker_port
->brport_flags
& BR_LEARNING_SYNC
);
3568 bool copy_to_cpu
= false;
3571 if (rocker_port_is_bridged(rocker_port
))
3572 group_id
= ROCKER_GROUP_L2_INTERFACE(vlan_id
, out_pport
);
3574 if (!(flags
& ROCKER_OP_FLAG_REFRESH
)) {
3575 err
= rocker_flow_tbl_bridge(rocker_port
, trans
, flags
, addr
,
3576 NULL
, vlan_id
, tunnel_id
, goto_tbl
,
3577 group_id
, copy_to_cpu
);
3585 if (!rocker_port_is_bridged(rocker_port
))
3588 lw
= rocker_port_kzalloc(rocker_port
, trans
, flags
, sizeof(*lw
));
3592 INIT_WORK(&lw
->work
, rocker_port_fdb_learn_work
);
3594 lw
->rocker_port
= rocker_port
;
3597 ether_addr_copy(lw
->addr
, addr
);
3598 lw
->vid
= rocker_port_vlan_to_vid(rocker_port
, vlan_id
);
3600 if (trans
== SWITCHDEV_TRANS_PREPARE
)
3601 rocker_port_kfree(trans
, lw
);
3603 schedule_work(&lw
->work
);
3608 static struct rocker_fdb_tbl_entry
*
3609 rocker_fdb_tbl_find(const struct rocker
*rocker
,
3610 const struct rocker_fdb_tbl_entry
*match
)
3612 struct rocker_fdb_tbl_entry
*found
;
3614 hash_for_each_possible(rocker
->fdb_tbl
, found
, entry
, match
->key_crc32
)
3615 if (memcmp(&found
->key
, &match
->key
, sizeof(found
->key
)) == 0)
3621 static int rocker_port_fdb(struct rocker_port
*rocker_port
,
3622 enum switchdev_trans trans
,
3623 const unsigned char *addr
,
3624 __be16 vlan_id
, int flags
)
3626 struct rocker
*rocker
= rocker_port
->rocker
;
3627 struct rocker_fdb_tbl_entry
*fdb
;
3628 struct rocker_fdb_tbl_entry
*found
;
3629 bool removing
= (flags
& ROCKER_OP_FLAG_REMOVE
);
3630 unsigned long lock_flags
;
3632 fdb
= rocker_port_kzalloc(rocker_port
, trans
, flags
, sizeof(*fdb
));
3636 fdb
->learned
= (flags
& ROCKER_OP_FLAG_LEARNED
);
3637 fdb
->key
.pport
= rocker_port
->pport
;
3638 ether_addr_copy(fdb
->key
.addr
, addr
);
3639 fdb
->key
.vlan_id
= vlan_id
;
3640 fdb
->key_crc32
= crc32(~0, &fdb
->key
, sizeof(fdb
->key
));
3642 spin_lock_irqsave(&rocker
->fdb_tbl_lock
, lock_flags
);
3644 found
= rocker_fdb_tbl_find(rocker
, fdb
);
3646 if (removing
&& found
) {
3647 rocker_port_kfree(trans
, fdb
);
3648 if (trans
!= SWITCHDEV_TRANS_PREPARE
)
3649 hash_del(&found
->entry
);
3650 } else if (!removing
&& !found
) {
3651 if (trans
!= SWITCHDEV_TRANS_PREPARE
)
3652 hash_add(rocker
->fdb_tbl
, &fdb
->entry
, fdb
->key_crc32
);
3655 spin_unlock_irqrestore(&rocker
->fdb_tbl_lock
, lock_flags
);
3657 /* Check if adding and already exists, or removing and can't find */
3658 if (!found
!= !removing
) {
3659 rocker_port_kfree(trans
, fdb
);
3660 if (!found
&& removing
)
3662 /* Refreshing existing to update aging timers */
3663 flags
|= ROCKER_OP_FLAG_REFRESH
;
3666 return rocker_port_fdb_learn(rocker_port
, trans
, flags
, addr
, vlan_id
);
3669 static int rocker_port_fdb_flush(struct rocker_port
*rocker_port
,
3670 enum switchdev_trans trans
, int flags
)
3672 struct rocker
*rocker
= rocker_port
->rocker
;
3673 struct rocker_fdb_tbl_entry
*found
;
3674 unsigned long lock_flags
;
3675 struct hlist_node
*tmp
;
3679 if (rocker_port
->stp_state
== BR_STATE_LEARNING
||
3680 rocker_port
->stp_state
== BR_STATE_FORWARDING
)
3683 flags
|= ROCKER_OP_FLAG_REMOVE
;
3685 spin_lock_irqsave(&rocker
->fdb_tbl_lock
, lock_flags
);
3687 hash_for_each_safe(rocker
->fdb_tbl
, bkt
, tmp
, found
, entry
) {
3688 if (found
->key
.pport
!= rocker_port
->pport
)
3690 if (!found
->learned
)
3692 err
= rocker_port_fdb_learn(rocker_port
, trans
, flags
,
3694 found
->key
.vlan_id
);
3697 if (trans
!= SWITCHDEV_TRANS_PREPARE
)
3698 hash_del(&found
->entry
);
3702 spin_unlock_irqrestore(&rocker
->fdb_tbl_lock
, lock_flags
);
3707 static int rocker_port_router_mac(struct rocker_port
*rocker_port
,
3708 enum switchdev_trans trans
, int flags
,
3711 u32 in_pport_mask
= 0xffffffff;
3713 const u8
*dst_mac_mask
= ff_mac
;
3714 __be16 vlan_id_mask
= htons(0xffff);
3715 bool copy_to_cpu
= false;
3718 if (ntohs(vlan_id
) == 0)
3719 vlan_id
= rocker_port
->internal_vlan_id
;
3721 eth_type
= htons(ETH_P_IP
);
3722 err
= rocker_flow_tbl_term_mac(rocker_port
, trans
,
3723 rocker_port
->pport
, in_pport_mask
,
3724 eth_type
, rocker_port
->dev
->dev_addr
,
3725 dst_mac_mask
, vlan_id
, vlan_id_mask
,
3726 copy_to_cpu
, flags
);
3730 eth_type
= htons(ETH_P_IPV6
);
3731 err
= rocker_flow_tbl_term_mac(rocker_port
, trans
,
3732 rocker_port
->pport
, in_pport_mask
,
3733 eth_type
, rocker_port
->dev
->dev_addr
,
3734 dst_mac_mask
, vlan_id
, vlan_id_mask
,
3735 copy_to_cpu
, flags
);
3740 static int rocker_port_fwding(struct rocker_port
*rocker_port
,
3741 enum switchdev_trans trans
, int flags
)
3749 /* Port will be forwarding-enabled if its STP state is LEARNING
3750 * or FORWARDING. Traffic from CPU can still egress, regardless of
3751 * port STP state. Use L2 interface group on port VLANs as a way
3752 * to toggle port forwarding: if forwarding is disabled, L2
3753 * interface group will not exist.
3756 if (rocker_port
->stp_state
!= BR_STATE_LEARNING
&&
3757 rocker_port
->stp_state
!= BR_STATE_FORWARDING
)
3758 flags
|= ROCKER_OP_FLAG_REMOVE
;
3760 out_pport
= rocker_port
->pport
;
3761 for (vid
= 1; vid
< VLAN_N_VID
; vid
++) {
3762 if (!test_bit(vid
, rocker_port
->vlan_bitmap
))
3764 vlan_id
= htons(vid
);
3765 pop_vlan
= rocker_vlan_id_is_internal(vlan_id
);
3766 err
= rocker_group_l2_interface(rocker_port
, trans
, flags
,
3767 vlan_id
, out_pport
, pop_vlan
);
3769 netdev_err(rocker_port
->dev
,
3770 "Error (%d) port VLAN l2 group for pport %d\n",
3779 static int rocker_port_stp_update(struct rocker_port
*rocker_port
,
3780 enum switchdev_trans trans
, int flags
,
3783 bool want
[ROCKER_CTRL_MAX
] = { 0, };
3784 bool prev_ctrls
[ROCKER_CTRL_MAX
];
3789 if (trans
== SWITCHDEV_TRANS_PREPARE
) {
3790 memcpy(prev_ctrls
, rocker_port
->ctrls
, sizeof(prev_ctrls
));
3791 prev_state
= rocker_port
->stp_state
;
3794 if (rocker_port
->stp_state
== state
)
3797 rocker_port
->stp_state
= state
;
3800 case BR_STATE_DISABLED
:
3801 /* port is completely disabled */
3803 case BR_STATE_LISTENING
:
3804 case BR_STATE_BLOCKING
:
3805 want
[ROCKER_CTRL_LINK_LOCAL_MCAST
] = true;
3807 case BR_STATE_LEARNING
:
3808 case BR_STATE_FORWARDING
:
3809 if (!rocker_port_is_ovsed(rocker_port
))
3810 want
[ROCKER_CTRL_LINK_LOCAL_MCAST
] = true;
3811 want
[ROCKER_CTRL_IPV4_MCAST
] = true;
3812 want
[ROCKER_CTRL_IPV6_MCAST
] = true;
3813 if (rocker_port_is_bridged(rocker_port
))
3814 want
[ROCKER_CTRL_DFLT_BRIDGING
] = true;
3815 else if (rocker_port_is_ovsed(rocker_port
))
3816 want
[ROCKER_CTRL_DFLT_OVS
] = true;
3818 want
[ROCKER_CTRL_LOCAL_ARP
] = true;
3822 for (i
= 0; i
< ROCKER_CTRL_MAX
; i
++) {
3823 if (want
[i
] != rocker_port
->ctrls
[i
]) {
3824 int ctrl_flags
= flags
|
3825 (want
[i
] ? 0 : ROCKER_OP_FLAG_REMOVE
);
3826 err
= rocker_port_ctrl(rocker_port
, trans
, ctrl_flags
,
3830 rocker_port
->ctrls
[i
] = want
[i
];
3834 err
= rocker_port_fdb_flush(rocker_port
, trans
, flags
);
3838 err
= rocker_port_fwding(rocker_port
, trans
, flags
);
3841 if (trans
== SWITCHDEV_TRANS_PREPARE
) {
3842 memcpy(rocker_port
->ctrls
, prev_ctrls
, sizeof(prev_ctrls
));
3843 rocker_port
->stp_state
= prev_state
;
3849 static int rocker_port_fwd_enable(struct rocker_port
*rocker_port
,
3850 enum switchdev_trans trans
, int flags
)
3852 if (rocker_port_is_bridged(rocker_port
))
3853 /* bridge STP will enable port */
3856 /* port is not bridged, so simulate going to FORWARDING state */
3857 return rocker_port_stp_update(rocker_port
, trans
, flags
,
3858 BR_STATE_FORWARDING
);
3861 static int rocker_port_fwd_disable(struct rocker_port
*rocker_port
,
3862 enum switchdev_trans trans
, int flags
)
3864 if (rocker_port_is_bridged(rocker_port
))
3865 /* bridge STP will disable port */
3868 /* port is not bridged, so simulate going to DISABLED state */
3869 return rocker_port_stp_update(rocker_port
, trans
, flags
,
3873 static struct rocker_internal_vlan_tbl_entry
*
3874 rocker_internal_vlan_tbl_find(const struct rocker
*rocker
, int ifindex
)
3876 struct rocker_internal_vlan_tbl_entry
*found
;
3878 hash_for_each_possible(rocker
->internal_vlan_tbl
, found
,
3880 if (found
->ifindex
== ifindex
)
3887 static __be16
rocker_port_internal_vlan_id_get(struct rocker_port
*rocker_port
,
3890 struct rocker
*rocker
= rocker_port
->rocker
;
3891 struct rocker_internal_vlan_tbl_entry
*entry
;
3892 struct rocker_internal_vlan_tbl_entry
*found
;
3893 unsigned long lock_flags
;
3896 entry
= kzalloc(sizeof(*entry
), GFP_KERNEL
);
3900 entry
->ifindex
= ifindex
;
3902 spin_lock_irqsave(&rocker
->internal_vlan_tbl_lock
, lock_flags
);
3904 found
= rocker_internal_vlan_tbl_find(rocker
, ifindex
);
3911 hash_add(rocker
->internal_vlan_tbl
, &found
->entry
, found
->ifindex
);
3913 for (i
= 0; i
< ROCKER_N_INTERNAL_VLANS
; i
++) {
3914 if (test_and_set_bit(i
, rocker
->internal_vlan_bitmap
))
3916 found
->vlan_id
= htons(ROCKER_INTERNAL_VLAN_ID_BASE
+ i
);
3920 netdev_err(rocker_port
->dev
, "Out of internal VLAN IDs\n");
3924 spin_unlock_irqrestore(&rocker
->internal_vlan_tbl_lock
, lock_flags
);
3926 return found
->vlan_id
;
3930 rocker_port_internal_vlan_id_put(const struct rocker_port
*rocker_port
,
3933 struct rocker
*rocker
= rocker_port
->rocker
;
3934 struct rocker_internal_vlan_tbl_entry
*found
;
3935 unsigned long lock_flags
;
3938 spin_lock_irqsave(&rocker
->internal_vlan_tbl_lock
, lock_flags
);
3940 found
= rocker_internal_vlan_tbl_find(rocker
, ifindex
);
3942 netdev_err(rocker_port
->dev
,
3943 "ifindex (%d) not found in internal VLAN tbl\n",
3948 if (--found
->ref_count
<= 0) {
3949 bit
= ntohs(found
->vlan_id
) - ROCKER_INTERNAL_VLAN_ID_BASE
;
3950 clear_bit(bit
, rocker
->internal_vlan_bitmap
);
3951 hash_del(&found
->entry
);
3956 spin_unlock_irqrestore(&rocker
->internal_vlan_tbl_lock
, lock_flags
);
3959 static int rocker_port_fib_ipv4(struct rocker_port
*rocker_port
,
3960 enum switchdev_trans trans
, __be32 dst
,
3961 int dst_len
, const struct fib_info
*fi
,
3962 u32 tb_id
, int flags
)
3964 const struct fib_nh
*nh
;
3965 __be16 eth_type
= htons(ETH_P_IP
);
3966 __be32 dst_mask
= inet_make_mask(dst_len
);
3967 __be16 internal_vlan_id
= rocker_port
->internal_vlan_id
;
3968 u32 priority
= fi
->fib_priority
;
3969 enum rocker_of_dpa_table_id goto_tbl
=
3970 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY
;
3977 /* XXX support ECMP */
3980 nh_on_port
= (fi
->fib_dev
== rocker_port
->dev
);
3981 has_gw
= !!nh
->nh_gw
;
3983 if (has_gw
&& nh_on_port
) {
3984 err
= rocker_port_ipv4_nh(rocker_port
, trans
, flags
,
3989 group_id
= ROCKER_GROUP_L3_UNICAST(index
);
3991 /* Send to CPU for processing */
3992 group_id
= ROCKER_GROUP_L2_INTERFACE(internal_vlan_id
, 0);
3995 err
= rocker_flow_tbl_ucast4_routing(rocker_port
, trans
, eth_type
, dst
,
3996 dst_mask
, priority
, goto_tbl
,
3999 netdev_err(rocker_port
->dev
, "Error (%d) IPv4 route %pI4\n",
4009 static int rocker_port_open(struct net_device
*dev
)
4011 struct rocker_port
*rocker_port
= netdev_priv(dev
);
4014 err
= rocker_port_dma_rings_init(rocker_port
);
4018 err
= request_irq(rocker_msix_tx_vector(rocker_port
),
4019 rocker_tx_irq_handler
, 0,
4020 rocker_driver_name
, rocker_port
);
4022 netdev_err(rocker_port
->dev
, "cannot assign tx irq\n");
4023 goto err_request_tx_irq
;
4026 err
= request_irq(rocker_msix_rx_vector(rocker_port
),
4027 rocker_rx_irq_handler
, 0,
4028 rocker_driver_name
, rocker_port
);
4030 netdev_err(rocker_port
->dev
, "cannot assign rx irq\n");
4031 goto err_request_rx_irq
;
4034 err
= rocker_port_fwd_enable(rocker_port
, SWITCHDEV_TRANS_NONE
, 0);
4036 goto err_fwd_enable
;
4038 napi_enable(&rocker_port
->napi_tx
);
4039 napi_enable(&rocker_port
->napi_rx
);
4040 if (!dev
->proto_down
)
4041 rocker_port_set_enable(rocker_port
, true);
4042 netif_start_queue(dev
);
4046 free_irq(rocker_msix_rx_vector(rocker_port
), rocker_port
);
4048 free_irq(rocker_msix_tx_vector(rocker_port
), rocker_port
);
4050 rocker_port_dma_rings_fini(rocker_port
);
4054 static int rocker_port_stop(struct net_device
*dev
)
4056 struct rocker_port
*rocker_port
= netdev_priv(dev
);
4058 netif_stop_queue(dev
);
4059 rocker_port_set_enable(rocker_port
, false);
4060 napi_disable(&rocker_port
->napi_rx
);
4061 napi_disable(&rocker_port
->napi_tx
);
4062 rocker_port_fwd_disable(rocker_port
, SWITCHDEV_TRANS_NONE
,
4063 ROCKER_OP_FLAG_NOWAIT
);
4064 free_irq(rocker_msix_rx_vector(rocker_port
), rocker_port
);
4065 free_irq(rocker_msix_tx_vector(rocker_port
), rocker_port
);
4066 rocker_port_dma_rings_fini(rocker_port
);
4071 static void rocker_tx_desc_frags_unmap(const struct rocker_port
*rocker_port
,
4072 const struct rocker_desc_info
*desc_info
)
4074 const struct rocker
*rocker
= rocker_port
->rocker
;
4075 struct pci_dev
*pdev
= rocker
->pdev
;
4076 const struct rocker_tlv
*attrs
[ROCKER_TLV_TX_MAX
+ 1];
4077 struct rocker_tlv
*attr
;
4080 rocker_tlv_parse_desc(attrs
, ROCKER_TLV_TX_MAX
, desc_info
);
4081 if (!attrs
[ROCKER_TLV_TX_FRAGS
])
4083 rocker_tlv_for_each_nested(attr
, attrs
[ROCKER_TLV_TX_FRAGS
], rem
) {
4084 const struct rocker_tlv
*frag_attrs
[ROCKER_TLV_TX_FRAG_ATTR_MAX
+ 1];
4085 dma_addr_t dma_handle
;
4088 if (rocker_tlv_type(attr
) != ROCKER_TLV_TX_FRAG
)
4090 rocker_tlv_parse_nested(frag_attrs
, ROCKER_TLV_TX_FRAG_ATTR_MAX
,
4092 if (!frag_attrs
[ROCKER_TLV_TX_FRAG_ATTR_ADDR
] ||
4093 !frag_attrs
[ROCKER_TLV_TX_FRAG_ATTR_LEN
])
4095 dma_handle
= rocker_tlv_get_u64(frag_attrs
[ROCKER_TLV_TX_FRAG_ATTR_ADDR
]);
4096 len
= rocker_tlv_get_u16(frag_attrs
[ROCKER_TLV_TX_FRAG_ATTR_LEN
]);
4097 pci_unmap_single(pdev
, dma_handle
, len
, DMA_TO_DEVICE
);
4101 static int rocker_tx_desc_frag_map_put(const struct rocker_port
*rocker_port
,
4102 struct rocker_desc_info
*desc_info
,
4103 char *buf
, size_t buf_len
)
4105 const struct rocker
*rocker
= rocker_port
->rocker
;
4106 struct pci_dev
*pdev
= rocker
->pdev
;
4107 dma_addr_t dma_handle
;
4108 struct rocker_tlv
*frag
;
4110 dma_handle
= pci_map_single(pdev
, buf
, buf_len
, DMA_TO_DEVICE
);
4111 if (unlikely(pci_dma_mapping_error(pdev
, dma_handle
))) {
4112 if (net_ratelimit())
4113 netdev_err(rocker_port
->dev
, "failed to dma map tx frag\n");
4116 frag
= rocker_tlv_nest_start(desc_info
, ROCKER_TLV_TX_FRAG
);
4119 if (rocker_tlv_put_u64(desc_info
, ROCKER_TLV_TX_FRAG_ATTR_ADDR
,
4122 if (rocker_tlv_put_u16(desc_info
, ROCKER_TLV_TX_FRAG_ATTR_LEN
,
4125 rocker_tlv_nest_end(desc_info
, frag
);
4129 rocker_tlv_nest_cancel(desc_info
, frag
);
4131 pci_unmap_single(pdev
, dma_handle
, buf_len
, DMA_TO_DEVICE
);
4135 static netdev_tx_t
rocker_port_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
4137 struct rocker_port
*rocker_port
= netdev_priv(dev
);
4138 struct rocker
*rocker
= rocker_port
->rocker
;
4139 struct rocker_desc_info
*desc_info
;
4140 struct rocker_tlv
*frags
;
4144 desc_info
= rocker_desc_head_get(&rocker_port
->tx_ring
);
4145 if (unlikely(!desc_info
)) {
4146 if (net_ratelimit())
4147 netdev_err(dev
, "tx ring full when queue awake\n");
4148 return NETDEV_TX_BUSY
;
4151 rocker_desc_cookie_ptr_set(desc_info
, skb
);
4153 frags
= rocker_tlv_nest_start(desc_info
, ROCKER_TLV_TX_FRAGS
);
4156 err
= rocker_tx_desc_frag_map_put(rocker_port
, desc_info
,
4157 skb
->data
, skb_headlen(skb
));
4160 if (skb_shinfo(skb
)->nr_frags
> ROCKER_TX_FRAGS_MAX
)
4163 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
4164 const skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
4166 err
= rocker_tx_desc_frag_map_put(rocker_port
, desc_info
,
4167 skb_frag_address(frag
),
4168 skb_frag_size(frag
));
4172 rocker_tlv_nest_end(desc_info
, frags
);
4174 rocker_desc_gen_clear(desc_info
);
4175 rocker_desc_head_set(rocker
, &rocker_port
->tx_ring
, desc_info
);
4177 desc_info
= rocker_desc_head_get(&rocker_port
->tx_ring
);
4179 netif_stop_queue(dev
);
4181 return NETDEV_TX_OK
;
4184 rocker_tx_desc_frags_unmap(rocker_port
, desc_info
);
4186 rocker_tlv_nest_cancel(desc_info
, frags
);
4189 dev
->stats
.tx_dropped
++;
4191 return NETDEV_TX_OK
;
4194 static int rocker_port_set_mac_address(struct net_device
*dev
, void *p
)
4196 struct sockaddr
*addr
= p
;
4197 struct rocker_port
*rocker_port
= netdev_priv(dev
);
4200 if (!is_valid_ether_addr(addr
->sa_data
))
4201 return -EADDRNOTAVAIL
;
4203 err
= rocker_cmd_set_port_settings_macaddr(rocker_port
, addr
->sa_data
);
4206 memcpy(dev
->dev_addr
, addr
->sa_data
, dev
->addr_len
);
4210 static int rocker_port_change_mtu(struct net_device
*dev
, int new_mtu
)
4212 struct rocker_port
*rocker_port
= netdev_priv(dev
);
4213 int running
= netif_running(dev
);
4216 #define ROCKER_PORT_MIN_MTU 68
4217 #define ROCKER_PORT_MAX_MTU 9000
4219 if (new_mtu
< ROCKER_PORT_MIN_MTU
|| new_mtu
> ROCKER_PORT_MAX_MTU
)
4223 rocker_port_stop(dev
);
4225 netdev_info(dev
, "MTU change from %d to %d\n", dev
->mtu
, new_mtu
);
4228 err
= rocker_cmd_set_port_settings_mtu(rocker_port
, new_mtu
);
4233 err
= rocker_port_open(dev
);
4238 static int rocker_port_get_phys_port_name(struct net_device
*dev
,
4239 char *buf
, size_t len
)
4241 struct rocker_port
*rocker_port
= netdev_priv(dev
);
4242 struct port_name name
= { .buf
= buf
, .len
= len
};
4245 err
= rocker_cmd_exec(rocker_port
, SWITCHDEV_TRANS_NONE
, 0,
4246 rocker_cmd_get_port_settings_prep
, NULL
,
4247 rocker_cmd_get_port_settings_phys_name_proc
,
4250 return err
? -EOPNOTSUPP
: 0;
4253 static int rocker_port_change_proto_down(struct net_device
*dev
,
4256 struct rocker_port
*rocker_port
= netdev_priv(dev
);
4258 if (rocker_port
->dev
->flags
& IFF_UP
)
4259 rocker_port_set_enable(rocker_port
, !proto_down
);
4260 rocker_port
->dev
->proto_down
= proto_down
;
4264 static const struct net_device_ops rocker_port_netdev_ops
= {
4265 .ndo_open
= rocker_port_open
,
4266 .ndo_stop
= rocker_port_stop
,
4267 .ndo_start_xmit
= rocker_port_xmit
,
4268 .ndo_set_mac_address
= rocker_port_set_mac_address
,
4269 .ndo_change_mtu
= rocker_port_change_mtu
,
4270 .ndo_bridge_getlink
= switchdev_port_bridge_getlink
,
4271 .ndo_bridge_setlink
= switchdev_port_bridge_setlink
,
4272 .ndo_bridge_dellink
= switchdev_port_bridge_dellink
,
4273 .ndo_fdb_add
= switchdev_port_fdb_add
,
4274 .ndo_fdb_del
= switchdev_port_fdb_del
,
4275 .ndo_fdb_dump
= switchdev_port_fdb_dump
,
4276 .ndo_get_phys_port_name
= rocker_port_get_phys_port_name
,
4277 .ndo_change_proto_down
= rocker_port_change_proto_down
,
4280 /********************
4282 ********************/
4284 static int rocker_port_attr_get(struct net_device
*dev
,
4285 struct switchdev_attr
*attr
)
4287 const struct rocker_port
*rocker_port
= netdev_priv(dev
);
4288 const struct rocker
*rocker
= rocker_port
->rocker
;
4291 case SWITCHDEV_ATTR_PORT_PARENT_ID
:
4292 attr
->u
.ppid
.id_len
= sizeof(rocker
->hw
.id
);
4293 memcpy(&attr
->u
.ppid
.id
, &rocker
->hw
.id
, attr
->u
.ppid
.id_len
);
4295 case SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS
:
4296 attr
->u
.brport_flags
= rocker_port
->brport_flags
;
4305 static void rocker_port_trans_abort(const struct rocker_port
*rocker_port
)
4307 struct list_head
*mem
, *tmp
;
4309 list_for_each_safe(mem
, tmp
, &rocker_port
->trans_mem
) {
4315 static int rocker_port_brport_flags_set(struct rocker_port
*rocker_port
,
4316 enum switchdev_trans trans
,
4317 unsigned long brport_flags
)
4319 unsigned long orig_flags
;
4322 orig_flags
= rocker_port
->brport_flags
;
4323 rocker_port
->brport_flags
= brport_flags
;
4324 if ((orig_flags
^ rocker_port
->brport_flags
) & BR_LEARNING
)
4325 err
= rocker_port_set_learning(rocker_port
, trans
);
4327 if (trans
== SWITCHDEV_TRANS_PREPARE
)
4328 rocker_port
->brport_flags
= orig_flags
;
4333 static int rocker_port_attr_set(struct net_device
*dev
,
4334 struct switchdev_attr
*attr
)
4336 struct rocker_port
*rocker_port
= netdev_priv(dev
);
4339 switch (attr
->trans
) {
4340 case SWITCHDEV_TRANS_PREPARE
:
4341 BUG_ON(!list_empty(&rocker_port
->trans_mem
));
4343 case SWITCHDEV_TRANS_ABORT
:
4344 rocker_port_trans_abort(rocker_port
);
4351 case SWITCHDEV_ATTR_PORT_STP_STATE
:
4352 err
= rocker_port_stp_update(rocker_port
, attr
->trans
,
4353 ROCKER_OP_FLAG_NOWAIT
,
4356 case SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS
:
4357 err
= rocker_port_brport_flags_set(rocker_port
, attr
->trans
,
4358 attr
->u
.brport_flags
);
4368 static int rocker_port_vlan_add(struct rocker_port
*rocker_port
,
4369 enum switchdev_trans trans
, u16 vid
, u16 flags
)
4373 /* XXX deal with flags for PVID and untagged */
4375 err
= rocker_port_vlan(rocker_port
, trans
, 0, vid
);
4379 err
= rocker_port_router_mac(rocker_port
, trans
, 0, htons(vid
));
4381 rocker_port_vlan(rocker_port
, trans
,
4382 ROCKER_OP_FLAG_REMOVE
, vid
);
4387 static int rocker_port_vlans_add(struct rocker_port
*rocker_port
,
4388 enum switchdev_trans trans
,
4389 const struct switchdev_obj_vlan
*vlan
)
4394 for (vid
= vlan
->vid_begin
; vid
<= vlan
->vid_end
; vid
++) {
4395 err
= rocker_port_vlan_add(rocker_port
, trans
,
4404 static int rocker_port_fdb_add(struct rocker_port
*rocker_port
,
4405 enum switchdev_trans trans
,
4406 const struct switchdev_obj_fdb
*fdb
)
4408 __be16 vlan_id
= rocker_port_vid_to_vlan(rocker_port
, fdb
->vid
, NULL
);
4411 if (!rocker_port_is_bridged(rocker_port
))
4414 return rocker_port_fdb(rocker_port
, trans
, fdb
->addr
, vlan_id
, flags
);
4417 static int rocker_port_obj_add(struct net_device
*dev
,
4418 struct switchdev_obj
*obj
)
4420 struct rocker_port
*rocker_port
= netdev_priv(dev
);
4421 const struct switchdev_obj_ipv4_fib
*fib4
;
4424 switch (obj
->trans
) {
4425 case SWITCHDEV_TRANS_PREPARE
:
4426 BUG_ON(!list_empty(&rocker_port
->trans_mem
));
4428 case SWITCHDEV_TRANS_ABORT
:
4429 rocker_port_trans_abort(rocker_port
);
4436 case SWITCHDEV_OBJ_PORT_VLAN
:
4437 err
= rocker_port_vlans_add(rocker_port
, obj
->trans
,
4440 case SWITCHDEV_OBJ_IPV4_FIB
:
4441 fib4
= &obj
->u
.ipv4_fib
;
4442 err
= rocker_port_fib_ipv4(rocker_port
, obj
->trans
,
4443 htonl(fib4
->dst
), fib4
->dst_len
,
4444 fib4
->fi
, fib4
->tb_id
, 0);
4446 case SWITCHDEV_OBJ_PORT_FDB
:
4447 err
= rocker_port_fdb_add(rocker_port
, obj
->trans
, &obj
->u
.fdb
);
4457 static int rocker_port_vlan_del(struct rocker_port
*rocker_port
,
4462 err
= rocker_port_router_mac(rocker_port
, SWITCHDEV_TRANS_NONE
,
4463 ROCKER_OP_FLAG_REMOVE
, htons(vid
));
4467 return rocker_port_vlan(rocker_port
, SWITCHDEV_TRANS_NONE
,
4468 ROCKER_OP_FLAG_REMOVE
, vid
);
4471 static int rocker_port_vlans_del(struct rocker_port
*rocker_port
,
4472 const struct switchdev_obj_vlan
*vlan
)
4477 for (vid
= vlan
->vid_begin
; vid
<= vlan
->vid_end
; vid
++) {
4478 err
= rocker_port_vlan_del(rocker_port
, vid
, vlan
->flags
);
4486 static int rocker_port_fdb_del(struct rocker_port
*rocker_port
,
4487 enum switchdev_trans trans
,
4488 const struct switchdev_obj_fdb
*fdb
)
4490 __be16 vlan_id
= rocker_port_vid_to_vlan(rocker_port
, fdb
->vid
, NULL
);
4491 int flags
= ROCKER_OP_FLAG_NOWAIT
| ROCKER_OP_FLAG_REMOVE
;
4493 if (!rocker_port_is_bridged(rocker_port
))
4496 return rocker_port_fdb(rocker_port
, trans
, fdb
->addr
, vlan_id
, flags
);
4499 static int rocker_port_obj_del(struct net_device
*dev
,
4500 struct switchdev_obj
*obj
)
4502 struct rocker_port
*rocker_port
= netdev_priv(dev
);
4503 const struct switchdev_obj_ipv4_fib
*fib4
;
4507 case SWITCHDEV_OBJ_PORT_VLAN
:
4508 err
= rocker_port_vlans_del(rocker_port
, &obj
->u
.vlan
);
4510 case SWITCHDEV_OBJ_IPV4_FIB
:
4511 fib4
= &obj
->u
.ipv4_fib
;
4512 err
= rocker_port_fib_ipv4(rocker_port
, SWITCHDEV_TRANS_NONE
,
4513 htonl(fib4
->dst
), fib4
->dst_len
,
4514 fib4
->fi
, fib4
->tb_id
,
4515 ROCKER_OP_FLAG_REMOVE
);
4517 case SWITCHDEV_OBJ_PORT_FDB
:
4518 err
= rocker_port_fdb_del(rocker_port
, obj
->trans
, &obj
->u
.fdb
);
4528 static int rocker_port_fdb_dump(const struct rocker_port
*rocker_port
,
4529 struct switchdev_obj
*obj
)
4531 struct rocker
*rocker
= rocker_port
->rocker
;
4532 struct switchdev_obj_fdb
*fdb
= &obj
->u
.fdb
;
4533 struct rocker_fdb_tbl_entry
*found
;
4534 struct hlist_node
*tmp
;
4535 unsigned long lock_flags
;
4539 spin_lock_irqsave(&rocker
->fdb_tbl_lock
, lock_flags
);
4540 hash_for_each_safe(rocker
->fdb_tbl
, bkt
, tmp
, found
, entry
) {
4541 if (found
->key
.pport
!= rocker_port
->pport
)
4543 fdb
->addr
= found
->key
.addr
;
4544 fdb
->vid
= rocker_port_vlan_to_vid(rocker_port
,
4545 found
->key
.vlan_id
);
4546 err
= obj
->cb(rocker_port
->dev
, obj
);
4550 spin_unlock_irqrestore(&rocker
->fdb_tbl_lock
, lock_flags
);
4555 static int rocker_port_vlan_dump(const struct rocker_port
*rocker_port
,
4556 struct switchdev_obj
*obj
)
4558 struct switchdev_obj_vlan
*vlan
= &obj
->u
.vlan
;
4562 for (vid
= 1; vid
< VLAN_N_VID
; vid
++) {
4563 if (!test_bit(vid
, rocker_port
->vlan_bitmap
))
4566 if (rocker_vlan_id_is_internal(htons(vid
)))
4567 vlan
->flags
|= BRIDGE_VLAN_INFO_PVID
;
4568 vlan
->vid_begin
= vlan
->vid_end
= vid
;
4569 err
= obj
->cb(rocker_port
->dev
, obj
);
4577 static int rocker_port_obj_dump(struct net_device
*dev
,
4578 struct switchdev_obj
*obj
)
4580 const struct rocker_port
*rocker_port
= netdev_priv(dev
);
4584 case SWITCHDEV_OBJ_PORT_FDB
:
4585 err
= rocker_port_fdb_dump(rocker_port
, obj
);
4587 case SWITCHDEV_OBJ_PORT_VLAN
:
4588 err
= rocker_port_vlan_dump(rocker_port
, obj
);
4598 static const struct switchdev_ops rocker_port_switchdev_ops
= {
4599 .switchdev_port_attr_get
= rocker_port_attr_get
,
4600 .switchdev_port_attr_set
= rocker_port_attr_set
,
4601 .switchdev_port_obj_add
= rocker_port_obj_add
,
4602 .switchdev_port_obj_del
= rocker_port_obj_del
,
4603 .switchdev_port_obj_dump
= rocker_port_obj_dump
,
4606 /********************
4608 ********************/
/* ethtool get_settings: fetch link settings from the device. */
static int rocker_port_get_settings(struct net_device *dev,
				    struct ethtool_cmd *ecmd)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd);
}
/* ethtool set_settings: push link settings to the device. */
static int rocker_port_set_settings(struct net_device *dev,
				    struct ethtool_cmd *ecmd)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd);
}
4626 static void rocker_port_get_drvinfo(struct net_device
*dev
,
4627 struct ethtool_drvinfo
*drvinfo
)
4629 strlcpy(drvinfo
->driver
, rocker_driver_name
, sizeof(drvinfo
->driver
));
4630 strlcpy(drvinfo
->version
, UTS_RELEASE
, sizeof(drvinfo
->version
));
4633 static struct rocker_port_stats
{
4634 char str
[ETH_GSTRING_LEN
];
4636 } rocker_port_stats
[] = {
4637 { "rx_packets", ROCKER_TLV_CMD_PORT_STATS_RX_PKTS
, },
4638 { "rx_bytes", ROCKER_TLV_CMD_PORT_STATS_RX_BYTES
, },
4639 { "rx_dropped", ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED
, },
4640 { "rx_errors", ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS
, },
4642 { "tx_packets", ROCKER_TLV_CMD_PORT_STATS_TX_PKTS
, },
4643 { "tx_bytes", ROCKER_TLV_CMD_PORT_STATS_TX_BYTES
, },
4644 { "tx_dropped", ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED
, },
4645 { "tx_errors", ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS
, },
4648 #define ROCKER_PORT_STATS_LEN ARRAY_SIZE(rocker_port_stats)
4650 static void rocker_port_get_strings(struct net_device
*netdev
, u32 stringset
,
4656 switch (stringset
) {
4658 for (i
= 0; i
< ARRAY_SIZE(rocker_port_stats
); i
++) {
4659 memcpy(p
, rocker_port_stats
[i
].str
, ETH_GSTRING_LEN
);
4660 p
+= ETH_GSTRING_LEN
;
4667 rocker_cmd_get_port_stats_prep(const struct rocker_port
*rocker_port
,
4668 struct rocker_desc_info
*desc_info
,
4671 struct rocker_tlv
*cmd_stats
;
4673 if (rocker_tlv_put_u16(desc_info
, ROCKER_TLV_CMD_TYPE
,
4674 ROCKER_TLV_CMD_TYPE_GET_PORT_STATS
))
4677 cmd_stats
= rocker_tlv_nest_start(desc_info
, ROCKER_TLV_CMD_INFO
);
4681 if (rocker_tlv_put_u32(desc_info
, ROCKER_TLV_CMD_PORT_STATS_PPORT
,
4682 rocker_port
->pport
))
4685 rocker_tlv_nest_end(desc_info
, cmd_stats
);
4691 rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port
*rocker_port
,
4692 const struct rocker_desc_info
*desc_info
,
4695 const struct rocker_tlv
*attrs
[ROCKER_TLV_CMD_MAX
+ 1];
4696 const struct rocker_tlv
*stats_attrs
[ROCKER_TLV_CMD_PORT_STATS_MAX
+ 1];
4697 const struct rocker_tlv
*pattr
;
4702 rocker_tlv_parse_desc(attrs
, ROCKER_TLV_CMD_MAX
, desc_info
);
4704 if (!attrs
[ROCKER_TLV_CMD_INFO
])
4707 rocker_tlv_parse_nested(stats_attrs
, ROCKER_TLV_CMD_PORT_STATS_MAX
,
4708 attrs
[ROCKER_TLV_CMD_INFO
]);
4710 if (!stats_attrs
[ROCKER_TLV_CMD_PORT_STATS_PPORT
])
4713 pport
= rocker_tlv_get_u32(stats_attrs
[ROCKER_TLV_CMD_PORT_STATS_PPORT
]);
4714 if (pport
!= rocker_port
->pport
)
4717 for (i
= 0; i
< ARRAY_SIZE(rocker_port_stats
); i
++) {
4718 pattr
= stats_attrs
[rocker_port_stats
[i
].type
];
4722 data
[i
] = rocker_tlv_get_u64(pattr
);
4728 static int rocker_cmd_get_port_stats_ethtool(struct rocker_port
*rocker_port
,
4731 return rocker_cmd_exec(rocker_port
, SWITCHDEV_TRANS_NONE
, 0,
4732 rocker_cmd_get_port_stats_prep
, NULL
,
4733 rocker_cmd_get_port_stats_ethtool_proc
,
4737 static void rocker_port_get_stats(struct net_device
*dev
,
4738 struct ethtool_stats
*stats
, u64
*data
)
4740 struct rocker_port
*rocker_port
= netdev_priv(dev
);
4742 if (rocker_cmd_get_port_stats_ethtool(rocker_port
, data
) != 0) {
4745 for (i
= 0; i
< ARRAY_SIZE(rocker_port_stats
); ++i
)
4750 static int rocker_port_get_sset_count(struct net_device
*netdev
, int sset
)
4754 return ROCKER_PORT_STATS_LEN
;
4760 static const struct ethtool_ops rocker_port_ethtool_ops
= {
4761 .get_settings
= rocker_port_get_settings
,
4762 .set_settings
= rocker_port_set_settings
,
4763 .get_drvinfo
= rocker_port_get_drvinfo
,
4764 .get_link
= ethtool_op_get_link
,
4765 .get_strings
= rocker_port_get_strings
,
4766 .get_ethtool_stats
= rocker_port_get_stats
,
4767 .get_sset_count
= rocker_port_get_sset_count
,
4774 static struct rocker_port
*rocker_port_napi_tx_get(struct napi_struct
*napi
)
4776 return container_of(napi
, struct rocker_port
, napi_tx
);
4779 static int rocker_port_poll_tx(struct napi_struct
*napi
, int budget
)
4781 struct rocker_port
*rocker_port
= rocker_port_napi_tx_get(napi
);
4782 const struct rocker
*rocker
= rocker_port
->rocker
;
4783 const struct rocker_desc_info
*desc_info
;
4787 /* Cleanup tx descriptors */
4788 while ((desc_info
= rocker_desc_tail_get(&rocker_port
->tx_ring
))) {
4789 struct sk_buff
*skb
;
4791 err
= rocker_desc_err(desc_info
);
4792 if (err
&& net_ratelimit())
4793 netdev_err(rocker_port
->dev
, "tx desc received with err %d\n",
4795 rocker_tx_desc_frags_unmap(rocker_port
, desc_info
);
4797 skb
= rocker_desc_cookie_ptr_get(desc_info
);
4799 rocker_port
->dev
->stats
.tx_packets
++;
4800 rocker_port
->dev
->stats
.tx_bytes
+= skb
->len
;
4802 rocker_port
->dev
->stats
.tx_errors
++;
4805 dev_kfree_skb_any(skb
);
4809 if (credits
&& netif_queue_stopped(rocker_port
->dev
))
4810 netif_wake_queue(rocker_port
->dev
);
4812 napi_complete(napi
);
4813 rocker_dma_ring_credits_set(rocker
, &rocker_port
->tx_ring
, credits
);
4818 static int rocker_port_rx_proc(const struct rocker
*rocker
,
4819 const struct rocker_port
*rocker_port
,
4820 struct rocker_desc_info
*desc_info
)
4822 const struct rocker_tlv
*attrs
[ROCKER_TLV_RX_MAX
+ 1];
4823 struct sk_buff
*skb
= rocker_desc_cookie_ptr_get(desc_info
);
4830 rocker_tlv_parse_desc(attrs
, ROCKER_TLV_RX_MAX
, desc_info
);
4831 if (!attrs
[ROCKER_TLV_RX_FRAG_LEN
])
4833 if (attrs
[ROCKER_TLV_RX_FLAGS
])
4834 rx_flags
= rocker_tlv_get_u16(attrs
[ROCKER_TLV_RX_FLAGS
]);
4836 rocker_dma_rx_ring_skb_unmap(rocker
, attrs
);
4838 rx_len
= rocker_tlv_get_u16(attrs
[ROCKER_TLV_RX_FRAG_LEN
]);
4839 skb_put(skb
, rx_len
);
4840 skb
->protocol
= eth_type_trans(skb
, rocker_port
->dev
);
4842 if (rx_flags
& ROCKER_RX_FLAGS_FWD_OFFLOAD
)
4843 skb
->offload_fwd_mark
= rocker_port
->dev
->offload_fwd_mark
;
4845 rocker_port
->dev
->stats
.rx_packets
++;
4846 rocker_port
->dev
->stats
.rx_bytes
+= skb
->len
;
4848 netif_receive_skb(skb
);
4850 return rocker_dma_rx_ring_skb_alloc(rocker_port
, desc_info
);
4853 static struct rocker_port
*rocker_port_napi_rx_get(struct napi_struct
*napi
)
4855 return container_of(napi
, struct rocker_port
, napi_rx
);
4858 static int rocker_port_poll_rx(struct napi_struct
*napi
, int budget
)
4860 struct rocker_port
*rocker_port
= rocker_port_napi_rx_get(napi
);
4861 const struct rocker
*rocker
= rocker_port
->rocker
;
4862 struct rocker_desc_info
*desc_info
;
4866 /* Process rx descriptors */
4867 while (credits
< budget
&&
4868 (desc_info
= rocker_desc_tail_get(&rocker_port
->rx_ring
))) {
4869 err
= rocker_desc_err(desc_info
);
4871 if (net_ratelimit())
4872 netdev_err(rocker_port
->dev
, "rx desc received with err %d\n",
4875 err
= rocker_port_rx_proc(rocker
, rocker_port
,
4877 if (err
&& net_ratelimit())
4878 netdev_err(rocker_port
->dev
, "rx processing failed with err %d\n",
4882 rocker_port
->dev
->stats
.rx_errors
++;
4884 rocker_desc_gen_clear(desc_info
);
4885 rocker_desc_head_set(rocker
, &rocker_port
->rx_ring
, desc_info
);
4889 if (credits
< budget
)
4890 napi_complete(napi
);
4892 rocker_dma_ring_credits_set(rocker
, &rocker_port
->rx_ring
, credits
);
4901 static void rocker_carrier_init(const struct rocker_port
*rocker_port
)
4903 const struct rocker
*rocker
= rocker_port
->rocker
;
4904 u64 link_status
= rocker_read64(rocker
, PORT_PHYS_LINK_STATUS
);
4907 link_up
= link_status
& (1 << rocker_port
->pport
);
4909 netif_carrier_on(rocker_port
->dev
);
4911 netif_carrier_off(rocker_port
->dev
);
4914 static void rocker_remove_ports(const struct rocker
*rocker
)
4916 struct rocker_port
*rocker_port
;
4919 for (i
= 0; i
< rocker
->port_count
; i
++) {
4920 rocker_port
= rocker
->ports
[i
];
4923 rocker_port_ig_tbl(rocker_port
, SWITCHDEV_TRANS_NONE
,
4924 ROCKER_OP_FLAG_REMOVE
);
4925 unregister_netdev(rocker_port
->dev
);
4927 kfree(rocker
->ports
);
4930 static void rocker_port_dev_addr_init(struct rocker_port
*rocker_port
)
4932 const struct rocker
*rocker
= rocker_port
->rocker
;
4933 const struct pci_dev
*pdev
= rocker
->pdev
;
4936 err
= rocker_cmd_get_port_settings_macaddr(rocker_port
,
4937 rocker_port
->dev
->dev_addr
);
4939 dev_warn(&pdev
->dev
, "failed to get mac address, using random\n");
4940 eth_hw_addr_random(rocker_port
->dev
);
4944 static int rocker_probe_port(struct rocker
*rocker
, unsigned int port_number
)
4946 const struct pci_dev
*pdev
= rocker
->pdev
;
4947 struct rocker_port
*rocker_port
;
4948 struct net_device
*dev
;
4949 u16 untagged_vid
= 0;
4952 dev
= alloc_etherdev(sizeof(struct rocker_port
));
4955 rocker_port
= netdev_priv(dev
);
4956 rocker_port
->dev
= dev
;
4957 rocker_port
->rocker
= rocker
;
4958 rocker_port
->port_number
= port_number
;
4959 rocker_port
->pport
= port_number
+ 1;
4960 rocker_port
->brport_flags
= BR_LEARNING
| BR_LEARNING_SYNC
;
4961 INIT_LIST_HEAD(&rocker_port
->trans_mem
);
4963 rocker_port_dev_addr_init(rocker_port
);
4964 dev
->netdev_ops
= &rocker_port_netdev_ops
;
4965 dev
->ethtool_ops
= &rocker_port_ethtool_ops
;
4966 dev
->switchdev_ops
= &rocker_port_switchdev_ops
;
4967 netif_napi_add(dev
, &rocker_port
->napi_tx
, rocker_port_poll_tx
,
4969 netif_napi_add(dev
, &rocker_port
->napi_rx
, rocker_port_poll_rx
,
4971 rocker_carrier_init(rocker_port
);
4973 dev
->features
|= NETIF_F_NETNS_LOCAL
;
4975 err
= register_netdev(dev
);
4977 dev_err(&pdev
->dev
, "register_netdev failed\n");
4978 goto err_register_netdev
;
4980 rocker
->ports
[port_number
] = rocker_port
;
4982 switchdev_port_fwd_mark_set(rocker_port
->dev
, NULL
, false);
4984 rocker_port_set_learning(rocker_port
, SWITCHDEV_TRANS_NONE
);
4986 err
= rocker_port_ig_tbl(rocker_port
, SWITCHDEV_TRANS_NONE
, 0);
4988 dev_err(&pdev
->dev
, "install ig port table failed\n");
4989 goto err_port_ig_tbl
;
4992 rocker_port
->internal_vlan_id
=
4993 rocker_port_internal_vlan_id_get(rocker_port
, dev
->ifindex
);
4995 err
= rocker_port_vlan_add(rocker_port
, SWITCHDEV_TRANS_NONE
,
4998 netdev_err(rocker_port
->dev
, "install untagged VLAN failed\n");
4999 goto err_untagged_vlan
;
5005 rocker_port_ig_tbl(rocker_port
, SWITCHDEV_TRANS_NONE
,
5006 ROCKER_OP_FLAG_REMOVE
);
5008 unregister_netdev(dev
);
5009 err_register_netdev
:
5014 static int rocker_probe_ports(struct rocker
*rocker
)
5020 alloc_size
= sizeof(struct rocker_port
*) * rocker
->port_count
;
5021 rocker
->ports
= kzalloc(alloc_size
, GFP_KERNEL
);
5024 for (i
= 0; i
< rocker
->port_count
; i
++) {
5025 err
= rocker_probe_port(rocker
, i
);
5032 rocker_remove_ports(rocker
);
5036 static int rocker_msix_init(struct rocker
*rocker
)
5038 struct pci_dev
*pdev
= rocker
->pdev
;
5043 msix_entries
= pci_msix_vec_count(pdev
);
5044 if (msix_entries
< 0)
5045 return msix_entries
;
5047 if (msix_entries
!= ROCKER_MSIX_VEC_COUNT(rocker
->port_count
))
5050 rocker
->msix_entries
= kmalloc_array(msix_entries
,
5051 sizeof(struct msix_entry
),
5053 if (!rocker
->msix_entries
)
5056 for (i
= 0; i
< msix_entries
; i
++)
5057 rocker
->msix_entries
[i
].entry
= i
;
5059 err
= pci_enable_msix_exact(pdev
, rocker
->msix_entries
, msix_entries
);
5061 goto err_enable_msix
;
5066 kfree(rocker
->msix_entries
);
5070 static void rocker_msix_fini(const struct rocker
*rocker
)
5072 pci_disable_msix(rocker
->pdev
);
5073 kfree(rocker
->msix_entries
);
5076 static int rocker_probe(struct pci_dev
*pdev
, const struct pci_device_id
*id
)
5078 struct rocker
*rocker
;
5081 rocker
= kzalloc(sizeof(*rocker
), GFP_KERNEL
);
5085 err
= pci_enable_device(pdev
);
5087 dev_err(&pdev
->dev
, "pci_enable_device failed\n");
5088 goto err_pci_enable_device
;
5091 err
= pci_request_regions(pdev
, rocker_driver_name
);
5093 dev_err(&pdev
->dev
, "pci_request_regions failed\n");
5094 goto err_pci_request_regions
;
5097 err
= pci_set_dma_mask(pdev
, DMA_BIT_MASK(64));
5099 err
= pci_set_consistent_dma_mask(pdev
, DMA_BIT_MASK(64));
5101 dev_err(&pdev
->dev
, "pci_set_consistent_dma_mask failed\n");
5102 goto err_pci_set_dma_mask
;
5105 err
= pci_set_dma_mask(pdev
, DMA_BIT_MASK(32));
5107 dev_err(&pdev
->dev
, "pci_set_dma_mask failed\n");
5108 goto err_pci_set_dma_mask
;
5112 if (pci_resource_len(pdev
, 0) < ROCKER_PCI_BAR0_SIZE
) {
5113 dev_err(&pdev
->dev
, "invalid PCI region size\n");
5115 goto err_pci_resource_len_check
;
5118 rocker
->hw_addr
= ioremap(pci_resource_start(pdev
, 0),
5119 pci_resource_len(pdev
, 0));
5120 if (!rocker
->hw_addr
) {
5121 dev_err(&pdev
->dev
, "ioremap failed\n");
5125 pci_set_master(pdev
);
5127 rocker
->pdev
= pdev
;
5128 pci_set_drvdata(pdev
, rocker
);
5130 rocker
->port_count
= rocker_read32(rocker
, PORT_PHYS_COUNT
);
5132 err
= rocker_msix_init(rocker
);
5134 dev_err(&pdev
->dev
, "MSI-X init failed\n");
5138 err
= rocker_basic_hw_test(rocker
);
5140 dev_err(&pdev
->dev
, "basic hw test failed\n");
5141 goto err_basic_hw_test
;
5144 rocker_write32(rocker
, CONTROL
, ROCKER_CONTROL_RESET
);
5146 err
= rocker_dma_rings_init(rocker
);
5148 goto err_dma_rings_init
;
5150 err
= request_irq(rocker_msix_vector(rocker
, ROCKER_MSIX_VEC_CMD
),
5151 rocker_cmd_irq_handler
, 0,
5152 rocker_driver_name
, rocker
);
5154 dev_err(&pdev
->dev
, "cannot assign cmd irq\n");
5155 goto err_request_cmd_irq
;
5158 err
= request_irq(rocker_msix_vector(rocker
, ROCKER_MSIX_VEC_EVENT
),
5159 rocker_event_irq_handler
, 0,
5160 rocker_driver_name
, rocker
);
5162 dev_err(&pdev
->dev
, "cannot assign event irq\n");
5163 goto err_request_event_irq
;
5166 rocker
->hw
.id
= rocker_read64(rocker
, SWITCH_ID
);
5168 err
= rocker_init_tbls(rocker
);
5170 dev_err(&pdev
->dev
, "cannot init rocker tables\n");
5174 err
= rocker_probe_ports(rocker
);
5176 dev_err(&pdev
->dev
, "failed to probe ports\n");
5177 goto err_probe_ports
;
5180 dev_info(&pdev
->dev
, "Rocker switch with id %016llx\n", rocker
->hw
.id
);
5185 rocker_free_tbls(rocker
);
5187 free_irq(rocker_msix_vector(rocker
, ROCKER_MSIX_VEC_EVENT
), rocker
);
5188 err_request_event_irq
:
5189 free_irq(rocker_msix_vector(rocker
, ROCKER_MSIX_VEC_CMD
), rocker
);
5190 err_request_cmd_irq
:
5191 rocker_dma_rings_fini(rocker
);
5194 rocker_msix_fini(rocker
);
5196 iounmap(rocker
->hw_addr
);
5198 err_pci_resource_len_check
:
5199 err_pci_set_dma_mask
:
5200 pci_release_regions(pdev
);
5201 err_pci_request_regions
:
5202 pci_disable_device(pdev
);
5203 err_pci_enable_device
:
5208 static void rocker_remove(struct pci_dev
*pdev
)
5210 struct rocker
*rocker
= pci_get_drvdata(pdev
);
5212 rocker_free_tbls(rocker
);
5213 rocker_write32(rocker
, CONTROL
, ROCKER_CONTROL_RESET
);
5214 rocker_remove_ports(rocker
);
5215 free_irq(rocker_msix_vector(rocker
, ROCKER_MSIX_VEC_EVENT
), rocker
);
5216 free_irq(rocker_msix_vector(rocker
, ROCKER_MSIX_VEC_CMD
), rocker
);
5217 rocker_dma_rings_fini(rocker
);
5218 rocker_msix_fini(rocker
);
5219 iounmap(rocker
->hw_addr
);
5220 pci_release_regions(rocker
->pdev
);
5221 pci_disable_device(rocker
->pdev
);
5225 static struct pci_driver rocker_pci_driver
= {
5226 .name
= rocker_driver_name
,
5227 .id_table
= rocker_pci_id_table
,
5228 .probe
= rocker_probe
,
5229 .remove
= rocker_remove
,
5232 /************************************
5233 * Net device notifier event handler
5234 ************************************/
5236 static bool rocker_port_dev_check(const struct net_device
*dev
)
5238 return dev
->netdev_ops
== &rocker_port_netdev_ops
;
5241 static int rocker_port_bridge_join(struct rocker_port
*rocker_port
,
5242 struct net_device
*bridge
)
5244 u16 untagged_vid
= 0;
5247 /* Port is joining bridge, so the internal VLAN for the
5248 * port is going to change to the bridge internal VLAN.
5249 * Let's remove untagged VLAN (vid=0) from port and
5250 * re-add once internal VLAN has changed.
5253 err
= rocker_port_vlan_del(rocker_port
, untagged_vid
, 0);
5257 rocker_port_internal_vlan_id_put(rocker_port
,
5258 rocker_port
->dev
->ifindex
);
5259 rocker_port
->internal_vlan_id
=
5260 rocker_port_internal_vlan_id_get(rocker_port
, bridge
->ifindex
);
5262 rocker_port
->bridge_dev
= bridge
;
5263 switchdev_port_fwd_mark_set(rocker_port
->dev
, bridge
, true);
5265 return rocker_port_vlan_add(rocker_port
, SWITCHDEV_TRANS_NONE
,
5269 static int rocker_port_bridge_leave(struct rocker_port
*rocker_port
)
5271 u16 untagged_vid
= 0;
5274 err
= rocker_port_vlan_del(rocker_port
, untagged_vid
, 0);
5278 rocker_port_internal_vlan_id_put(rocker_port
,
5279 rocker_port
->bridge_dev
->ifindex
);
5280 rocker_port
->internal_vlan_id
=
5281 rocker_port_internal_vlan_id_get(rocker_port
,
5282 rocker_port
->dev
->ifindex
);
5284 switchdev_port_fwd_mark_set(rocker_port
->dev
, rocker_port
->bridge_dev
,
5286 rocker_port
->bridge_dev
= NULL
;
5288 err
= rocker_port_vlan_add(rocker_port
, SWITCHDEV_TRANS_NONE
,
5293 if (rocker_port
->dev
->flags
& IFF_UP
)
5294 err
= rocker_port_fwd_enable(rocker_port
,
5295 SWITCHDEV_TRANS_NONE
, 0);
5301 static int rocker_port_ovs_changed(struct rocker_port
*rocker_port
,
5302 struct net_device
*master
)
5306 rocker_port
->bridge_dev
= master
;
5308 err
= rocker_port_fwd_disable(rocker_port
, SWITCHDEV_TRANS_NONE
, 0);
5311 err
= rocker_port_fwd_enable(rocker_port
, SWITCHDEV_TRANS_NONE
, 0);
5316 static int rocker_port_master_changed(struct net_device
*dev
)
5318 struct rocker_port
*rocker_port
= netdev_priv(dev
);
5319 struct net_device
*master
= netdev_master_upper_dev_get(dev
);
5322 /* N.B: Do nothing if the type of master is not supported */
5323 if (master
&& master
->rtnl_link_ops
) {
5324 if (!strcmp(master
->rtnl_link_ops
->kind
, "bridge"))
5325 err
= rocker_port_bridge_join(rocker_port
, master
);
5326 else if (!strcmp(master
->rtnl_link_ops
->kind
, "openvswitch"))
5327 err
= rocker_port_ovs_changed(rocker_port
, master
);
5328 } else if (rocker_port_is_bridged(rocker_port
)) {
5329 err
= rocker_port_bridge_leave(rocker_port
);
5330 } else if (rocker_port_is_ovsed(rocker_port
)) {
5331 err
= rocker_port_ovs_changed(rocker_port
, NULL
);
5337 static int rocker_netdevice_event(struct notifier_block
*unused
,
5338 unsigned long event
, void *ptr
)
5340 struct net_device
*dev
;
5344 case NETDEV_CHANGEUPPER
:
5345 dev
= netdev_notifier_info_to_dev(ptr
);
5346 if (!rocker_port_dev_check(dev
))
5348 err
= rocker_port_master_changed(dev
);
5351 "failed to reflect master change (err %d)\n",
5359 static struct notifier_block rocker_netdevice_nb __read_mostly
= {
5360 .notifier_call
= rocker_netdevice_event
,
5363 /************************************
5364 * Net event notifier event handler
5365 ************************************/
5367 static int rocker_neigh_update(struct net_device
*dev
, struct neighbour
*n
)
5369 struct rocker_port
*rocker_port
= netdev_priv(dev
);
5370 int flags
= (n
->nud_state
& NUD_VALID
? 0 : ROCKER_OP_FLAG_REMOVE
) |
5371 ROCKER_OP_FLAG_NOWAIT
;
5372 __be32 ip_addr
= *(__be32
*)n
->primary_key
;
5374 return rocker_port_ipv4_neigh(rocker_port
, SWITCHDEV_TRANS_NONE
,
5375 flags
, ip_addr
, n
->ha
);
5378 static int rocker_netevent_event(struct notifier_block
*unused
,
5379 unsigned long event
, void *ptr
)
5381 struct net_device
*dev
;
5382 struct neighbour
*n
= ptr
;
5386 case NETEVENT_NEIGH_UPDATE
:
5387 if (n
->tbl
!= &arp_tbl
)
5390 if (!rocker_port_dev_check(dev
))
5392 err
= rocker_neigh_update(dev
, n
);
5395 "failed to handle neigh update (err %d)\n",
5403 static struct notifier_block rocker_netevent_nb __read_mostly
= {
5404 .notifier_call
= rocker_netevent_event
,
5407 /***********************
5408 * Module init and exit
5409 ***********************/
5411 static int __init
rocker_module_init(void)
5415 register_netdevice_notifier(&rocker_netdevice_nb
);
5416 register_netevent_notifier(&rocker_netevent_nb
);
5417 err
= pci_register_driver(&rocker_pci_driver
);
5419 goto err_pci_register_driver
;
5422 err_pci_register_driver
:
5423 unregister_netevent_notifier(&rocker_netevent_nb
);
5424 unregister_netdevice_notifier(&rocker_netdevice_nb
);
5428 static void __exit
rocker_module_exit(void)
5430 unregister_netevent_notifier(&rocker_netevent_nb
);
5431 unregister_netdevice_notifier(&rocker_netdevice_nb
);
5432 pci_unregister_driver(&rocker_pci_driver
);
5435 module_init(rocker_module_init
);
5436 module_exit(rocker_module_exit
);
5438 MODULE_LICENSE("GPL v2");
5439 MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
5440 MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>");
5441 MODULE_DESCRIPTION("Rocker switch device driver");
5442 MODULE_DEVICE_TABLE(pci
, rocker_pci_id_table
);