/*
 *  linux/drivers/net/ehea/ehea_main.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *	 Christoph Raisch <raisch@de.ibm.com>
 *	 Jan-Bernd Themann <themann@de.ibm.com>
 *	 Thomas Klein <tklein@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_ether.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/memory.h>
#include <asm/kexec.h>
#include <linux/mutex.h>

#include "ehea.h"
#include "ehea_qmr.h"
#include "ehea_phyp.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HEA Driver");
MODULE_VERSION(DRV_VERSION);
static int msg_level = -1;
static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;
static int use_mcs;
static int use_lro;
static int lro_max_aggr = EHEA_LRO_MAX_AGGR;
static int num_tx_qps = EHEA_NUM_TX_QP;
static int prop_carrier_state;
module_param(msg_level, int, 0);
module_param(rq1_entries, int, 0);
module_param(rq2_entries, int, 0);
module_param(rq3_entries, int, 0);
module_param(sq_entries, int, 0);
module_param(prop_carrier_state, int, 0);
module_param(use_mcs, int, 0);
module_param(use_lro, int, 0);
module_param(lro_max_aggr, int, 0);
module_param(num_tx_qps, int, 0);
MODULE_PARM_DESC(num_tx_qps, "Number of TX-QPS");
MODULE_PARM_DESC(msg_level, "msg_level");
MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
		 "port to stack. 1:yes, 0:no. Default = 0 ");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3) ")");
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2) ")");
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1) ")");
MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ) ")");
MODULE_PARM_DESC(use_mcs, " 0:NAPI, 1:Multiple receive queues, Default = 0 ");
MODULE_PARM_DESC(lro_max_aggr, " LRO: Max packets to be aggregated. Default = "
		 __MODULE_STRING(EHEA_LRO_MAX_AGGR));
MODULE_PARM_DESC(use_lro, " Large Receive Offload, 1: enable, 0: disable, "
		 "Default = 0");
static int port_name_cnt;
static LIST_HEAD(adapter_list);
static unsigned long ehea_driver_flags;
struct work_struct ehea_rereg_mr_task;
static DEFINE_MUTEX(dlpar_mem_lock);
struct ehea_fw_handle_array ehea_fw_handles;
struct ehea_bcmc_reg_array ehea_bcmc_regs;
static int __devinit ehea_probe_adapter(struct of_device *dev,
					const struct of_device_id *id);

static int __devexit ehea_remove(struct of_device *dev);

static struct of_device_id ehea_device_table[] = {
	{
		.name = "lhea",
		.compatible = "IBM,lhea",
	},
	{},
};
MODULE_DEVICE_TABLE(of, ehea_device_table);

static struct of_platform_driver ehea_driver = {
	.name = "ehea",
	.match_table = ehea_device_table,
	.probe = ehea_probe_adapter,
	.remove = ehea_remove,
};
void ehea_dump(void *adr, int len, char *msg)
{
	int x;
	unsigned char *deb = adr;

	for (x = 0; x < len; x += 16) {
		printk(DRV_NAME " %s adr=%p ofs=%04x %016llx %016llx\n", msg,
		       deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
		deb += 16;
	}
}
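/*
 * Example call (illustrative): dump a completion queue entry while
 * debugging, printing 16 bytes per line as two 64-bit words:
 *
 *	ehea_dump(cqe, sizeof(*cqe), "CQE");
 *
 * This is the pattern used by the netif_msg_*() guarded dumps throughout
 * this file.
 */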
void ehea_schedule_port_reset(struct ehea_port *port)
{
	if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
		schedule_work(&port->reset_task);
}
static void ehea_update_firmware_handles(void)
{
	struct ehea_fw_handle_entry *arr = NULL;
	struct ehea_adapter *adapter;
	int num_adapters = 0;
	int num_ports = 0;
	int num_portres = 0;
	int i = 0;
	int num_fw_handles, k, l;

	/* Determine number of handles */
	mutex_lock(&ehea_fw_handles.lock);

	list_for_each_entry(adapter, &adapter_list, list) {
		num_adapters++;

		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			num_ports++;
			num_portres += port->num_def_qps + port->num_add_tx_qps;
		}
	}

	num_fw_handles = num_adapters * EHEA_NUM_ADAPTER_FW_HANDLES +
			 num_ports * EHEA_NUM_PORT_FW_HANDLES +
			 num_portres * EHEA_NUM_PORTRES_FW_HANDLES;

	if (num_fw_handles) {
		arr = kzalloc(num_fw_handles * sizeof(*arr), GFP_KERNEL);
		if (!arr)
			goto out;  /* Keep the existing array */
	} else
		goto out_update;

	list_for_each_entry(adapter, &adapter_list, list) {
		if (num_adapters == 0)
			break;

		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP) ||
			    (num_ports == 0))
				continue;

			for (l = 0;
			     l < port->num_def_qps + port->num_add_tx_qps;
			     l++) {
				struct ehea_port_res *pr = &port->port_res[l];

				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->qp->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->send_cq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->recv_cq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->eq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->send_mr.handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->recv_mr.handle;
			}
			arr[i].adh = adapter->handle;
			arr[i++].fwh = port->qp_eq->fw_handle;
			num_ports--;
		}

		arr[i].adh = adapter->handle;
		arr[i++].fwh = adapter->neq->fw_handle;

		if (adapter->mr.handle) {
			arr[i].adh = adapter->handle;
			arr[i++].fwh = adapter->mr.handle;
		}
		num_adapters--;
	}

out_update:
	kfree(ehea_fw_handles.arr);
	ehea_fw_handles.arr = arr;
	ehea_fw_handles.num_entries = i;
out:
	mutex_unlock(&ehea_fw_handles.lock);
}
static void ehea_update_bcmc_registrations(void)
{
	unsigned long flags;
	struct ehea_bcmc_reg_entry *arr = NULL;
	struct ehea_adapter *adapter;
	struct ehea_mc_list *mc_entry;
	int num_registrations = 0;
	int i = 0;
	int k;

	spin_lock_irqsave(&ehea_bcmc_regs.lock, flags);

	/* Determine number of registrations */
	list_for_each_entry(adapter, &adapter_list, list)
		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			num_registrations += 2;	/* Broadcast registrations */

			list_for_each_entry(mc_entry, &port->mc_list->list, list)
				num_registrations += 2;
		}

	if (num_registrations) {
		arr = kzalloc(num_registrations * sizeof(*arr), GFP_ATOMIC);
		if (!arr)
			goto out;  /* Keep the existing array */
	} else
		goto out_update;

	list_for_each_entry(adapter, &adapter_list, list) {
		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			if (num_registrations == 0)
				goto out_update;

			arr[i].adh = adapter->handle;
			arr[i].port_id = port->logical_port_id;
			arr[i].reg_type = EHEA_BCMC_BROADCAST |
					  EHEA_BCMC_UNTAGGED;
			arr[i++].macaddr = port->mac_addr;

			arr[i].adh = adapter->handle;
			arr[i].port_id = port->logical_port_id;
			arr[i].reg_type = EHEA_BCMC_BROADCAST |
					  EHEA_BCMC_VLANID_ALL;
			arr[i++].macaddr = port->mac_addr;
			num_registrations -= 2;

			list_for_each_entry(mc_entry,
					    &port->mc_list->list, list) {
				if (num_registrations == 0)
					goto out_update;

				arr[i].adh = adapter->handle;
				arr[i].port_id = port->logical_port_id;
				arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
						  EHEA_BCMC_MULTICAST |
						  EHEA_BCMC_UNTAGGED;
				arr[i++].macaddr = mc_entry->macaddr;

				arr[i].adh = adapter->handle;
				arr[i].port_id = port->logical_port_id;
				arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
						  EHEA_BCMC_MULTICAST |
						  EHEA_BCMC_VLANID_ALL;
				arr[i++].macaddr = mc_entry->macaddr;
				num_registrations -= 2;
			}
		}
	}

out_update:
	kfree(ehea_bcmc_regs.arr);
	ehea_bcmc_regs.arr = arr;
	ehea_bcmc_regs.num_entries = i;
out:
	spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags);
}
static struct net_device_stats *ehea_get_stats(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct net_device_stats *stats = &port->stats;
	struct hcp_ehea_port_cb2 *cb2;
	u64 hret, rx_packets, tx_packets;
	int i;

	memset(stats, 0, sizeof(*stats));

	cb2 = (void *)get_zeroed_page(GFP_ATOMIC);
	if (!cb2) {
		ehea_error("no mem for cb2");
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id,
				      H_PORT_CB2, H_PORT_CB2_ALL, cb2);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_port failed");
		goto out_herr;
	}

	if (netif_msg_hw(port))
		ehea_dump(cb2, sizeof(*cb2), "net_device_stats");

	rx_packets = 0;
	for (i = 0; i < port->num_def_qps; i++)
		rx_packets += port->port_res[i].rx_packets;

	tx_packets = 0;
	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		tx_packets += port->port_res[i].tx_packets;

	stats->tx_packets = tx_packets;
	stats->multicast = cb2->rxmcp;
	stats->rx_errors = cb2->rxuerr;
	stats->rx_bytes = cb2->rxo;
	stats->tx_bytes = cb2->txo;
	stats->rx_packets = rx_packets;

out_herr:
	free_page((unsigned long)cb2);
out:
	return stats;
}
static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
{
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int max_index_mask = pr->rq1_skba.len - 1;
	int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes;
	int adder = 0;
	int i;

	pr->rq1_skba.os_skbs = 0;

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		pr->rq1_skba.index = index;
		pr->rq1_skba.os_skbs = fill_wqes;
		return;
	}

	for (i = 0; i < fill_wqes; i++) {
		if (!skb_arr_rq1[index]) {
			skb_arr_rq1[index] = netdev_alloc_skb(dev,
							      EHEA_L_PKT_SIZE);
			if (!skb_arr_rq1[index]) {
				pr->rq1_skba.os_skbs = fill_wqes - i;
				break;
			}
		}
		index--;
		index &= max_index_mask;
		adder++;
	}

	if (adder == 0)
		return;

	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, adder);
}
static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
{
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int i;

	for (i = 0; i < pr->rq1_skba.len; i++) {
		skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
		if (!skb_arr_rq1[i])
			break;
	}
	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, nr_rq1a);
}
static int ehea_refill_rq_def(struct ehea_port_res *pr,
			      struct ehea_q_skb_arr *q_skba, int rq_nr,
			      int num_wqes, int wqe_type, int packet_size)
{
	struct net_device *dev = pr->port->netdev;
	struct ehea_qp *qp = pr->qp;
	struct sk_buff **skb_arr = q_skba->arr;
	struct ehea_rwqe *rwqe;
	int i, index, max_index_mask, fill_wqes;
	int adder = 0;
	int ret = 0;

	fill_wqes = q_skba->os_skbs + num_wqes;
	q_skba->os_skbs = 0;

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		q_skba->os_skbs = fill_wqes;
		return ret;
	}

	index = q_skba->index;
	max_index_mask = q_skba->len - 1;
	for (i = 0; i < fill_wqes; i++) {
		u64 tmp_addr;
		struct sk_buff *skb;

		skb = netdev_alloc_skb_ip_align(dev, packet_size);
		if (!skb) {
			q_skba->os_skbs = fill_wqes - i;
			if (q_skba->os_skbs == q_skba->len - 2) {
				ehea_info("%s: rq%i ran dry - no mem for skb",
					  pr->port->netdev->name, rq_nr);
				ret = -ENOMEM;
			}
			break;
		}

		skb_arr[index] = skb;
		tmp_addr = ehea_map_vaddr(skb->data);
		if (tmp_addr == -1) {
			dev_kfree_skb(skb);
			q_skba->os_skbs = fill_wqes - i;
			ret = 0;
			break;
		}

		rwqe = ehea_get_next_rwqe(qp, rq_nr);
		rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
			    | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
		rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
		rwqe->sg_list[0].vaddr = tmp_addr;
		rwqe->sg_list[0].len = packet_size;
		rwqe->data_segments = 1;

		index++;
		index &= max_index_mask;
		adder++;
	}

	q_skba->index = index;
	if (adder == 0)
		goto out;

	/* Ring doorbell */
	if (rq_nr == 2)
		ehea_update_rq2a(pr->qp, adder);
	else
		ehea_update_rq3a(pr->qp, adder);
out:
	return ret;
}
static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
				  nr_of_wqes, EHEA_RWQE2_TYPE,
				  EHEA_RQ2_PKT_SIZE);
}

static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
				  nr_of_wqes, EHEA_RWQE3_TYPE,
				  EHEA_MAX_PACKET_SIZE);
}
static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
{
	*rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
	if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
		return 0;
	if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
	    (cqe->header_length == 0))
		return 0;
	return -EINVAL;
}
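/*
 * Worked example (illustrative; assumes EHEA_CQE_TYPE_RQ covers bits 5..6
 * of cqe->type, i.e. mask 0x60, consistent with the ">> 5" above): a
 * cqe->type value of 0x40 yields (0x40 & 0x60) >> 5 == 2, so the
 * completion is attributed to receive queue 2.  The same EHEA_BMASK_GET/
 * EHEA_BMASK_SET idiom is used throughout this file for wr_id fields.
 */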
static inline void ehea_fill_skb(struct net_device *dev,
				 struct sk_buff *skb, struct ehea_cqe *cqe)
{
	int length = cqe->num_bytes_transfered - 4;	/* remove CRC */

	skb_put(skb, length);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->protocol = eth_type_trans(skb, dev);
}
static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
					       int arr_len,
					       struct ehea_cqe *cqe)
{
	int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
	struct sk_buff *skb;
	void *pref;
	int x;

	x = skb_index + 1;
	x &= (arr_len - 1);

	pref = skb_array[x];
	prefetchw(pref);
	prefetchw(pref + EHEA_CACHE_LINE);

	pref = (skb_array[x]->data);
	prefetch(pref);
	prefetch(pref + EHEA_CACHE_LINE);
	prefetch(pref + EHEA_CACHE_LINE * 2);
	prefetch(pref + EHEA_CACHE_LINE * 3);

	skb = skb_array[skb_index];
	skb_array[skb_index] = NULL;
	return skb;
}
static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
						  int arr_len, int wqe_index)
{
	struct sk_buff *skb;
	void *pref;
	int x;

	x = wqe_index + 1;
	x &= (arr_len - 1);

	pref = skb_array[x];
	prefetchw(pref);
	prefetchw(pref + EHEA_CACHE_LINE);

	pref = (skb_array[x]->data);
	prefetchw(pref);
	prefetchw(pref + EHEA_CACHE_LINE);

	skb = skb_array[wqe_index];
	skb_array[wqe_index] = NULL;
	return skb;
}
static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
				 struct ehea_cqe *cqe, int *processed_rq2,
				 int *processed_rq3)
{
	struct sk_buff *skb;

	if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
		pr->p_stats.err_tcp_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_IP)
		pr->p_stats.err_ip_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
		pr->p_stats.err_frame_crc++;

	if (rq == 2) {
		*processed_rq2 += 1;
		skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
		dev_kfree_skb(skb);
	} else if (rq == 3) {
		*processed_rq3 += 1;
		skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
		dev_kfree_skb(skb);
	}

	if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
		if (netif_msg_rx_err(pr->port)) {
			ehea_error("Critical receive error for QP %d. "
				   "Resetting port.", pr->qp->init_attr.qp_nr);
			ehea_dump(cqe, sizeof(*cqe), "CQE");
		}
		ehea_schedule_port_reset(pr->port);
		return 1;
	}

	return 0;
}
static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
		       void **tcph, u64 *hdr_flags, void *priv)
{
	struct ehea_cqe *cqe = priv;
	unsigned int ip_len;
	struct iphdr *iph;

	/* non tcp/udp packets */
	if (!cqe->header_length)
		return -1;

	/* non tcp packets */
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	if (iph->protocol != IPPROTO_TCP)
		return -1;

	ip_len = ip_hdrlen(skb);
	skb_set_transport_header(skb, ip_len);
	*tcph = tcp_hdr(skb);

	/* check if ip header and tcp header are complete */
	if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
		return -1;

	*hdr_flags = LRO_IPV4 | LRO_TCP;
	*iphdr = iph;

	return 0;
}
static void ehea_proc_skb(struct ehea_port_res *pr, struct ehea_cqe *cqe,
			  struct sk_buff *skb)
{
	int vlan_extracted = ((cqe->status & EHEA_CQE_VLAN_TAG_XTRACT) &&
			      pr->port->vgrp);

	if (use_lro) {
		if (vlan_extracted)
			lro_vlan_hwaccel_receive_skb(&pr->lro_mgr, skb,
						     pr->port->vgrp,
						     cqe->vlan_tag,
						     cqe);
		else
			lro_receive_skb(&pr->lro_mgr, skb, cqe);
	} else {
		if (vlan_extracted)
			vlan_hwaccel_receive_skb(skb, pr->port->vgrp,
						 cqe->vlan_tag);
		else
			netif_receive_skb(skb);
	}
}
static int ehea_proc_rwqes(struct net_device *dev,
			   struct ehea_port_res *pr,
			   int budget)
{
	struct ehea_port *port = pr->port;
	struct ehea_qp *qp = pr->qp;
	struct ehea_cqe *cqe;
	struct sk_buff *skb;
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
	int skb_arr_rq1_len = pr->rq1_skba.len;
	int skb_arr_rq2_len = pr->rq2_skba.len;
	int skb_arr_rq3_len = pr->rq3_skba.len;
	int processed, processed_rq1, processed_rq2, processed_rq3;
	int wqe_index, last_wqe_index, rq, port_reset;

	processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
	last_wqe_index = 0;

	cqe = ehea_poll_rq1(qp, &wqe_index);
	while ((processed < budget) && cqe) {
		ehea_inc_rq1(qp);
		processed_rq1++;
		processed++;
		if (netif_msg_rx_status(port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		last_wqe_index = wqe_index;
		if (!ehea_check_cqe(cqe, &rq)) {
			if (rq == 1) {
				/* LL RQ1 */
				skb = get_skb_by_index_ll(skb_arr_rq1,
							  skb_arr_rq1_len,
							  wqe_index);
				if (unlikely(!skb)) {
					if (netif_msg_rx_err(port))
						ehea_error("LL rq1: skb=NULL");

					skb = netdev_alloc_skb(dev,
							       EHEA_L_PKT_SIZE);
					if (!skb)
						break;
				}
				skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
						cqe->num_bytes_transfered - 4);
				ehea_fill_skb(dev, skb, cqe);
			} else if (rq == 2) {
				/* RQ2 */
				skb = get_skb_by_index(skb_arr_rq2,
						       skb_arr_rq2_len, cqe);
				if (unlikely(!skb)) {
					if (netif_msg_rx_err(port))
						ehea_error("rq2: skb=NULL");
					break;
				}
				ehea_fill_skb(dev, skb, cqe);
				processed_rq2++;
			} else {
				/* RQ3 */
				skb = get_skb_by_index(skb_arr_rq3,
						       skb_arr_rq3_len, cqe);
				if (unlikely(!skb)) {
					if (netif_msg_rx_err(port))
						ehea_error("rq3: skb=NULL");
					break;
				}
				ehea_fill_skb(dev, skb, cqe);
				processed_rq3++;
			}

			ehea_proc_skb(pr, cqe, skb);
		} else {
			pr->p_stats.poll_receive_errors++;
			port_reset = ehea_treat_poll_error(pr, rq, cqe,
							   &processed_rq2,
							   &processed_rq3);
			if (port_reset)
				break;
		}
		cqe = ehea_poll_rq1(qp, &wqe_index);
	}
	if (use_lro)
		lro_flush_all(&pr->lro_mgr);

	pr->rx_packets += processed;

	ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
	ehea_refill_rq2(pr, processed_rq2);
	ehea_refill_rq3(pr, processed_rq3);

	return processed;
}
static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
{
	struct sk_buff *skb;
	struct ehea_cq *send_cq = pr->send_cq;
	struct ehea_cqe *cqe;
	int quota = my_quota;
	int cqe_counter = 0;
	int swqe_av = 0;
	int index;
	unsigned long flags;

	cqe = ehea_poll_cq(send_cq);
	while (cqe && (quota > 0)) {
		ehea_inc_cq(send_cq);

		cqe_counter++;
		if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
			ehea_error("Bad send completion status=0x%04X",
				   cqe->status);

			if (netif_msg_tx_err(pr->port))
				ehea_dump(cqe, sizeof(*cqe), "Send CQE");

			if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
				ehea_error("Resetting port");
				ehea_schedule_port_reset(pr->port);
				break;
			}
		}

		if (netif_msg_tx_done(pr->port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
			   == EHEA_SWQE2_TYPE)) {

			index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
			skb = pr->sq_skba.arr[index];
			dev_kfree_skb(skb);
			pr->sq_skba.arr[index] = NULL;
		}

		swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
		quota--;

		cqe = ehea_poll_cq(send_cq);
	}

	ehea_update_feca(send_cq, cqe_counter);
	atomic_add(swqe_av, &pr->swqe_avail);

	spin_lock_irqsave(&pr->netif_queue, flags);

	if (pr->queue_stopped && (atomic_read(&pr->swqe_avail)
				  >= pr->swqe_refill_th)) {
		netif_wake_queue(pr->port->netdev);
		pr->queue_stopped = 0;
	}
	spin_unlock_irqrestore(&pr->netif_queue, flags);

	return cqe;
}
#define EHEA_NAPI_POLL_NUM_BEFORE_IRQ 16
#define EHEA_POLL_MAX_CQES 65535

static int ehea_poll(struct napi_struct *napi, int budget)
{
	struct ehea_port_res *pr = container_of(napi, struct ehea_port_res,
						napi);
	struct net_device *dev = pr->port->netdev;
	struct ehea_cqe *cqe;
	struct ehea_cqe *cqe_skb = NULL;
	int force_irq, wqe_index;
	int rx = 0;

	force_irq = (pr->poll_counter > EHEA_NAPI_POLL_NUM_BEFORE_IRQ);
	cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);

	if (!force_irq)
		rx += ehea_proc_rwqes(dev, pr, budget - rx);

	while ((rx != budget) || force_irq) {
		pr->poll_counter = 0;
		force_irq = 0;
		napi_complete(napi);
		ehea_reset_cq_ep(pr->recv_cq);
		ehea_reset_cq_ep(pr->send_cq);
		ehea_reset_cq_n1(pr->recv_cq);
		ehea_reset_cq_n1(pr->send_cq);
		cqe = ehea_poll_rq1(pr->qp, &wqe_index);
		cqe_skb = ehea_poll_cq(pr->send_cq);

		if (!cqe && !cqe_skb)
			return rx;

		if (!napi_reschedule(napi))
			return rx;

		cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
		rx += ehea_proc_rwqes(dev, pr, budget - rx);
	}

	pr->poll_counter++;
	return rx;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void ehea_netpoll(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	int i;

	for (i = 0; i < port->num_def_qps; i++)
		napi_schedule(&port->port_res[i].napi);
}
#endif

static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
{
	struct ehea_port_res *pr = param;

	napi_schedule(&pr->napi);

	return IRQ_HANDLED;
}
static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
{
	struct ehea_port *port = param;
	struct ehea_qp *qp;
	struct ehea_eqe *eqe;
	u32 qp_token;
	u64 resource_type, aer, aerr;
	int reset_port = 0;

	eqe = ehea_poll_eq(port->qp_eq);

	while (eqe) {
		qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
		ehea_error("QP aff_err: entry=0x%llx, token=0x%x",
			   eqe->entry, qp_token);

		qp = port->port_res[qp_token].qp;

		resource_type = ehea_error_data(port->adapter, qp->fw_handle,
						&aer, &aerr);

		if (resource_type == EHEA_AER_RESTYPE_QP) {
			if ((aer & EHEA_AER_RESET_MASK) ||
			    (aerr & EHEA_AERR_RESET_MASK))
				reset_port = 1;
		} else
			reset_port = 1;	/* Reset in case of CQ or EQ error */

		eqe = ehea_poll_eq(port->qp_eq);
	}

	if (reset_port) {
		ehea_error("Resetting port");
		ehea_schedule_port_reset(port);
	}

	return IRQ_HANDLED;
}
static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
				       int logical_port)
{
	int i;

	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i])
			if (adapter->port[i]->logical_port_id == logical_port)
				return adapter->port[i];
	return NULL;
}
int ehea_sense_port_attr(struct ehea_port *port)
{
	int ret;
	u64 hret;
	struct hcp_ehea_port_cb0 *cb0;

	/* may be called via ehea_neq_tasklet() */
	cb0 = (void *)get_zeroed_page(GFP_ATOMIC);
	if (!cb0) {
		ehea_error("no mem for cb0");
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id, H_PORT_CB0,
				      EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
				      cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	/* MAC address */
	port->mac_addr = cb0->port_mac_addr << 16;

	if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
		ret = -EADDRNOTAVAIL;
		goto out_free;
	}

	/* Port speed */
	switch (cb0->port_speed) {
	case H_SPEED_10M_H:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 0;
		break;
	case H_SPEED_10M_F:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 1;
		break;
	case H_SPEED_100M_H:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 0;
		break;
	case H_SPEED_100M_F:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 1;
		break;
	case H_SPEED_1G_F:
		port->port_speed = EHEA_SPEED_1G;
		port->full_duplex = 1;
		break;
	case H_SPEED_10G_F:
		port->port_speed = EHEA_SPEED_10G;
		port->full_duplex = 1;
		break;
	default:
		port->port_speed = 0;
		port->full_duplex = 0;
		break;
	}

	port->num_mcs = cb0->num_default_qps;

	/* Number of default QPs */
	if (use_mcs)
		port->num_def_qps = cb0->num_default_qps;
	else
		port->num_def_qps = 1;

	if (!port->num_def_qps) {
		ret = -EINVAL;
		goto out_free;
	}

	port->num_tx_qps = num_tx_qps;

	if (port->num_def_qps >= port->num_tx_qps)
		port->num_add_tx_qps = 0;
	else
		port->num_add_tx_qps = port->num_tx_qps - port->num_def_qps;

	ret = 0;
out_free:
	if (ret || netif_msg_probe(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
	free_page((unsigned long)cb0);
out:
	return ret;
}
int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
{
	int ret = 0;
	u64 hret;
	struct hcp_ehea_port_cb4 *cb4;

	cb4 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb4) {
		ehea_error("no mem for cb4");
		ret = -ENOMEM;
		goto out;
	}

	cb4->port_speed = port_speed;

	netif_carrier_off(port->netdev);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
	if (hret == H_SUCCESS) {
		port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;

		hret = ehea_h_query_ehea_port(port->adapter->handle,
					      port->logical_port_id,
					      H_PORT_CB4, H_PORT_CB4_SPEED,
					      cb4);
		if (hret == H_SUCCESS) {
			switch (cb4->port_speed) {
			case H_SPEED_10M_H:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 0;
				break;
			case H_SPEED_10M_F:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 1;
				break;
			case H_SPEED_100M_H:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 0;
				break;
			case H_SPEED_100M_F:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 1;
				break;
			case H_SPEED_1G_F:
				port->port_speed = EHEA_SPEED_1G;
				port->full_duplex = 1;
				break;
			case H_SPEED_10G_F:
				port->port_speed = EHEA_SPEED_10G;
				port->full_duplex = 1;
				break;
			default:
				port->port_speed = 0;
				port->full_duplex = 0;
				break;
			}
		} else {
			ehea_error("Failed sensing port speed");
			ret = -EIO;
		}
	} else {
		if (hret == H_AUTHORITY) {
			ehea_info("Hypervisor denied setting port speed");
			ret = -EPERM;
		} else {
			ret = -EIO;
			ehea_error("Failed setting port speed");
		}
	}
	if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
		netif_carrier_on(port->netdev);

	free_page((unsigned long)cb4);
out:
	return ret;
}
static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
{
	int ret;
	u8 ec;
	u8 portnum;
	struct ehea_port *port;

	ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
	portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
	port = ehea_get_port(adapter, portnum);

	switch (ec) {
	case EHEA_EC_PORTSTATE_CHG:	/* port state change */

		if (!port) {
			ehea_error("unknown portnum %x", portnum);
			break;
		}

		if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
			if (!netif_carrier_ok(port->netdev)) {
				ret = ehea_sense_port_attr(port);
				if (ret) {
					ehea_error("failed resensing port "
						   "attributes");
					break;
				}

				if (netif_msg_link(port))
					ehea_info("%s: Logical port up: %dMbps "
						  "%s Duplex",
						  port->netdev->name,
						  port->port_speed,
						  port->full_duplex ==
						  1 ? "Full" : "Half");

				netif_carrier_on(port->netdev);
				netif_wake_queue(port->netdev);
			}
		} else
			if (netif_carrier_ok(port->netdev)) {
				if (netif_msg_link(port))
					ehea_info("%s: Logical port down",
						  port->netdev->name);
				netif_carrier_off(port->netdev);
				netif_stop_queue(port->netdev);
			}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
			port->phy_link = EHEA_PHY_LINK_UP;
			if (netif_msg_link(port))
				ehea_info("%s: Physical port up",
					  port->netdev->name);
			if (prop_carrier_state)
				netif_carrier_on(port->netdev);
		} else {
			port->phy_link = EHEA_PHY_LINK_DOWN;
			if (netif_msg_link(port))
				ehea_info("%s: Physical port down",
					  port->netdev->name);
			if (prop_carrier_state)
				netif_carrier_off(port->netdev);
		}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
			ehea_info("External switch port is primary port");
		else
			ehea_info("External switch port is backup port");

		break;
	case EHEA_EC_ADAPTER_MALFUNC:
		ehea_error("Adapter malfunction");
		break;
	case EHEA_EC_PORT_MALFUNC:
		ehea_info("Port malfunction: Device: %s", port->netdev->name);
		netif_carrier_off(port->netdev);
		netif_stop_queue(port->netdev);
		break;
	default:
		ehea_error("unknown event code %x, eqe=0x%llX", ec, eqe);
		break;
	}
}
static void ehea_neq_tasklet(unsigned long data)
{
	struct ehea_adapter *adapter = (struct ehea_adapter *)data;
	struct ehea_eqe *eqe;
	u64 event_mask;

	eqe = ehea_poll_eq(adapter->neq);
	ehea_debug("eqe=%p", eqe);

	while (eqe) {
		ehea_debug("*eqe=%lx", eqe->entry);
		ehea_parse_eqe(adapter, eqe->entry);
		eqe = ehea_poll_eq(adapter->neq);
		ehea_debug("next eqe=%p", eqe);
	}

	event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
		   | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
		   | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);

	ehea_h_reset_events(adapter->handle,
			    adapter->neq->fw_handle, event_mask);
}
static irqreturn_t ehea_interrupt_neq(int irq, void *param)
{
	struct ehea_adapter *adapter = param;

	tasklet_hi_schedule(&adapter->neq_tasklet);
	return IRQ_HANDLED;
}
static int ehea_fill_port_res(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;

	ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1
			       - init_attr->act_nr_rwqes_rq2
			       - init_attr->act_nr_rwqes_rq3 - 1);

	ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);

	ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);

	return ret;
}
static int ehea_reg_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i, ret;

	snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
		 dev->name);

	ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
				  ehea_qp_aff_irq_handler,
				  IRQF_DISABLED, port->int_aff_name, port);
	if (ret) {
		ehea_error("failed registering irq for qp_aff_irq_handler:"
			   "ist=%X", port->qp_eq->attr.ist1);
		goto out_free_qpeq;
	}

	if (netif_msg_ifup(port))
		ehea_info("irq_handle 0x%X for function qp_aff_irq_handler "
			  "registered", port->qp_eq->attr.ist1);

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		pr = &port->port_res[i];
		snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
			 "%s-queue%d", dev->name, i);
		ret = ibmebus_request_irq(pr->eq->attr.ist1,
					  ehea_recv_irq_handler,
					  IRQF_DISABLED, pr->int_send_name,
					  pr);
		if (ret) {
			ehea_error("failed registering irq for ehea_queue "
				   "port_res_nr:%d, ist=%X", i,
				   pr->eq->attr.ist1);
			goto out_free_req;
		}
		if (netif_msg_ifup(port))
			ehea_info("irq_handle 0x%X for function ehea_queue_int "
				  "%d registered", pr->eq->attr.ist1, i);
	}
out:
	return ret;

out_free_req:
	while (--i >= 0) {
		u32 ist = port->port_res[i].eq->attr.ist1;
		ibmebus_free_irq(ist, &port->port_res[i]);
	}

out_free_qpeq:
	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	i = port->num_def_qps;

	goto out;
}
static void ehea_free_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i;

	/* send */
	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		pr = &port->port_res[i];
		ibmebus_free_irq(pr->eq->attr.ist1, pr);
		if (netif_msg_intr(port))
			ehea_info("free send irq for res %d with handle 0x%X",
				  i, pr->eq->attr.ist1);
	}

	/* associated events */
	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	if (netif_msg_intr(port))
		ehea_info("associated event interrupt for handle 0x%X freed",
			  port->qp_eq->attr.ist1);
}
static int ehea_configure_port(struct ehea_port *port)
{
	int ret, i;
	u64 hret, mask;
	struct hcp_ehea_port_cb0 *cb0;

	ret = -ENOMEM;
	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0)
		goto out;

	cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
		     | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
				      PXLY_RC_VLAN_FILTER)
		     | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);

	for (i = 0; i < port->num_mcs; i++)
		if (use_mcs)
			cb0->default_qpn_arr[i] =
				port->port_res[i].qp->init_attr.qp_nr;
		else
			cb0->default_qpn_arr[i] =
				port->port_res[0].qp->init_attr.qp_nr;

	if (netif_msg_ifup(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");

	mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
	     | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB0, mask, cb0);
	ret = -EIO;
	if (hret != H_SUCCESS)
		goto out_free;

	ret = 0;

out_free:
	free_page((unsigned long)cb0);
out:
	return ret;
}
int ehea_gen_smrs(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_adapter *adapter = pr->port->adapter;

	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr);
	if (ret)
		goto out;

	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr);
	if (ret)
		goto out_remove_mr;

	return 0;

out_remove_mr:
	ehea_rem_mr(&pr->send_mr);
out:
	ehea_error("Generating SMRS failed\n");
	return -EIO;
}

int ehea_rem_smrs(struct ehea_port_res *pr)
{
	if ((ehea_rem_mr(&pr->send_mr)) ||
	    (ehea_rem_mr(&pr->recv_mr)))
		return -EIO;
	else
		return 0;
}
static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
{
	int arr_size = sizeof(void *) * max_q_entries;

	q_skba->arr = vmalloc(arr_size);
	if (!q_skba->arr)
		return -ENOMEM;

	memset(q_skba->arr, 0, arr_size);

	q_skba->len = max_q_entries;
	q_skba->index = 0;
	q_skba->os_skbs = 0;

	return 0;
}
static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
			      struct port_res_cfg *pr_cfg, int queue_token)
{
	struct ehea_adapter *adapter = port->adapter;
	enum ehea_eq_type eq_type = EHEA_EQ;
	struct ehea_qp_init_attr *init_attr = NULL;
	int ret = -EIO;

	memset(pr, 0, sizeof(struct ehea_port_res));

	pr->port = port;
	spin_lock_init(&pr->xmit_lock);
	spin_lock_init(&pr->netif_queue);

	pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
	if (!pr->eq) {
		ehea_error("create_eq failed (eq)");
		goto out_free;
	}

	pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->recv_cq) {
		ehea_error("create_cq failed (cq_recv)");
		goto out_free;
	}

	pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->send_cq) {
		ehea_error("create_cq failed (cq_send)");
		goto out_free;
	}

	if (netif_msg_ifup(port))
		ehea_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d",
			  pr->send_cq->attr.act_nr_of_cqes,
			  pr->recv_cq->attr.act_nr_of_cqes);

	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
	if (!init_attr) {
		ret = -ENOMEM;
		ehea_error("no mem for ehea_qp_init_attr");
		goto out_free;
	}

	init_attr->low_lat_rq1 = 1;
	init_attr->signalingtype = 1;	/* generate CQE if specified in WQE */
	init_attr->rq_count = 3;
	init_attr->qp_token = queue_token;
	init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
	init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
	init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
	init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
	init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
	init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
	init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
	init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
	init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
	init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
	init_attr->port_nr = port->logical_port_id;
	init_attr->send_cq_handle = pr->send_cq->fw_handle;
	init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
	init_attr->aff_eq_handle = port->qp_eq->fw_handle;

	pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
	if (!pr->qp) {
		ehea_error("create_qp failed");
		ret = -EIO;
		goto out_free;
	}

	if (netif_msg_ifup(port))
		ehea_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n "
			  "nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d", init_attr->qp_nr,
			  init_attr->act_nr_send_wqes,
			  init_attr->act_nr_rwqes_rq1,
			  init_attr->act_nr_rwqes_rq2,
			  init_attr->act_nr_rwqes_rq3);

	pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;

	ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size);
	ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
	ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
	ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
	if (ret)
		goto out_free;

	pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
	if (ehea_gen_smrs(pr) != 0) {
		ret = -EIO;
		goto out_free;
	}

	atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);

	kfree(init_attr);

	netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);

	pr->lro_mgr.max_aggr = pr->port->lro_max_aggr;
	pr->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
	pr->lro_mgr.lro_arr = pr->lro_desc;
	pr->lro_mgr.get_skb_header = get_skb_hdr;
	pr->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
	pr->lro_mgr.dev = port->netdev;
	pr->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
	pr->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;

	ret = 0;
	goto out;

out_free:
	kfree(init_attr);
	vfree(pr->sq_skba.arr);
	vfree(pr->rq1_skba.arr);
	vfree(pr->rq2_skba.arr);
	vfree(pr->rq3_skba.arr);
	ehea_destroy_qp(pr->qp);
	ehea_destroy_cq(pr->send_cq);
	ehea_destroy_cq(pr->recv_cq);
	ehea_destroy_eq(pr->eq);
out:
	return ret;
}
static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
{
	int ret, i;

	if (pr->qp)
		netif_napi_del(&pr->napi);

	ret = ehea_destroy_qp(pr->qp);

	if (!ret) {
		ehea_destroy_cq(pr->send_cq);
		ehea_destroy_cq(pr->recv_cq);
		ehea_destroy_eq(pr->eq);

		for (i = 0; i < pr->rq1_skba.len; i++)
			if (pr->rq1_skba.arr[i])
				dev_kfree_skb(pr->rq1_skba.arr[i]);

		for (i = 0; i < pr->rq2_skba.len; i++)
			if (pr->rq2_skba.arr[i])
				dev_kfree_skb(pr->rq2_skba.arr[i]);

		for (i = 0; i < pr->rq3_skba.len; i++)
			if (pr->rq3_skba.arr[i])
				dev_kfree_skb(pr->rq3_skba.arr[i]);

		for (i = 0; i < pr->sq_skba.len; i++)
			if (pr->sq_skba.arr[i])
				dev_kfree_skb(pr->sq_skba.arr[i]);

		vfree(pr->rq1_skba.arr);
		vfree(pr->rq2_skba.arr);
		vfree(pr->rq3_skba.arr);
		vfree(pr->sq_skba.arr);
		ret = ehea_rem_smrs(pr);
	}
	return ret;
}
/*
 * The write_* functions store information in swqe which is used by
 * the hardware to calculate the ip/tcp/udp checksum
 */

static inline void write_ip_start_end(struct ehea_swqe *swqe,
				      const struct sk_buff *skb)
{
	swqe->ip_start = skb_network_offset(skb);
	swqe->ip_end = (u8)(swqe->ip_start + ip_hdrlen(skb) - 1);
}

static inline void write_tcp_offset_end(struct ehea_swqe *swqe,
					const struct sk_buff *skb)
{
	swqe->tcp_offset =
		(u8)(swqe->ip_end + 1 + offsetof(struct tcphdr, check));

	swqe->tcp_end = (u16)skb->len - 1;
}

static inline void write_udp_offset_end(struct ehea_swqe *swqe,
					const struct sk_buff *skb)
{
	swqe->tcp_offset =
		(u8)(swqe->ip_end + 1 + offsetof(struct udphdr, check));

	swqe->tcp_end = (u16)skb->len - 1;
}
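/*
 * Worked example (illustrative): for an untagged Ethernet frame carrying
 * IPv4 with a standard 20-byte header, skb_network_offset() is 14, so
 * ip_start = 14 and ip_end = 14 + 20 - 1 = 33.  Since
 * offsetof(struct tcphdr, check) == 16, write_tcp_offset_end() then sets
 * tcp_offset = 33 + 1 + 16 = 50, i.e. the byte offset of the TCP checksum
 * field the hardware must fill in.
 */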
static void write_swqe2_TSO(struct sk_buff *skb,
			    struct ehea_swqe *swqe, u32 lkey)
{
	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
	int skb_data_size = skb_headlen(skb);
	int headersize;

	/* Packet is TCP with TSO enabled */
	swqe->tx_control |= EHEA_SWQE_TSO;
	swqe->mss = skb_shinfo(skb)->gso_size;
	/* copy only eth/ip/tcp headers to immediate data and
	 * the rest of skb->data to sg1entry
	 */
	headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);

	skb_data_size = skb_headlen(skb);

	if (skb_data_size >= headersize) {
		/* copy immediate data */
		skb_copy_from_linear_data(skb, imm_data, headersize);
		swqe->immediate_data_length = headersize;

		if (skb_data_size > headersize) {
			/* set sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = skb_data_size - headersize;
			sg1entry->vaddr =
				ehea_map_vaddr(skb->data + headersize);
			swqe->descriptors++;
		}
	} else
		ehea_error("cannot handle fragmented headers");
}
static void write_swqe2_nonTSO(struct sk_buff *skb,
			       struct ehea_swqe *swqe, u32 lkey)
{
	int skb_data_size = skb_headlen(skb);
	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;

	/* Packet is any nonTSO type
	 *
	 * Copy as much as possible skb->data to immediate data and
	 * the rest to sg1entry
	 */
	if (skb_data_size >= SWQE2_MAX_IMM) {
		/* copy immediate data */
		skb_copy_from_linear_data(skb, imm_data, SWQE2_MAX_IMM);

		swqe->immediate_data_length = SWQE2_MAX_IMM;

		if (skb_data_size > SWQE2_MAX_IMM) {
			/* copy sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = skb_data_size - SWQE2_MAX_IMM;
			sg1entry->vaddr =
				ehea_map_vaddr(skb->data + SWQE2_MAX_IMM);
			swqe->descriptors++;
		}
	} else {
		skb_copy_from_linear_data(skb, imm_data, skb_data_size);
		swqe->immediate_data_length = skb_data_size;
	}
}
static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
				    struct ehea_swqe *swqe, u32 lkey)
{
	struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
	skb_frag_t *frag;
	int nfrags, sg1entry_contains_frag_data, i;

	nfrags = skb_shinfo(skb)->nr_frags;
	sg1entry = &swqe->u.immdata_desc.sg_entry;
	sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
	swqe->descriptors = 0;
	sg1entry_contains_frag_data = 0;

	if ((dev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size)
		write_swqe2_TSO(skb, swqe, lkey);
	else
		write_swqe2_nonTSO(skb, swqe, lkey);

	/* write descriptors */
	if (nfrags > 0) {
		if (swqe->descriptors == 0) {
			/* sg1entry not yet used */
			frag = &skb_shinfo(skb)->frags[0];

			/* copy sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = frag->size;
			sg1entry->vaddr =
				ehea_map_vaddr(page_address(frag->page)
					       + frag->page_offset);
			swqe->descriptors++;
			sg1entry_contains_frag_data = 1;
		}

		for (i = sg1entry_contains_frag_data; i < nfrags; i++) {

			frag = &skb_shinfo(skb)->frags[i];
			sgentry = &sg_list[i - sg1entry_contains_frag_data];

			sgentry->l_key = lkey;
			sgentry->len = frag->size;
			sgentry->vaddr =
				ehea_map_vaddr(page_address(frag->page)
					       + frag->page_offset);
			swqe->descriptors++;
		}
	}
}
static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
{
	int ret = 0;
	u64 hret;
	u8 reg_type;

	/* De/Register untagged packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		ehea_error("%sregistering bc address failed (tagged)",
			   hcallid == H_REG_BCMC ? "" : "de");
		ret = -EIO;
		goto out_herr;
	}

	/* De/Register VLAN packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		ehea_error("%sregistering bc address failed (vlan)",
			   hcallid == H_REG_BCMC ? "" : "de");
		ret = -EIO;
	}
out_herr:
	return ret;
}
static int ehea_set_mac_addr(struct net_device *dev, void *sa)
{
	struct ehea_port *port = netdev_priv(dev);
	struct sockaddr *mac_addr = sa;
	struct hcp_ehea_port_cb0 *cb0;
	int ret;
	u64 hret;

	if (!is_valid_ether_addr(mac_addr->sa_data)) {
		ret = -EADDRNOTAVAIL;
		goto out;
	}

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ehea_error("no mem for cb0");
		ret = -ENOMEM;
		goto out;
	}

	memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);

	cb0->port_mac_addr = cb0->port_mac_addr >> 16;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id, H_PORT_CB0,
				       EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);

	/* Deregister old MAC in pHYP */
	if (port->state == EHEA_PORT_UP) {
		ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
		if (ret)
			goto out_upregs;
	}

	port->mac_addr = cb0->port_mac_addr << 16;

	/* Register new MAC in pHYP */
	if (port->state == EHEA_PORT_UP) {
		ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
		if (ret)
			goto out_upregs;
	}

	ret = 0;

out_upregs:
	ehea_update_bcmc_registrations();
out_free:
	free_page((unsigned long)cb0);
out:
	return ret;
}
static void ehea_promiscuous_error(u64 hret, int enable)
{
	if (hret == H_AUTHORITY)
		ehea_info("Hypervisor denied %sabling promiscuous mode",
			  enable == 1 ? "en" : "dis");
	else
		ehea_error("failed %sabling promiscuous mode",
			   enable == 1 ? "en" : "dis");
}
static void ehea_promiscuous(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	struct hcp_ehea_port_cb7 *cb7;
	u64 hret;

	if ((enable && port->promisc) || (!enable && !port->promisc))
		return;

	cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
	if (!cb7) {
		ehea_error("no mem for cb7");
		goto out;
	}

	/* Modify Pxs_DUCQPN in CB7 */
	cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
	if (hret) {
		ehea_promiscuous_error(hret, enable);
		goto out;
	}

	port->promisc = enable;
out:
	free_page((unsigned long)cb7);
}
static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
				     u32 hcallid)
{
	u64 hret;
	u8 reg_type;

	reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
		 | EHEA_BCMC_UNTAGGED;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
	if (hret)
		goto out;

	reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
		 | EHEA_BCMC_VLANID_ALL;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
out:
	return hret;
}
static int ehea_drop_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_mc_list *mc_entry = port->mc_list;
	struct list_head *pos;
	struct list_head *temp;
	int ret = 0;
	u64 hret;

	list_for_each_safe(pos, temp, &(port->mc_list->list)) {
		mc_entry = list_entry(pos, struct ehea_mc_list, list);

		hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
						 H_DEREG_BCMC);
		if (hret) {
			ehea_error("failed deregistering mcast MAC");
			ret = -EIO;
		}

		list_del(pos);
		kfree(mc_entry);
	}
	return ret;
}
static void ehea_allmulti(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	u64 hret;

	if (!port->allmulti) {
		if (enable) {
			/* Enable ALLMULTI */
			ehea_drop_multicast_list(dev);
			hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);
			if (!hret)
				port->allmulti = 1;
			else
				ehea_error("failed enabling IFF_ALLMULTI");
		}
	} else
		if (!enable) {
			/* Disable ALLMULTI */
			hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
			if (!hret)
				port->allmulti = 0;
			else
				ehea_error("failed disabling IFF_ALLMULTI");
		}
}
static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
{
	struct ehea_mc_list *ehea_mcl_entry;
	u64 hret;

	ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
	if (!ehea_mcl_entry) {
		ehea_error("no mem for mcl_entry");
		return;
	}

	INIT_LIST_HEAD(&ehea_mcl_entry->list);

	memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);

	hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
					 H_REG_BCMC);
	if (!hret)
		list_add(&ehea_mcl_entry->list, &port->mc_list->list);
	else {
		ehea_error("failed registering mcast MAC");
		kfree(ehea_mcl_entry);
	}
}
static void ehea_set_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int ret;

	if (dev->flags & IFF_PROMISC) {
		ehea_promiscuous(dev, 1);
		return;
	}
	ehea_promiscuous(dev, 0);

	if (dev->flags & IFF_ALLMULTI) {
		ehea_allmulti(dev, 1);
		goto out;
	}
	ehea_allmulti(dev, 0);

	if (!netdev_mc_empty(dev)) {
		ret = ehea_drop_multicast_list(dev);
		if (ret) {
			/* Dropping the current multicast list failed.
			 * Enabling ALL_MULTI is the best we can do.
			 */
			ehea_allmulti(dev, 1);
		}

		if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
			ehea_info("Mcast registration limit reached (0x%llx). "
				  "Use ALLMULTI!",
				  port->adapter->max_mc_mac);
			goto out;
		}

		netdev_for_each_mc_addr(ha, dev)
			ehea_add_multicast_entry(port, ha->addr);
	}
out:
	ehea_update_bcmc_registrations();
}
static int ehea_change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}
static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe, u32 lkey)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		const struct iphdr *iph = ip_hdr(skb);

		/* IPv4 */
		swqe->tx_control |= EHEA_SWQE_CRC
				 | EHEA_SWQE_IP_CHECKSUM
				 | EHEA_SWQE_TCP_CHECKSUM
				 | EHEA_SWQE_IMM_DATA_PRESENT
				 | EHEA_SWQE_DESCRIPTORS_PRESENT;

		write_ip_start_end(swqe, skb);

		if (iph->protocol == IPPROTO_UDP) {
			if ((iph->frag_off & IP_MF) ||
			    (iph->frag_off & IP_OFFSET))
				/* IP fragment, so don't change cs */
				swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM;
			else
				write_udp_offset_end(swqe, skb);
		} else if (iph->protocol == IPPROTO_TCP) {
			write_tcp_offset_end(swqe, skb);
		}

		/* icmp (big data) and ip segmentation packets (all other ip
		   packets) do not require any special handling */

	} else {
		/* Other Ethernet Protocol */
		swqe->tx_control |= EHEA_SWQE_CRC
				 | EHEA_SWQE_IMM_DATA_PRESENT
				 | EHEA_SWQE_DESCRIPTORS_PRESENT;
	}

	write_swqe2_data(skb, dev, swqe, lkey);
}
static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe)
{
	int nfrags = skb_shinfo(skb)->nr_frags;
	u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];
	skb_frag_t *frag;
	int i;

	if (skb->protocol == htons(ETH_P_IP)) {
		const struct iphdr *iph = ip_hdr(skb);

		/* IPv4 */
		write_ip_start_end(swqe, skb);

		if (iph->protocol == IPPROTO_TCP) {
			swqe->tx_control |= EHEA_SWQE_CRC
					 | EHEA_SWQE_IP_CHECKSUM
					 | EHEA_SWQE_TCP_CHECKSUM
					 | EHEA_SWQE_IMM_DATA_PRESENT;

			write_tcp_offset_end(swqe, skb);

		} else if (iph->protocol == IPPROTO_UDP) {
			if ((iph->frag_off & IP_MF) ||
			    (iph->frag_off & IP_OFFSET))
				/* IP fragment, so don't change cs */
				swqe->tx_control |= EHEA_SWQE_CRC
						 | EHEA_SWQE_IMM_DATA_PRESENT;
			else {
				swqe->tx_control |= EHEA_SWQE_CRC
						 | EHEA_SWQE_IP_CHECKSUM
						 | EHEA_SWQE_TCP_CHECKSUM
						 | EHEA_SWQE_IMM_DATA_PRESENT;

				write_udp_offset_end(swqe, skb);
			}
		} else {
			/* icmp (big data) and
			   ip segmentation packets (all other ip packets) */
			swqe->tx_control |= EHEA_SWQE_CRC
					 | EHEA_SWQE_IP_CHECKSUM
					 | EHEA_SWQE_IMM_DATA_PRESENT;
		}
	} else {
		/* Other Ethernet Protocol */
		swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IMM_DATA_PRESENT;
	}
	/* copy (immediate) data */
	if (nfrags == 0) {
		/* data is in a single piece */
		skb_copy_from_linear_data(skb, imm_data, skb->len);
	} else {
		/* first copy data from the skb->data buffer ... */
		skb_copy_from_linear_data(skb, imm_data,
					  skb_headlen(skb));
		imm_data += skb_headlen(skb);

		/* ... then copy data from the fragments */
		for (i = 0; i < nfrags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			memcpy(imm_data,
			       page_address(frag->page) + frag->page_offset,
			       frag->size);
			imm_data += frag->size;
		}
	}
	swqe->immediate_data_length = skb->len;
	dev_kfree_skb(skb);
}
static inline int ehea_hash_skb(struct sk_buff *skb, int num_qps)
{
	struct tcphdr *tcp;
	u32 tmp;

	if ((skb->protocol == htons(ETH_P_IP)) &&
	    (ip_hdr(skb)->protocol == IPPROTO_TCP)) {
		tcp = (struct tcphdr *)(skb_network_header(skb) +
					(ip_hdr(skb)->ihl * 4));
		tmp = (tcp->source + (tcp->dest << 16)) % 31;
		tmp += ip_hdr(skb)->daddr % 31;
		return tmp % num_qps;
	} else
		return 0;
}
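/*
 * Worked example (illustrative): ehea_hash_skb() spreads TCP flows over the
 * TX queues so that one flow always maps to the same queue.  For a flow
 * with source port 80, destination port 12345 and num_qps = 2 (ignoring
 * byte order for the sake of the example): tmp = (80 + (12345 << 16)) % 31,
 * then tmp += daddr % 31, and the queue index is tmp % num_qps.  Non-TCP
 * traffic always maps to queue 0.
 */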
static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_swqe *swqe;
	unsigned long flags;
	u32 lkey;
	int swqe_index;
	struct ehea_port_res *pr;

	pr = &port->port_res[ehea_hash_skb(skb, port->num_tx_qps)];

	if (!spin_trylock(&pr->xmit_lock))
		return NETDEV_TX_BUSY;

	if (pr->queue_stopped) {
		spin_unlock(&pr->xmit_lock);
		return NETDEV_TX_BUSY;
	}

	swqe = ehea_get_swqe(pr->qp, &swqe_index);
	memset(swqe, 0, SWQE_HEADER_SIZE);
	atomic_dec(&pr->swqe_avail);

	if (skb->len <= SWQE3_MAX_IMM) {
		u32 sig_iv = port->sig_comp_iv;
		u32 swqe_num = pr->swqe_id_counter;
		ehea_xmit3(skb, dev, swqe);
		swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
			| EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
		if (pr->swqe_ll_count >= (sig_iv - 1)) {
			swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
						      sig_iv);
			swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
			pr->swqe_ll_count = 0;
		} else
			pr->swqe_ll_count += 1;
	} else {
		swqe->wr_id =
			EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
		      | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
		      | EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
		      | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
		pr->sq_skba.arr[pr->sq_skba.index] = skb;

		pr->sq_skba.index++;
		pr->sq_skba.index &= (pr->sq_skba.len - 1);

		lkey = pr->send_mr.lkey;
		ehea_xmit2(skb, dev, swqe, lkey);
		swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
	}
	pr->swqe_id_counter += 1;

	if (port->vgrp && vlan_tx_tag_present(skb)) {
		swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
		swqe->vlan_tag = vlan_tx_tag_get(skb);
	}

	if (netif_msg_tx_queued(port)) {
		ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr);
		ehea_dump(swqe, 512, "swqe");
	}

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		netif_stop_queue(dev);
		swqe->tx_control |= EHEA_SWQE_PURGE;
	}

	ehea_post_swqe(pr->qp, swqe);

	if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
		spin_lock_irqsave(&pr->netif_queue, flags);
		if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
			pr->p_stats.queue_stopped++;
			netif_stop_queue(dev);
			pr->queue_stopped = 1;
		}
		spin_unlock_irqrestore(&pr->netif_queue, flags);
	}
	dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
	spin_unlock(&pr->xmit_lock);

	return NETDEV_TX_OK;
}
static void ehea_vlan_rx_register(struct net_device *dev,
				  struct vlan_group *grp)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	u64 hret;

	port->vgrp = grp;

	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb1) {
		ehea_error("no mem for cb1");
		goto out;
	}

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		ehea_error("modify_ehea_port failed");

	free_page((unsigned long)cb1);
out:
	return;
}
static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	int index;
	u64 hret;

	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb1) {
		ehea_error("no mem for cb1");
		goto out;
	}

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_port failed");
		goto out;
	}

	index = (vid / 64);
	cb1->vlan_filter[index] |= ((u64)(0x8000000000000000 >> (vid & 0x3F)));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		ehea_error("modify_ehea_port failed");
out:
	free_page((unsigned long)cb1);
	return;
}
static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	int index;
	u64 hret;

	vlan_group_set_device(port->vgrp, vid, NULL);

	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb1) {
		ehea_error("no mem for cb1");
		goto out;
	}

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_port failed");
		goto out;
	}

	index = (vid / 64);
	cb1->vlan_filter[index] &= ~((u64)(0x8000000000000000 >> (vid & 0x3F)));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		ehea_error("modify_ehea_port failed");
out:
	free_page((unsigned long)cb1);
	return;
}
int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
{
	int ret = -EIO;
	u64 hret;
	u16 dummy16 = 0;
	u64 dummy64 = 0;
	struct hcp_modify_qp_cb0 *cb0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (1)");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		ehea_error("modify_ehea_qp failed (1)");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (2)");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		ehea_error("modify_ehea_qp failed (2)");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (3)");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		ehea_error("modify_ehea_qp failed (3)");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (4)");
		goto out;
	}

	ret = 0;
out:
	free_page((unsigned long)cb0);
	return ret;
}

static int ehea_port_res_setup(struct ehea_port *port, int def_qps,
			       int add_tx_qps)
{
	int ret, i;
	struct port_res_cfg pr_cfg, pr_cfg_small_rx;
	enum ehea_eq_type eq_type = EHEA_EQ;

	port->qp_eq = ehea_create_eq(port->adapter, eq_type,
				     EHEA_MAX_ENTRIES_EQ, 1);
	if (!port->qp_eq) {
		ret = -EINVAL;
		ehea_error("ehea_create_eq failed (qp_eq)");
		goto out_kill_eq;
	}

	pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
	pr_cfg.max_entries_scq = sq_entries * 2;
	pr_cfg.max_entries_sq = sq_entries;
	pr_cfg.max_entries_rq1 = rq1_entries;
	pr_cfg.max_entries_rq2 = rq2_entries;
	pr_cfg.max_entries_rq3 = rq3_entries;

	pr_cfg_small_rx.max_entries_rcq = 1;
	pr_cfg_small_rx.max_entries_scq = sq_entries;
	pr_cfg_small_rx.max_entries_sq = sq_entries;
	pr_cfg_small_rx.max_entries_rq1 = 1;
	pr_cfg_small_rx.max_entries_rq2 = 1;
	pr_cfg_small_rx.max_entries_rq3 = 1;

	for (i = 0; i < def_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
		if (ret)
			goto out_clean_pr;
	}
	for (i = def_qps; i < def_qps + add_tx_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i],
					 &pr_cfg_small_rx, i);
		if (ret)
			goto out_clean_pr;
	}

	return 0;

out_clean_pr:
	while (--i >= 0)
		ehea_clean_portres(port, &port->port_res[i]);

out_kill_eq:
	ehea_destroy_eq(port->qp_eq);
	return ret;
}
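
/*
 * Queue sizing above: the receive CQ must have room for a completion per
 * posted receive buffer, hence rq1 + rq2 + rq3 entries; the send CQ gets
 * twice sq_entries as headroom.  The additional TX queue pairs are given
 * minimal single-entry receive resources since they are only meant to
 * transmit.
 */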

static int ehea_clean_all_portres(struct ehea_port *port)
{
	int ret = 0;
	int i;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		ret |= ehea_clean_portres(port, &port->port_res[i]);

	ret |= ehea_destroy_eq(port->qp_eq);

	return ret;
}

static void ehea_remove_adapter_mr(struct ehea_adapter *adapter)
{
	if (adapter->active_ports)
		return;

	ehea_rem_mr(&adapter->mr);
}

static int ehea_add_adapter_mr(struct ehea_adapter *adapter)
{
	if (adapter->active_ports)
		return 0;

	return ehea_reg_kernel_mr(adapter, &adapter->mr);
}

static int ehea_up(struct net_device *dev)
{
	int ret, i;
	struct ehea_port *port = netdev_priv(dev);

	if (port->state == EHEA_PORT_UP)
		return 0;

	ret = ehea_port_res_setup(port, port->num_def_qps,
				  port->num_add_tx_qps);
	if (ret) {
		ehea_error("port_res_failed");
		goto out;
	}

	/* Set default QP for this port */
	ret = ehea_configure_port(port);
	if (ret) {
		ehea_error("ehea_configure_port failed. ret:%d", ret);
		goto out_clean_pr;
	}

	ret = ehea_reg_interrupts(dev);
	if (ret) {
		ehea_error("reg_interrupts failed. ret:%d", ret);
		goto out_clean_pr;
	}

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
		if (ret) {
			ehea_error("activate_qp failed");
			goto out_free_irqs;
		}
	}

	for (i = 0; i < port->num_def_qps; i++) {
		ret = ehea_fill_port_res(&port->port_res[i]);
		if (ret) {
			ehea_error("out_free_irqs");
			goto out_free_irqs;
		}
	}

	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
	if (ret) {
		ret = -EIO;
		goto out_free_irqs;
	}

	port->state = EHEA_PORT_UP;

	ret = 0;
	goto out;

out_free_irqs:
	ehea_free_interrupts(dev);

out_clean_pr:
	ehea_clean_all_portres(port);
out:
	if (ret)
		ehea_info("Failed starting %s. ret=%i", dev->name, ret);

	ehea_update_bcmc_registrations();
	ehea_update_firmware_handles();

	return ret;
}

static void port_napi_disable(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		napi_disable(&port->port_res[i].napi);
}

static void port_napi_enable(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		napi_enable(&port->port_res[i].napi);
}

static int ehea_open(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	mutex_lock(&port->port_lock);

	if (netif_msg_ifup(port))
		ehea_info("enabling port %s", dev->name);

	ret = ehea_up(dev);
	if (!ret) {
		port_napi_enable(port);
		netif_start_queue(dev);
	}

	mutex_unlock(&port->port_lock);

	return ret;
}

static int ehea_down(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	if (port->state == EHEA_PORT_DOWN)
		return 0;

	ehea_drop_multicast_list(dev);
	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);

	ehea_free_interrupts(dev);

	port->state = EHEA_PORT_DOWN;

	ehea_update_bcmc_registrations();

	ret = ehea_clean_all_portres(port);
	if (ret)
		ehea_info("Failed freeing resources for %s. ret=%i",
			  dev->name, ret);

	ehea_update_firmware_handles();

	return ret;
}

static int ehea_stop(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	if (netif_msg_ifdown(port))
		ehea_info("disabling port %s", dev->name);

	set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
	cancel_work_sync(&port->reset_task);
	mutex_lock(&port->port_lock);
	netif_stop_queue(dev);
	port_napi_disable(port);
	ret = ehea_down(dev);
	mutex_unlock(&port->port_lock);
	clear_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
	return ret;
}

static void ehea_purge_sq(struct ehea_qp *orig_qp)
{
	struct ehea_qp qp = *orig_qp;
	struct ehea_qp_init_attr *init_attr = &qp.init_attr;
	struct ehea_swqe *swqe;
	int wqe_index;
	int i;

	for (i = 0; i < init_attr->act_nr_send_wqes; i++) {
		swqe = ehea_get_swqe(&qp, &wqe_index);
		swqe->tx_control |= EHEA_SWQE_PURGE;
	}
}
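
/*
 * Setting EHEA_SWQE_PURGE on every send WQE is intended to make the
 * hardware retire pending work requests without transmitting them, so
 * that ehea_flush_sq() can wait for the send queue to drain before the
 * underlying memory regions are torn down.
 */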

static void ehea_flush_sq(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
		int k = 0;

		while (atomic_read(&pr->swqe_avail) < swqe_max) {
			msleep(5);
			if (++k == 20)
				break;
		}
	}
}
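
/*
 * The drain loop above polls swqe_avail in 5 ms steps and gives up after
 * 20 iterations, i.e. roughly 100 ms per queue pair, so a stuck send
 * queue cannot stall a memory operation indefinitely.
 */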

int ehea_stop_qps(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_modify_qp_cb0 *cb0;
	int ret = -EIO;
	int dret;
	int i;
	u64 hret;
	u64 dummy64 = 0;
	u16 dummy16 = 0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		struct ehea_qp *qp = pr->qp;

		/* Purge send queue */
		ehea_purge_sq(qp);

		/* Disable queue pair */
		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			ehea_error("query_ehea_qp failed (1)");
			goto out;
		}

		cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
		cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED;

		hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
					     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
							    1), cb0, &dummy64,
					     &dummy64, &dummy16, &dummy16);
		if (hret != H_SUCCESS) {
			ehea_error("modify_ehea_qp failed (1)");
			goto out;
		}

		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			ehea_error("query_ehea_qp failed (2)");
			goto out;
		}

		/* deregister shared memory regions */
		dret = ehea_rem_smrs(pr);
		if (dret) {
			ehea_error("unreg shared memory region failed");
			goto out;
		}
	}

	ret = 0;
out:
	free_page((unsigned long)cb0);

	return ret;
}
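
/*
 * The (qp_ctl_reg & H_QP_CR_RES_STATE) << 8 expression appears to park
 * the current resource state in the next-higher byte of the control
 * register; ehea_restart_qps() performs the same shift when it
 * re-enables the queue pairs after the memory regions have been rebuilt.
 */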

void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
{
	struct ehea_qp qp = *orig_qp;
	struct ehea_qp_init_attr *init_attr = &qp.init_attr;
	struct ehea_rwqe *rwqe;
	struct sk_buff **skba_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skba_rq3 = pr->rq3_skba.arr;
	struct sk_buff *skb;
	u32 lkey = pr->recv_mr.lkey;

	int i;
	int index;

	for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) {
		rwqe = ehea_get_next_rwqe(&qp, 2);
		rwqe->sg_list[0].l_key = lkey;
		index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
		skb = skba_rq2[index];
		if (skb)
			rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
	}

	for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) {
		rwqe = ehea_get_next_rwqe(&qp, 3);
		rwqe->sg_list[0].l_key = lkey;
		index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
		skb = skba_rq3[index];
		if (skb)
			rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
	}
}
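
/*
 * After the kernel memory region is re-registered, the buffer addresses
 * previously handed to the hardware are stale.  The loops above rewrite
 * the l_key and the mapped virtual address of every posted RQ2/RQ3 work
 * request; RQ1 is instead refilled from scratch by ehea_restart_qps().
 */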

int ehea_restart_qps(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	int ret = 0;
	int i;

	struct hcp_modify_qp_cb0 *cb0;
	u64 hret;
	u64 dummy64 = 0;
	u16 dummy16 = 0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		struct ehea_qp *qp = pr->qp;

		ret = ehea_gen_smrs(pr);
		if (ret) {
			ehea_error("creation of shared memory regions failed");
			goto out;
		}

		ehea_update_rqs(qp, pr);

		/* Enable queue pair */
		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			ehea_error("query_ehea_qp failed (1)");
			goto out;
		}

		cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
		cb0->qp_ctl_reg |= H_QP_CR_ENABLED;

		hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
					     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
							    1), cb0, &dummy64,
					     &dummy64, &dummy16, &dummy16);
		if (hret != H_SUCCESS) {
			ehea_error("modify_ehea_qp failed (1)");
			goto out;
		}

		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			ehea_error("query_ehea_qp failed (2)");
			goto out;
		}

		/* refill entire queue */
		ehea_refill_rq1(pr, pr->rq1_skba.index, 0);
		ehea_refill_rq2(pr, 0);
		ehea_refill_rq3(pr, 0);
	}
out:
	free_page((unsigned long)cb0);

	return ret;
}

static void ehea_reset_port(struct work_struct *work)
{
	int ret;
	struct ehea_port *port =
		container_of(work, struct ehea_port, reset_task);
	struct net_device *dev = port->netdev;

	mutex_lock(&port->port_lock);
	netif_stop_queue(dev);

	port_napi_disable(port);

	ehea_down(dev);

	ret = ehea_up(dev);
	if (ret)
		goto out;

	ehea_set_multicast_list(dev);

	if (netif_msg_timer(port))
		ehea_info("Device %s reset successfully", dev->name);

	port_napi_enable(port);

	netif_wake_queue(dev);

out:
	mutex_unlock(&port->port_lock);
}

static void ehea_rereg_mrs(struct work_struct *work)
{
	int ret, i;
	struct ehea_adapter *adapter;

	ehea_info("LPAR memory changed - re-initializing driver");

	list_for_each_entry(adapter, &adapter_list, list)
		if (adapter->active_ports) {
			/* Shutdown all ports */
			for (i = 0; i < EHEA_MAX_PORTS; i++) {
				struct ehea_port *port = adapter->port[i];
				struct net_device *dev;

				if (!port)
					continue;

				dev = port->netdev;

				if (dev->flags & IFF_UP) {
					mutex_lock(&port->port_lock);
					netif_stop_queue(dev);
					ehea_flush_sq(port);
					ret = ehea_stop_qps(dev);
					if (ret) {
						mutex_unlock(&port->port_lock);
						goto out;
					}
					port_napi_disable(port);
					mutex_unlock(&port->port_lock);
				}
			}

			/* Unregister old memory region */
			ret = ehea_rem_mr(&adapter->mr);
			if (ret) {
				ehea_error("unregister MR failed - driver"
					   " inoperable!");
				goto out;
			}
		}

	clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);

	list_for_each_entry(adapter, &adapter_list, list)
		if (adapter->active_ports) {
			/* Register new memory region */
			ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
			if (ret) {
				ehea_error("register MR failed - driver"
					   " inoperable!");
				goto out;
			}

			/* Restart all ports */
			for (i = 0; i < EHEA_MAX_PORTS; i++) {
				struct ehea_port *port = adapter->port[i];

				if (port) {
					struct net_device *dev = port->netdev;

					if (dev->flags & IFF_UP) {
						mutex_lock(&port->port_lock);
						port_napi_enable(port);
						ret = ehea_restart_qps(dev);
						if (!ret)
							netif_wake_queue(dev);
						mutex_unlock(&port->port_lock);
					}
				}
			}
		}
	ehea_info("re-initializing driver complete");
out:
	return;
}
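
/*
 * Sequence for a DLPAR memory change, as implemented above: stop traffic
 * and disable every queue pair, drop the old adapter-wide memory region,
 * register a fresh one against the changed memory layout, then restart
 * the queue pairs.  __EHEA_STOP_XFER is set by the memory notifier
 * before this function runs and is cleared here once the stale regions
 * are gone.
 */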

static void ehea_tx_watchdog(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);

	if (netif_carrier_ok(dev) &&
	    !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
		ehea_schedule_port_reset(port);
}

int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
{
	struct hcp_query_ehea *cb;
	u64 hret;
	int ret;

	cb = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb) {
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea(adapter->handle, cb);

	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_herr;
	}

	adapter->max_mc_mac = cb->max_mc_mac - 1;
	ret = 0;

out_herr:
	free_page((unsigned long)cb);
out:
	return ret;
}

int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
{
	int ret = 0;
	u64 hret;
	struct hcp_ehea_port_cb4 *cb4;

	*jumbo = 0;

	/* (Try to) enable *jumbo frames */
	cb4 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb4) {
		ehea_error("no mem for cb4");
		ret = -ENOMEM;
	} else {
		hret = ehea_h_query_ehea_port(port->adapter->handle,
					      port->logical_port_id,
					      H_PORT_CB4,
					      H_PORT_CB4_JUMBO, cb4);
		if (hret == H_SUCCESS) {
			if (cb4->jumbo_frame)
				*jumbo = 1;
			else {
				cb4->jumbo_frame = 1;
				hret = ehea_h_modify_ehea_port(
						port->adapter->handle,
						port->logical_port_id,
						H_PORT_CB4,
						H_PORT_CB4_JUMBO, cb4);
				if (hret == H_SUCCESS)
					*jumbo = 1;
			}
		} else
			ret = -EINVAL;

		free_page((unsigned long)cb4);
	}

	return ret;
}
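
/*
 * Jumbo frame detection above is best effort: if the port already
 * reports jumbo_frame, *jumbo is set; otherwise the driver attempts to
 * switch the feature on via H_PORT_CB4 and reports whether the hcall
 * took effect.  A failure here only affects the log message printed by
 * ehea_setup_single_port(), it does not fail port setup.
 */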

static ssize_t ehea_show_port_id(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ehea_port *port = container_of(dev, struct ehea_port,
					      ofdev.dev);
	return sprintf(buf, "%d", port->logical_port_id);
}

static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id,
		   NULL);

static void __devinit logical_port_release(struct device *dev)
{
	struct ehea_port *port = container_of(dev, struct ehea_port,
					      ofdev.dev);
	of_node_put(port->ofdev.node);
}

static struct device *ehea_register_port(struct ehea_port *port,
					 struct device_node *dn)
{
	int ret;

	port->ofdev.node = of_node_get(dn);
	port->ofdev.dev.parent = &port->adapter->ofdev->dev;
	port->ofdev.dev.bus = &ibmebus_bus_type;

	dev_set_name(&port->ofdev.dev, "port%d", port_name_cnt++);
	port->ofdev.dev.release = logical_port_release;

	ret = of_device_register(&port->ofdev);
	if (ret) {
		ehea_error("failed to register device. ret=%d", ret);
		goto out;
	}

	ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
	if (ret) {
		ehea_error("failed to register attributes, ret=%d", ret);
		goto out_unreg_of_dev;
	}

	return &port->ofdev.dev;

out_unreg_of_dev:
	of_device_unregister(&port->ofdev);
out:
	return NULL;
}

static void ehea_unregister_port(struct ehea_port *port)
{
	device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
	of_device_unregister(&port->ofdev);
}

static const struct net_device_ops ehea_netdev_ops = {
	.ndo_open		= ehea_open,
	.ndo_stop		= ehea_stop,
	.ndo_start_xmit		= ehea_start_xmit,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ehea_netpoll,
#endif
	.ndo_get_stats		= ehea_get_stats,
	.ndo_set_mac_address	= ehea_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= ehea_set_multicast_list,
	.ndo_change_mtu		= ehea_change_mtu,
	.ndo_vlan_rx_register	= ehea_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= ehea_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ehea_vlan_rx_kill_vid,
	.ndo_tx_timeout		= ehea_tx_watchdog,
};

struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
					 u32 logical_port_id,
					 struct device_node *dn)
{
	int ret;
	struct net_device *dev;
	struct ehea_port *port;
	struct device *port_dev;
	int jumbo;

	/* allocate memory for the port structures */
	dev = alloc_etherdev(sizeof(struct ehea_port));

	if (!dev) {
		ehea_error("no mem for net_device");
		ret = -ENOMEM;
		goto out_err;
	}

	port = netdev_priv(dev);

	mutex_init(&port->port_lock);
	port->state = EHEA_PORT_DOWN;
	port->sig_comp_iv = sq_entries / 10;

	port->adapter = adapter;
	port->netdev = dev;
	port->logical_port_id = logical_port_id;

	port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);

	port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
	if (!port->mc_list) {
		ret = -ENOMEM;
		goto out_free_ethdev;
	}

	INIT_LIST_HEAD(&port->mc_list->list);

	ret = ehea_sense_port_attr(port);
	if (ret)
		goto out_free_mc_list;

	port_dev = ehea_register_port(port, dn);
	if (!port_dev)
		goto out_free_mc_list;

	SET_NETDEV_DEV(dev, port_dev);

	/* initialize net_device structure */
	memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);

	dev->netdev_ops = &ehea_netdev_ops;
	ehea_set_ethtool_ops(dev);

	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
		      | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
		      | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
		      | NETIF_F_LLTX;
	dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;

	INIT_WORK(&port->reset_task, ehea_reset_port);

	ret = register_netdev(dev);
	if (ret) {
		ehea_error("register_netdev failed. ret=%d", ret);
		goto out_unreg_port;
	}

	port->lro_max_aggr = lro_max_aggr;

	ret = ehea_get_jumboframe_status(port, &jumbo);
	if (ret)
		ehea_error("failed determining jumbo frame status for %s",
			   port->netdev->name);

	ehea_info("%s: Jumbo frames are %sabled", dev->name,
		  jumbo == 1 ? "en" : "dis");

	adapter->active_ports++;

	return port;

out_unreg_port:
	ehea_unregister_port(port);

out_free_mc_list:
	kfree(port->mc_list);

out_free_ethdev:
	free_netdev(dev);

out_err:
	ehea_error("setting up logical port with id=%d failed, ret=%d",
		   logical_port_id, ret);
	return NULL;
}

static void ehea_shutdown_single_port(struct ehea_port *port)
{
	struct ehea_adapter *adapter = port->adapter;
	unregister_netdev(port->netdev);
	ehea_unregister_port(port);
	kfree(port->mc_list);
	free_netdev(port->netdev);
	adapter->active_ports--;
}

static int ehea_setup_ports(struct ehea_adapter *adapter)
{
	struct device_node *lhea_dn;
	struct device_node *eth_dn = NULL;

	const u32 *dn_log_port_id;
	int i = 0;

	lhea_dn = adapter->ofdev->node;
	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {

		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
						 NULL);
		if (!dn_log_port_id) {
			ehea_error("bad device node: eth_dn name=%s",
				   eth_dn->full_name);
			continue;
		}

		if (ehea_add_adapter_mr(adapter)) {
			ehea_error("creating MR failed");
			of_node_put(eth_dn);
			return -EIO;
		}

		adapter->port[i] = ehea_setup_single_port(adapter,
							  *dn_log_port_id,
							  eth_dn);
		if (adapter->port[i])
			ehea_info("%s -> logical port id #%d",
				  adapter->port[i]->netdev->name,
				  *dn_log_port_id);
		else
			ehea_remove_adapter_mr(adapter);

		i++;
	}
	return 0;
}

static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
					   u32 logical_port_id)
{
	struct device_node *lhea_dn;
	struct device_node *eth_dn = NULL;
	const u32 *dn_log_port_id;

	lhea_dn = adapter->ofdev->node;
	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {

		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
						 NULL);
		if (dn_log_port_id)
			if (*dn_log_port_id == logical_port_id)
				return eth_dn;
	}

	return NULL;
}

static ssize_t ehea_probe_port(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct ehea_adapter *adapter = dev_get_drvdata(dev);
	struct ehea_port *port;
	struct device_node *eth_dn = NULL;
	int i;

	u32 logical_port_id;

	sscanf(buf, "%d", &logical_port_id);

	port = ehea_get_port(adapter, logical_port_id);

	if (port) {
		ehea_info("adding port with logical port id=%d failed. port "
			  "already configured as %s.", logical_port_id,
			  port->netdev->name);
		return -EINVAL;
	}

	eth_dn = ehea_get_eth_dn(adapter, logical_port_id);

	if (!eth_dn) {
		ehea_info("no logical port with id %d found", logical_port_id);
		return -EINVAL;
	}

	if (ehea_add_adapter_mr(adapter)) {
		ehea_error("creating MR failed");
		return -EIO;
	}

	port = ehea_setup_single_port(adapter, logical_port_id, eth_dn);

	of_node_put(eth_dn);

	if (port) {
		for (i = 0; i < EHEA_MAX_PORTS; i++)
			if (!adapter->port[i]) {
				adapter->port[i] = port;
				break;
			}

		ehea_info("added %s (logical port id=%d)", port->netdev->name,
			  logical_port_id);
	} else {
		ehea_remove_adapter_mr(adapter);
		return -EIO;
	}

	return (ssize_t) count;
}

static ssize_t ehea_remove_port(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct ehea_adapter *adapter = dev_get_drvdata(dev);
	struct ehea_port *port;
	int i;
	u32 logical_port_id;

	sscanf(buf, "%d", &logical_port_id);

	port = ehea_get_port(adapter, logical_port_id);

	if (port) {
		ehea_info("removed %s (logical port id=%d)",
			  port->netdev->name, logical_port_id);

		ehea_shutdown_single_port(port);

		for (i = 0; i < EHEA_MAX_PORTS; i++)
			if (adapter->port[i] == port) {
				adapter->port[i] = NULL;
				break;
			}
	} else {
		ehea_error("removing port with logical port id=%d failed. port "
			   "not configured.", logical_port_id);
		return -EINVAL;
	}

	ehea_remove_adapter_mr(adapter);

	return (ssize_t) count;
}

static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port);
static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port);

int ehea_create_device_sysfs(struct of_device *dev)
{
	int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
	if (ret)
		goto out;

	ret = device_create_file(&dev->dev, &dev_attr_remove_port);
out:
	return ret;
}

void ehea_remove_device_sysfs(struct of_device *dev)
{
	device_remove_file(&dev->dev, &dev_attr_probe_port);
	device_remove_file(&dev->dev, &dev_attr_remove_port);
}

static int __devinit ehea_probe_adapter(struct of_device *dev,
					const struct of_device_id *id)
{
	struct ehea_adapter *adapter;
	const u64 *adapter_handle;
	int ret;

	if (!dev || !dev->node) {
		ehea_error("Invalid ibmebus device probed");
		return -EINVAL;
	}

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		ret = -ENOMEM;
		dev_err(&dev->dev, "no mem for ehea_adapter\n");
		goto out;
	}

	list_add(&adapter->list, &adapter_list);

	adapter->ofdev = dev;

	adapter_handle = of_get_property(dev->node, "ibm,hea-handle",
					 NULL);
	if (adapter_handle)
		adapter->handle = *adapter_handle;

	if (!adapter->handle) {
		dev_err(&dev->dev, "failed getting handle for adapter"
			" '%s'\n", dev->node->full_name);
		ret = -ENODEV;
		goto out_free_ad;
	}

	adapter->pd = EHEA_PD_ID;

	dev_set_drvdata(&dev->dev, adapter);


	/* initialize adapter and ports */
	/* get adapter properties */
	ret = ehea_sense_adapter_attr(adapter);
	if (ret) {
		dev_err(&dev->dev, "sense_adapter_attr failed: %d\n", ret);
		goto out_free_ad;
	}

	adapter->neq = ehea_create_eq(adapter,
				      EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
	if (!adapter->neq) {
		ret = -EIO;
		dev_err(&dev->dev, "NEQ creation failed\n");
		goto out_free_ad;
	}

	tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
		     (unsigned long)adapter);

	ret = ibmebus_request_irq(adapter->neq->attr.ist1,
				  ehea_interrupt_neq, IRQF_DISABLED,
				  "ehea_neq", adapter);
	if (ret) {
		dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
		goto out_kill_eq;
	}

	ret = ehea_create_device_sysfs(dev);
	if (ret)
		goto out_free_irq;

	ret = ehea_setup_ports(adapter);
	if (ret) {
		dev_err(&dev->dev, "setup_ports failed\n");
		goto out_rem_dev_sysfs;
	}

	ret = 0;
	goto out;

out_rem_dev_sysfs:
	ehea_remove_device_sysfs(dev);

out_free_irq:
	ibmebus_free_irq(adapter->neq->attr.ist1, adapter);

out_kill_eq:
	ehea_destroy_eq(adapter->neq);

out_free_ad:
	list_del(&adapter->list);
	kfree(adapter);

out:
	ehea_update_firmware_handles();
	return ret;
}

static int __devexit ehea_remove(struct of_device *dev)
{
	struct ehea_adapter *adapter = dev_get_drvdata(&dev->dev);
	int i;

	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i]) {
			ehea_shutdown_single_port(adapter->port[i]);
			adapter->port[i] = NULL;
		}

	ehea_remove_device_sysfs(dev);

	flush_scheduled_work();

	ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
	tasklet_kill(&adapter->neq_tasklet);

	ehea_destroy_eq(adapter->neq);
	ehea_remove_adapter_mr(adapter);
	list_del(&adapter->list);
	kfree(adapter);

	ehea_update_firmware_handles();

	return 0;
}

void ehea_crash_handler(void)
{
	int i;

	if (ehea_fw_handles.arr)
		for (i = 0; i < ehea_fw_handles.num_entries; i++)
			ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
					     ehea_fw_handles.arr[i].fwh,
					     FORCE_FREE);

	if (ehea_bcmc_regs.arr)
		for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
			ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
					      ehea_bcmc_regs.arr[i].port_id,
					      ehea_bcmc_regs.arr[i].reg_type,
					      ehea_bcmc_regs.arr[i].macaddr,
					      0, H_DEREG_BCMC);
}

static int ehea_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	int ret = NOTIFY_BAD;
	struct memory_notify *arg = data;

	if (!mutex_trylock(&dlpar_mem_lock)) {
		ehea_info("ehea_mem_notifier must not be called parallelized");
		goto out;
	}

	switch (action) {
	case MEM_CANCEL_OFFLINE:
		ehea_info("memory offlining canceled");
		/* Readd canceled memory block */
	case MEM_ONLINE:
		ehea_info("memory is going online");
		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
		if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
			goto out_unlock;
		ehea_rereg_mrs(NULL);
		break;
	case MEM_GOING_OFFLINE:
		ehea_info("memory is going offline");
		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
		if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
			goto out_unlock;
		ehea_rereg_mrs(NULL);
		break;
	default:
		break;
	}

	ehea_update_firmware_handles();
	ret = NOTIFY_OK;

out_unlock:
	mutex_unlock(&dlpar_mem_lock);
out:
	return ret;
}
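
/*
 * The memory notifier keeps the driver's section bitmap in sync with
 * LPAR memory hotplug: sections are added to or removed from the busmap
 * first, then ehea_rereg_mrs() rebuilds the memory regions.
 * mutex_trylock() rejects overlapping invocations outright, matching the
 * "must not be called parallelized" message above.
 */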

static struct notifier_block ehea_mem_nb = {
	.notifier_call = ehea_mem_notifier,
};

static int ehea_reboot_notifier(struct notifier_block *nb,
				unsigned long action, void *unused)
{
	if (action == SYS_RESTART) {
		ehea_info("Reboot: freeing all eHEA resources");
		ibmebus_unregister_driver(&ehea_driver);
	}
	return NOTIFY_DONE;
}

static struct notifier_block ehea_reboot_nb = {
	.notifier_call = ehea_reboot_notifier,
};

static int check_module_parm(void)
{
	int ret = 0;

	if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
		ehea_info("Bad parameter: rq1_entries");
		ret = -EINVAL;
	}
	if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
		ehea_info("Bad parameter: rq2_entries");
		ret = -EINVAL;
	}
	if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
		ehea_info("Bad parameter: rq3_entries");
		ret = -EINVAL;
	}
	if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
	    (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
		ehea_info("Bad parameter: sq_entries");
		ret = -EINVAL;
	}

	return ret;
}
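
/*
 * check_module_parm() only enforces the [EHEA_MIN_ENTRIES_QP,
 * EHEA_MAX_ENTRIES_*] bounds on the ring sizes; it does not verify that
 * a value has the documented 2^x - 1 shape, so an in-range but oddly
 * shaped value is accepted here.
 */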

static ssize_t ehea_show_capabilities(struct device_driver *drv,
				      char *buf)
{
	return sprintf(buf, "%d", EHEA_CAPABILITIES);
}

static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH,
		   ehea_show_capabilities, NULL);

int __init ehea_module_init(void)
{
	int ret;

	printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n",
	       DRV_VERSION);


	INIT_WORK(&ehea_rereg_mr_task, ehea_rereg_mrs);
	memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
	memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));

	mutex_init(&ehea_fw_handles.lock);
	spin_lock_init(&ehea_bcmc_regs.lock);

	ret = check_module_parm();
	if (ret)
		goto out;

	ret = ehea_create_busmap();
	if (ret)
		goto out;

	ret = register_reboot_notifier(&ehea_reboot_nb);
	if (ret)
		ehea_info("failed registering reboot notifier");

	ret = register_memory_notifier(&ehea_mem_nb);
	if (ret)
		ehea_info("failed registering memory remove notifier");

	ret = crash_shutdown_register(&ehea_crash_handler);
	if (ret)
		ehea_info("failed registering crash handler");

	ret = ibmebus_register_driver(&ehea_driver);
	if (ret) {
		ehea_error("failed registering eHEA device driver on ebus");
		goto out2;
	}

	ret = driver_create_file(&ehea_driver.driver,
				 &driver_attr_capabilities);
	if (ret) {
		ehea_error("failed to register capabilities attribute, ret=%d",
			   ret);
		goto out3;
	}

	return ret;

out3:
	ibmebus_unregister_driver(&ehea_driver);
out2:
	unregister_memory_notifier(&ehea_mem_nb);
	unregister_reboot_notifier(&ehea_reboot_nb);
	crash_shutdown_unregister(&ehea_crash_handler);
out:
	return ret;
}

static void __exit ehea_module_exit(void)
{
	int ret;

	flush_scheduled_work();
	driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
	ibmebus_unregister_driver(&ehea_driver);
	unregister_reboot_notifier(&ehea_reboot_nb);
	ret = crash_shutdown_unregister(&ehea_crash_handler);
	if (ret)
		ehea_info("failed unregistering crash handler");
	unregister_memory_notifier(&ehea_mem_nb);
	kfree(ehea_fw_handles.arr);
	kfree(ehea_bcmc_regs.arr);
	ehea_destroy_busmap();
}

module_init(ehea_module_init);
module_exit(ehea_module_exit);