/**************************************************************************/
/*                                                                        */
/* IBM System i and System p Virtual NIC Device Driver                    */
/* Copyright (C) 2014 IBM Corp.                                           */
/* Santiago Leon (santi_leon@yahoo.com)                                   */
/* Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                            */
/* John Allen (jallen@linux.vnet.ibm.com)                                 */
/*                                                                        */
/* This program is free software; you can redistribute it and/or modify   */
/* it under the terms of the GNU General Public License as published by   */
/* the Free Software Foundation; either version 2 of the License, or      */
/* (at your option) any later version.                                    */
/*                                                                        */
/* This program is distributed in the hope that it will be useful,        */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of         */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the          */
/* GNU General Public License for more details.                           */
/*                                                                        */
/* You should have received a copy of the GNU General Public License      */
/* along with this program.                                               */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device   */
/* for use with IBM i/p Series LPAR Linux.  It utilizes the logical LAN   */
/* option of the RS/6000 Platform Architecture to interface with virtual  */
/* ethernet NICs that are presented to the partition by the hypervisor.   */
/*                                                                        */
/* Messages are passed between the VNIC driver and the VNIC server using  */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs).  CRQs are used to */
/* issue and receive commands that initiate communication with the server */
/* on driver initialization.  Sub CRQs (sCRQs) are similar to CRQs, but   */
/* are used by the driver to notify the server that a packet is           */
/* ready for transmission or that a buffer has been added to receive a    */
/* packet.  Subsequently, sCRQs are used by the server to notify the      */
/* driver that a packet transmission has been completed or that a packet  */
/* has been received and placed in a waiting buffer.                      */
/*                                                                        */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in    */
/* which skbs are DMA mapped and immediately unmapped when the transmit   */
/* or receive has been completed, the VNIC driver is required to use      */
/* "long term mapping".  This entails that large, contiguous DMA mapped   */
/* buffers are allocated on driver initialization and these buffers are   */
/* then continuously reused to pass skbs to and from the VNIC server.     */
/*                                                                        */
/**************************************************************************/
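/* Putting the pieces above together, a typical exchange with the VNIC    */
/* server looks roughly like this:                                        */
/*                                                                        */
/*   1. On init, the driver exchanges commands and responses over the     */
/*      CRQ (version exchange, capability queries, login).                */
/*   2. For data transfer, the driver posts descriptors to a sub CRQ      */
/*      (send_subcrq()/send_subcrq_indirect() below); each descriptor     */
/*      references an offset into a long term mapped buffer by map id.    */
/*   3. The server posts completions back on the sub CRQs, which raise    */
/*      the ibmvnic_interrupt_tx()/ibmvnic_interrupt_rx() handlers.       */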
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>

#include "ibmvnic.h"
static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *);
static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int budget);
static void send_map_query(struct ibmvnic_adapter *adapter);
static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, u32, u8);
static void send_request_unmap(struct ibmvnic_adapter *, u8);
struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
				offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
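/* Taken together, the two macros above look up a named field of
 * adapter->stats:
 *
 *	IBMVNIC_GET_STAT(adapter, IBMVNIC_STAT_OFF(rx_packets))
 *
 * computes the byte offset of stats.rx_packets inside struct
 * ibmvnic_adapter and reads the counter back as a u64.
 */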
static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};
static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}
/* net_device_ops functions */

static void init_rx_pool(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rx_pool *rx_pool, int num, int index,
			 int buff_size, int active)
{
	netdev_dbg(adapter->netdev,
		   "Initializing rx_pool %d, %d buffs, %d bytes each\n",
		   index, num, buff_size);
	rx_pool->size = num;
	rx_pool->index = index;
	rx_pool->buff_size = buff_size;
	rx_pool->active = active;
}
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);
	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id;
	adapter->map_id++;

	init_completion(&adapter->fw_done);
	send_request_map(adapter, ltb->addr,
			 ltb->size, ltb->map_id);
	wait_for_completion(&adapter->fw_done);
	return 0;
}
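/* Note: the completion handshake above means alloc_long_term_buff()
 * sleeps until adapter->fw_done is signalled, which is expected to
 * happen in the CRQ response handler once the server has acknowledged
 * the REQUEST_MAP command.  The map_id recorded here is how tx/rx
 * descriptors refer to this buffer from then on.
 */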
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
	if (!adapter->failover)
		send_request_unmap(adapter, ltb->map_id);
}
static int alloc_rx_pool(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rx_pool *pool)
{
	struct device *dev = &adapter->vdev->dev;
	int i;

	pool->free_map = kcalloc(pool->size, sizeof(int), GFP_KERNEL);
	if (!pool->free_map)
		return -ENOMEM;

	pool->rx_buff = kcalloc(pool->size, sizeof(struct ibmvnic_rx_buff),
				GFP_KERNEL);
	if (!pool->rx_buff) {
		dev_err(dev, "Couldn't alloc rx buffers\n");
		kfree(pool->free_map);
		return -ENOMEM;
	}

	if (alloc_long_term_buff(adapter, &pool->long_term_buff,
				 pool->size * pool->buff_size)) {
		kfree(pool->free_map);
		kfree(pool->rx_buff);
		return -ENOMEM;
	}

	for (i = 0; i < pool->size; ++i)
		pool->free_map[i] = i;

	atomic_set(&pool->available, 0);
	pool->next_alloc = 0;
	pool->next_free = 0;

	return 0;
}
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	struct device *dev = &adapter->vdev->dev;
	int buffers_added = 0;
	unsigned long lpar_rc;
	union sub_crq sub_crq;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	u64 *handle_array;
	int shift = 0;
	int index;
	int i;

	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
					   off_rxadd_subcrqs));

	for (i = 0; i < count; ++i) {
		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}

		index = pool->free_map[pool->next_free];

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;
		pool->rx_buff[index].data = dst;

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		memset(&sub_crq, 0, sizeof(sub_crq));
		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq.rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);

		lpar_rc = send_subcrq(adapter, handle_array[pool->index],
				      &sub_crq);
		if (lpar_rc != H_SUCCESS)
			goto failure;

		buffers_added++;
		adapter->replenish_add_buff_success++;
		pool->next_free = (pool->next_free + 1) % pool->size;
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	dev_info(dev, "replenish pools failure\n");
	pool->free_map[pool->next_free] = index;
	pool->rx_buff[index].skb = NULL;
	if (!dma_mapping_error(dev, dma_addr))
		dma_unmap_single(dev, dma_addr, pool->buff_size,
				 DMA_FROM_DEVICE);

	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;
	atomic_add(buffers_added, &pool->available);
}
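/* Worked example of the 24-bit length encoding used above: for a 4KB
 * buffer (pool->buff_size = 0x1000) on a little-endian kernel,
 * shift = 8, so 0x1000 << 8 = 0x100000 and cpu_to_be32() stores the
 * bytes 00 10 00 00.  A server reading only the first three bytes of
 * the field then sees 0x001000 (4096) as intended.
 */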
static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->migrated)
		return;

	adapter->replenish_task_cycles++;
	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}
}
static void free_rx_pool(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rx_pool *pool)
{
	int i;

	kfree(pool->free_map);
	pool->free_map = NULL;

	if (!pool->rx_buff)
		return;

	for (i = 0; i < pool->size; i++) {
		if (pool->rx_buff[i].skb) {
			dev_kfree_skb_any(pool->rx_buff[i].skb);
			pool->rx_buff[i].skb = NULL;
		}
	}
	kfree(pool->rx_buff);
	pool->rx_buff = NULL;
}
static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_pool *tx_pool;
	union ibmvnic_crq crq;
	int rxadd_subcrqs;
	u64 *size_array;
	int tx_subcrqs;
	int i, j;

	rxadd_subcrqs =
	    be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	tx_subcrqs =
	    be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			     be32_to_cpu(adapter->login_rsp_buf->
					 off_rxadd_buff_size));

	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		goto alloc_napi_failed;
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&adapter->napi[i]);
	}
	adapter->rx_pool =
	    kcalloc(rxadd_subcrqs, sizeof(struct ibmvnic_rx_pool), GFP_KERNEL);
	if (!adapter->rx_pool)
		goto rx_pool_arr_alloc_failed;
	send_map_query(adapter);
	for (i = 0; i < rxadd_subcrqs; i++) {
		init_rx_pool(adapter, &adapter->rx_pool[i],
			     IBMVNIC_BUFFS_PER_POOL, i,
			     be64_to_cpu(size_array[i]), 1);
		if (alloc_rx_pool(adapter, &adapter->rx_pool[i])) {
			dev_err(dev, "Couldn't alloc rx pool\n");
			goto rx_pool_alloc_failed;
		}
	}
	adapter->tx_pool =
	    kcalloc(tx_subcrqs, sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		goto tx_pool_arr_alloc_failed;
	for (i = 0; i < tx_subcrqs; i++) {
		tx_pool = &adapter->tx_pool[i];
		tx_pool->tx_buff =
		    kcalloc(adapter->max_tx_entries_per_subcrq,
			    sizeof(struct ibmvnic_tx_buff), GFP_KERNEL);
		if (!tx_pool->tx_buff)
			goto tx_pool_alloc_failed;

		if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
					 adapter->max_tx_entries_per_subcrq *
					 adapter->req_mtu))
			goto tx_ltb_alloc_failed;

		tx_pool->free_map =
		    kcalloc(adapter->max_tx_entries_per_subcrq,
			    sizeof(int), GFP_KERNEL);
		if (!tx_pool->free_map)
			goto tx_fm_alloc_failed;

		for (j = 0; j < adapter->max_tx_entries_per_subcrq; j++)
			tx_pool->free_map[j] = j;

		tx_pool->consumer_index = 0;
		tx_pool->producer_index = 0;
	}
	adapter->bounce_buffer_size =
	    (netdev->mtu + ETH_HLEN - 1) / PAGE_SIZE + 1;
	adapter->bounce_buffer = kmalloc(adapter->bounce_buffer_size,
					 GFP_KERNEL);
	if (!adapter->bounce_buffer)
		goto bounce_alloc_failed;

	adapter->bounce_buffer_dma = dma_map_single(dev, adapter->bounce_buffer,
						    adapter->bounce_buffer_size,
						    DMA_TO_DEVICE);
	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
		dev_err(dev, "Couldn't map tx bounce buffer\n");
		goto bounce_map_failed;
	}
	replenish_pools(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++)
		enable_scrq_irq(adapter, adapter->rx_scrq[i]);

	for (i = 0; i < adapter->req_tx_queues; i++)
		enable_scrq_irq(adapter, adapter->tx_scrq[i]);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_UP;
	ibmvnic_send_crq(adapter, &crq);

	netif_tx_start_all_queues(netdev);

	return 0;

bounce_map_failed:
	kfree(adapter->bounce_buffer);
bounce_alloc_failed:
	i = tx_subcrqs - 1;
	kfree(adapter->tx_pool[i].free_map);
tx_fm_alloc_failed:
	free_long_term_buff(adapter, &adapter->tx_pool[i].long_term_buff);
tx_ltb_alloc_failed:
	kfree(adapter->tx_pool[i].tx_buff);
tx_pool_alloc_failed:
	for (j = 0; j < i; j++) {
		kfree(adapter->tx_pool[j].tx_buff);
		free_long_term_buff(adapter,
				    &adapter->tx_pool[j].long_term_buff);
		kfree(adapter->tx_pool[j].free_map);
	}
	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
tx_pool_arr_alloc_failed:
	i = rxadd_subcrqs;
rx_pool_alloc_failed:
	for (j = 0; j < i; j++) {
		free_rx_pool(adapter, &adapter->rx_pool[j]);
		free_long_term_buff(adapter,
				    &adapter->rx_pool[j].long_term_buff);
	}
	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
rx_pool_arr_alloc_failed:
	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_disable(&adapter->napi[i]);
alloc_napi_failed:
	return -ENOMEM;
}
static int ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int i;

	adapter->closing = true;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_disable(&adapter->napi[i]);

	if (!adapter->failover)
		netif_tx_stop_all_queues(netdev);

	if (adapter->bounce_buffer) {
		if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					 adapter->bounce_buffer_dma,
					 adapter->bounce_buffer_size,
					 DMA_BIDIRECTIONAL);
			adapter->bounce_buffer_dma = DMA_ERROR_CODE;
		}
		kfree(adapter->bounce_buffer);
		adapter->bounce_buffer = NULL;
	}

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_DN;
	ibmvnic_send_crq(adapter, &crq);

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	     i++) {
		kfree(adapter->tx_pool[i].tx_buff);
		free_long_term_buff(adapter,
				    &adapter->tx_pool[i].long_term_buff);
		kfree(adapter->tx_pool[i].free_map);
	}
	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++) {
		free_rx_pool(adapter, &adapter->rx_pool[i]);
		free_long_term_buff(adapter,
				    &adapter->rx_pool[i].long_term_buff);
	}
	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;

	adapter->closing = false;

	return 0;
}
/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field - bitfield determining needed headers
 * @skb - socket buffer
 * @hdr_len - array of header lengths
 * @hdr_data - buffer to write the headers into
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers.  Saves individual header
 * lengths and returns the total buffer length, both of which are used
 * to build descriptors.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
			  int *hdr_len, u8 *hdr_data)
{
	int len = 0;
	u8 *hdr;

	hdr_len[0] = sizeof(struct ethhdr);

	if (skb->protocol == htons(ETH_P_IP)) {
		hdr_len[1] = ip_hdr(skb)->ihl * 4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hdr_len[1] = sizeof(struct ipv6hdr);
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	}

	memset(hdr_data, 0, 120);
	if ((hdr_field >> 6) & 1) {
		hdr = skb_mac_header(skb);
		memcpy(hdr_data, hdr, hdr_len[0]);
		len += hdr_len[0];
	}

	if ((hdr_field >> 5) & 1) {
		hdr = skb_network_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[1]);
		len += hdr_len[1];
	}

	if ((hdr_field >> 4) & 1) {
		hdr = skb_transport_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[2]);
		len += hdr_len[2];
	}
	return len;
}
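/* In the hdr_field bitmask tested above, bit 6 selects the L2 header,
 * bit 5 the L3 header and bit 4 the L4 header; bit 7 (checked in
 * ibmvnic_xmit() before any of this runs) indicates that header
 * descriptors should be sent to firmware at all.
 */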
/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field - bitfield determining needed headers
 * @hdr_data - buffer containing header data
 * @len - length of data buffer
 * @hdr_len - array of individual header lengths
 * @scrq_arr - descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr
 */
static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
			     union sub_crq *scrq_arr)
{
	union sub_crq hdr_desc;
	int tmp_len = len;
	u8 *data, *cur;
	int tmp;

	while (tmp_len > 0) {
		cur = hdr_data + len - tmp_len;

		memset(&hdr_desc, 0, sizeof(hdr_desc));
		if (cur != hdr_data) {
			data = hdr_desc.hdr_ext.data;
			tmp = tmp_len > 29 ? 29 : tmp_len;
			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
			hdr_desc.hdr_ext.len = tmp;
		} else {
			data = hdr_desc.hdr.data;
			tmp = tmp_len > 24 ? 24 : tmp_len;
			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
			hdr_desc.hdr.len = tmp;
			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
			hdr_desc.hdr.flag = hdr_field << 1;
		}
		memcpy(data, cur, tmp);
		tmp_len -= tmp;
		*scrq_arr = hdr_desc;
		scrq_arr++;
	}
}
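/* The 24/29 byte bounds on "tmp" above mirror the descriptor layouts:
 * the first descriptor also carries the per-layer header lengths and
 * flags, leaving room for only 24 bytes of header data, while each
 * hdr_ext extension descriptor is free to carry up to 29 bytes.
 */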
/**
 * build_hdr_descs_arr - build a header descriptor array
 * @txbuff - tx buffer containing socket buffer and header data
 * @num_entries - number of descriptors to be sent
 * @hdr_field - bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect
 */
static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
				int *num_entries, u8 hdr_field)
{
	int hdr_len[3] = {0, 0, 0};
	int tot_len, len;
	u8 *hdr_data = txbuff->hdr_data;

	tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
				 txbuff->hdr_data);
	len = tot_len;
	len -= 24;
	if (len > 0)
		*num_entries += len % 29 ? len / 29 + 1 : len / 29;
	create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
			 txbuff->indir_arr + 1);
}
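/* Example of the entry arithmetic above: a 54-byte header block
 * (14-byte Ethernet + 20-byte IPv4 + 20-byte TCP) leaves
 * len = 54 - 24 = 30 after the first header descriptor; since
 * 30 % 29 != 0, this adds 30 / 29 + 1 = 2 extension descriptors,
 * i.e. three header descriptors in total.
 */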
static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int queue_num = skb_get_queue_mapping(skb);
	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *tx_buff = NULL;
	struct ibmvnic_tx_pool *tx_pool;
	unsigned int tx_send_failed = 0;
	unsigned int tx_map_failed = 0;
	unsigned int tx_dropped = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;
	dma_addr_t data_dma_addr;
	struct netdev_queue *txq;
	bool used_bounce = false;
	unsigned long lpar_rc;
	union sub_crq tx_crq;
	unsigned int offset;
	int num_entries = 1;
	unsigned char *dst;
	u64 *handle_array;
	int index = 0;
	int ret = 0;

	tx_pool = &adapter->tx_pool[queue_num];
	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
					   off_txsubm_subcrqs));
	if (adapter->migrated) {
		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	index = tx_pool->free_map[tx_pool->consumer_index];
	offset = index * adapter->req_mtu;
	dst = tx_pool->long_term_buff.buff + offset;
	memset(dst, 0, adapter->req_mtu);
	skb_copy_from_linear_data(skb, dst, skb->len);
	data_dma_addr = tx_pool->long_term_buff.addr + offset;

	tx_pool->consumer_index =
	    (tx_pool->consumer_index + 1) %
		adapter->max_tx_entries_per_subcrq;

	tx_buff = &tx_pool->tx_buff[index];
	tx_buff->skb = skb;
	tx_buff->data_dma[0] = data_dma_addr;
	tx_buff->data_len[0] = skb->len;
	tx_buff->index = index;
	tx_buff->pool_index = queue_num;
	tx_buff->last_frag = true;
	tx_buff->used_bounce = used_bounce;

	memset(&tx_crq, 0, sizeof(tx_crq));
	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
	tx_crq.v1.type = IBMVNIC_TX_DESC;
	tx_crq.v1.n_crq_elem = 1;
	tx_crq.v1.n_sge = 1;
	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
	tx_crq.v1.correlator = cpu_to_be32(index);
	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

	if (adapter->vlan_header_insertion) {
		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		if (ip_hdr(skb)->version == 4)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
		else if (ip_hdr(skb)->version == 6)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;

		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
		else if (ip_hdr(skb)->protocol != IPPROTO_TCP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
		hdrs += 2;
	}
	/* determine if l2/3/4 headers are sent to firmware */
	if ((*hdrs >> 7) & 1 &&
	    (skb->protocol == htons(ETH_P_IP) ||
	     skb->protocol == htons(ETH_P_IPV6))) {
		build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
		tx_crq.v1.n_crq_elem = num_entries;
		tx_buff->indir_arr[0] = tx_crq;
		tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
						    sizeof(tx_buff->indir_arr),
						    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, tx_buff->indir_dma)) {
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "tx: unable to map descriptor array\n");
			tx_map_failed++;
			tx_dropped++;
			ret = NETDEV_TX_BUSY;
			goto out;
		}
		lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
					       (u64)tx_buff->indir_dma,
					       (u64)num_entries);
	} else {
		lpar_rc = send_subcrq(adapter, handle_array[queue_num],
				      &tx_crq);
	}
	if (lpar_rc != H_SUCCESS) {
		dev_err(dev, "tx failed with code %ld\n", lpar_rc);

		if (tx_pool->consumer_index == 0)
			tx_pool->consumer_index =
				adapter->max_tx_entries_per_subcrq - 1;
		else
			tx_pool->consumer_index--;

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_BUSY;
		goto out;
	}
	tx_packets++;
	tx_bytes += skb->len;
	txq->trans_start = jiffies;
	ret = NETDEV_TX_OK;

out:
	netdev->stats.tx_dropped += tx_dropped;
	netdev->stats.tx_bytes += tx_bytes;
	netdev->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;

	return ret;
}
static void ibmvnic_set_multi(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	if (netdev->flags & IFF_PROMISC) {
		if (!adapter->promisc_supported)
			return;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else if (netdev_mc_empty(netdev)) {
			/* Reject all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else {
			/* Accept one or more multicast(s) */
			netdev_for_each_mc_addr(ha, netdev) {
				memset(&crq, 0, sizeof(crq));
				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
						ha->addr);
				ibmvnic_send_crq(adapter, &crq);
			}
		}
	}
}
static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	union ibmvnic_crq crq;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memset(&crq, 0, sizeof(crq));
	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
	ibmvnic_send_crq(adapter, &crq);
	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
	return 0;
}
static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (new_mtu > adapter->req_mtu || new_mtu < adapter->min_mtu)
		return -EINVAL;

	netdev->mtu = new_mtu;
	return 0;
}
static void ibmvnic_tx_timeout(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int rc;

	/* Adapter timed out, resetting it */
	release_sub_crqs(adapter);
	rc = ibmvnic_reset_crq(adapter);
	if (rc)
		dev_err(&adapter->vdev->dev, "Adapter timeout, reset failed\n");
	else
		ibmvnic_send_crq_init(adapter);
}
static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_rx_buff *rx_buff)
{
	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];

	rx_buff->skb = NULL;

	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
	pool->next_alloc = (pool->next_alloc + 1) % pool->size;

	atomic_dec(&pool->available);
}
static int ibmvnic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int scrq_num = (int)(napi - adapter->napi);
	int frames_processed = 0;

restart_poll:
	while (frames_processed < budget) {
		struct ibmvnic_rx_buff *rx_buff;
		struct sk_buff *skb;
		union sub_crq *next;
		u32 length;
		u16 offset;
		u8 flags = 0;

		if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
			break;
		next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
		rx_buff =
		    (struct ibmvnic_rx_buff *)be64_to_cpu(next->
							  rx_comp.correlator);
		/* do error checking */
		if (next->rx_comp.rc) {
			netdev_err(netdev, "rx error %x\n", next->rx_comp.rc);
			/* free the entry */
			next->rx_comp.first = 0;
			remove_buff_from_pool(adapter, rx_buff);
			break;
		}

		length = be32_to_cpu(next->rx_comp.len);
		offset = be16_to_cpu(next->rx_comp.off_frame_data);
		flags = next->rx_comp.flags;
		skb = rx_buff->skb;
		skb_copy_to_linear_data(skb, rx_buff->data + offset,
					length);
		skb->vlan_tci = be16_to_cpu(next->rx_comp.vlan_tci);
		/* free the entry */
		next->rx_comp.first = 0;
		remove_buff_from_pool(adapter, rx_buff);

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, netdev);

		if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
		    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		napi_gro_receive(napi, skb); /* send it up */
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += length;
		frames_processed++;
	}
	replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);

	if (frames_processed < budget) {
		enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
		napi_complete(napi);
		if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
		    napi_reschedule(napi)) {
			disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
			goto restart_poll;
		}
	}
	return frames_processed;
}
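/* The enable/recheck/reschedule sequence at the end of ibmvnic_poll()
 * closes the race with a frame that arrives after the final
 * pending_scrq() check in the loop: if anything is pending once the
 * interrupt has been re-enabled, polling restarts immediately instead
 * of relying on an interrupt that may already have been consumed.
 */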
#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmvnic_netpoll_controller(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int i;

	replenish_pools(netdev_priv(dev));
	for (i = 0; i < adapter->req_rx_queues; i++)
		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
				     adapter->rx_scrq[i]);
}
#endif
static const struct net_device_ops ibmvnic_netdev_ops = {
	.ndo_open		= ibmvnic_open,
	.ndo_stop		= ibmvnic_close,
	.ndo_start_xmit		= ibmvnic_xmit,
	.ndo_set_rx_mode	= ibmvnic_set_multi,
	.ndo_set_mac_address	= ibmvnic_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ibmvnic_change_mtu,
	.ndo_tx_timeout		= ibmvnic_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ibmvnic_netpoll_controller,
#endif
};
/* ethtool functions */

static int ibmvnic_get_settings(struct net_device *netdev,
				struct ethtool_cmd *cmd)
{
	cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
			  SUPPORTED_FIBRE);
	cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
			    ADVERTISED_FIBRE);
	ethtool_cmd_speed_set(cmd, SPEED_1000);
	cmd->duplex = DUPLEX_FULL;
	cmd->port = PORT_FIBRE;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_ENABLE;
	return 0;
}
static void ibmvnic_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
	strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
}
static u32 ibmvnic_get_msglevel(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}

static u32 ibmvnic_get_link(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* Don't need to send a query because we request a logical link up at
	 * init and then we wait for link state indications
	 */
	return adapter->logical_link_state;
}
static void ibmvnic_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring)
{
	ring->rx_max_pending = 0;
	ring->tx_max_pending = 0;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = 0;
	ring->tx_pending = 0;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}
static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
}
static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmvnic_stats);
	default:
		return -EOPNOTSUPP;
	}
}
static void ibmvnic_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	union ibmvnic_crq crq;
	int i;

	memset(&crq, 0, sizeof(crq));
	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
	crq.request_statistics.cmd = REQUEST_STATISTICS;
	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
	crq.request_statistics.len =
	    cpu_to_be32(sizeof(struct ibmvnic_statistics));
	ibmvnic_send_crq(adapter, &crq);

	/* Wait for data to be written */
	init_completion(&adapter->stats_done);
	wait_for_completion(&adapter->stats_done);

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
		data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset);
}
static const struct ethtool_ops ibmvnic_ethtool_ops = {
	.get_settings		= ibmvnic_get_settings,
	.get_drvinfo		= ibmvnic_get_drvinfo,
	.get_msglevel		= ibmvnic_get_msglevel,
	.set_msglevel		= ibmvnic_set_msglevel,
	.get_link		= ibmvnic_get_link,
	.get_ringparam		= ibmvnic_get_ringparam,
	.get_strings		= ibmvnic_get_strings,
	.get_sset_count		= ibmvnic_get_sset_count,
	.get_ethtool_stats	= ibmvnic_get_ethtool_stats,
};
/* Routines for managing CRQs/sCRQs  */

static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");

	/* Close the sub-crqs */
	do {
		rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
					adapter->vdev->unit_address,
					scrq->crq_num);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_pages((unsigned long)scrq->msgs, 2);
	kfree(scrq);
}
static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
							*adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int rc;

	scrq = kmalloc(sizeof(*scrq), GFP_ATOMIC);
	if (!scrq)
		return NULL;

	scrq->msgs = (union sub_crq *)__get_free_pages(GFP_KERNEL, 2);
	if (!scrq->msgs) {
		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
		goto zero_page_failed;
	}
	memset(scrq->msgs, 0, 4 * PAGE_SIZE);

	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, scrq->msg_token)) {
		dev_warn(dev, "Couldn't map crq queue messages page\n");
		goto map_failed;
	}

	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);

	if (rc == H_RESOURCE)
		rc = ibmvnic_reset_crq(adapter);

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready, waiting.\n");
	} else if (rc) {
		dev_warn(dev, "Error %d registering sub-crq\n", rc);
		goto reg_failed;
	}

	scrq->adapter = adapter;
	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
	scrq->cur = 0;
	scrq->rx_skb_top = NULL;
	spin_lock_init(&scrq->lock);

	netdev_dbg(adapter->netdev,
		   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
		   scrq->crq_num, scrq->hw_irq, scrq->irq);

	return scrq;

reg_failed:
	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
map_failed:
	free_pages((unsigned long)scrq->msgs, 2);
zero_page_failed:
	kfree(scrq);

	return NULL;
}
static void release_sub_crqs(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i]) {
				free_irq(adapter->tx_scrq[i]->irq,
					 adapter->tx_scrq[i]);
				irq_dispose_mapping(adapter->tx_scrq[i]->irq);
				release_sub_crq_queue(adapter,
						      adapter->tx_scrq[i]);
			}
		adapter->tx_scrq = NULL;
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			if (adapter->rx_scrq[i]) {
				free_irq(adapter->rx_scrq[i]->irq,
					 adapter->rx_scrq[i]);
				irq_dispose_mapping(adapter->rx_scrq[i]->irq);
				release_sub_crq_queue(adapter,
						      adapter->rx_scrq[i]);
			}
		adapter->rx_scrq = NULL;
	}

	adapter->requested_caps = 0;
}
static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i])
				release_sub_crq_queue(adapter,
						      adapter->tx_scrq[i]);
		adapter->tx_scrq = NULL;
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			if (adapter->rx_scrq[i])
				release_sub_crq_queue(adapter,
						      adapter->rx_scrq[i]);
		adapter->rx_scrq = NULL;
	}

	adapter->requested_caps = 0;
}
static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
			    struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}
static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
			   struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	if (scrq->hw_irq > 0x100000000ULL) {
		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
		return 1;
	}

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}
static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
			       struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *txbuff;
	union sub_crq *next;
	int index;
	int i, j;
	u8 first;

restart_loop:
	while (pending_scrq(adapter, scrq)) {
		unsigned int pool = scrq->pool_index;

		next = ibmvnic_next_scrq(adapter, scrq);
		for (i = 0; i < next->tx_comp.num_comps; i++) {
			if (next->tx_comp.rcs[i]) {
				dev_err(dev, "tx error %x\n",
					next->tx_comp.rcs[i]);
				continue;
			}
			index = be32_to_cpu(next->tx_comp.correlators[i]);
			txbuff = &adapter->tx_pool[pool].tx_buff[index];

			for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
				if (!txbuff->data_dma[j])
					continue;

				txbuff->data_dma[j] = 0;
				txbuff->used_bounce = false;
			}
			/* if sub_crq was sent indirectly */
			first = txbuff->indir_arr[0].generic.first;
			if (first == IBMVNIC_CRQ_CMD) {
				dma_unmap_single(dev, txbuff->indir_dma,
						 sizeof(txbuff->indir_arr),
						 DMA_TO_DEVICE);
			}

			if (txbuff->last_frag)
				dev_kfree_skb_any(txbuff->skb);

			adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
						     producer_index] = index;
			adapter->tx_pool[pool].producer_index =
			    (adapter->tx_pool[pool].producer_index + 1) %
			    adapter->max_tx_entries_per_subcrq;
		}
		/* remove tx_comp scrq*/
		next->tx_comp.first = 0;
	}

	enable_scrq_irq(adapter, scrq);

	if (pending_scrq(adapter, scrq)) {
		disable_scrq_irq(adapter, scrq);
		goto restart_loop;
	}

	return 0;
}
static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	disable_scrq_irq(adapter, scrq);
	ibmvnic_complete_tx(adapter, scrq);

	return IRQ_HANDLED;
}
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
		disable_scrq_irq(adapter, scrq);
		__napi_schedule(&adapter->napi[scrq->scrq_num]);
	}

	return IRQ_HANDLED;
}
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int i = 0, j = 0;
	int rc = 0;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		scrq = adapter->tx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);

		if (scrq->irq == NO_IRQ) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_tx_irq_failed;
		}

		rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
				 0, "ibmvnic_tx", scrq);
		if (rc) {
			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_tx_irq_failed;
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		scrq = adapter->rx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
		if (scrq->irq == NO_IRQ) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_rx_irq_failed;
		}
		rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
				 0, "ibmvnic_rx", scrq);
		if (rc) {
			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_rx_irq_failed;
		}
	}
	return rc;

req_rx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
		irq_dispose_mapping(adapter->rx_scrq[j]->irq);
	}
	i = adapter->req_tx_queues;
req_tx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
		irq_dispose_mapping(adapter->tx_scrq[j]->irq);
	}
	release_sub_crqs_no_irqs(adapter);
	return rc;
}
static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue **allqueues;
	int registered_queues = 0;
	union ibmvnic_crq crq;
	int total_queues;
	int more = 0;
	int i;

	if (!retry) {
		/* Sub-CRQ entries are 32 byte long */
		int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);

		if (adapter->min_tx_entries_per_subcrq > entries_page ||
		    adapter->min_rx_add_entries_per_subcrq > entries_page) {
			dev_err(dev, "Fatal, invalid entries per sub-crq\n");
			goto allqueues_failed;
		}

		/* Get the minimum between the queried max and the entries
		 * that fit in our PAGE_SIZE
		 */
		adapter->req_tx_entries_per_subcrq =
		    adapter->max_tx_entries_per_subcrq > entries_page ?
		    entries_page : adapter->max_tx_entries_per_subcrq;
		adapter->req_rx_add_entries_per_subcrq =
		    adapter->max_rx_add_entries_per_subcrq > entries_page ?
		    entries_page : adapter->max_rx_add_entries_per_subcrq;

		/* Choosing the maximum number of queues supported by firmware*/
		adapter->req_tx_queues = adapter->max_tx_queues;
		adapter->req_rx_queues = adapter->max_rx_queues;
		adapter->req_rx_add_queues = adapter->max_rx_add_queues;

		adapter->req_mtu = adapter->max_mtu;
	}

	total_queues = adapter->req_tx_queues + adapter->req_rx_queues;

	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_ATOMIC);
	if (!allqueues)
		goto allqueues_failed;

	for (i = 0; i < total_queues; i++) {
		allqueues[i] = init_sub_crq_queue(adapter);
		if (!allqueues[i]) {
			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
			break;
		}
		registered_queues++;
	}

	/* Make sure we were able to register the minimum number of queues */
	if (registered_queues <
	    adapter->min_tx_queues + adapter->min_rx_queues) {
		dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
		goto tx_failed;
	}

	/* Distribute the failed allocated queues*/
	for (i = 0; i < total_queues - registered_queues + more; i++) {
		netdev_dbg(adapter->netdev, "Reducing number of queues\n");
		switch (i % 3) {
		case 0:
			if (adapter->req_rx_queues > adapter->min_rx_queues)
				adapter->req_rx_queues--;
			else
				more++;
			break;
		case 1:
			if (adapter->req_tx_queues > adapter->min_tx_queues)
				adapter->req_tx_queues--;
			else
				more++;
			break;
		}
	}

	adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
				   sizeof(*adapter->tx_scrq), GFP_ATOMIC);
	if (!adapter->tx_scrq)
		goto tx_failed;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		adapter->tx_scrq[i] = allqueues[i];
		adapter->tx_scrq[i]->pool_index = i;
	}

	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
				   sizeof(*adapter->rx_scrq), GFP_ATOMIC);
	if (!adapter->rx_scrq)
		goto rx_failed;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
		adapter->rx_scrq[i]->scrq_num = i;
	}

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_tx_entries_per_subcrq);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_MTU);
	crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
	ibmvnic_send_crq(adapter, &crq);

	if (adapter->netdev->flags & IFF_PROMISC) {
		if (adapter->promisc_supported) {
			crq.request_capability.capability =
			    cpu_to_be16(PROMISC_REQUESTED);
			crq.request_capability.number = cpu_to_be64(1);
			ibmvnic_send_crq(adapter, &crq);
		}
	} else {
		crq.request_capability.capability =
		    cpu_to_be16(PROMISC_REQUESTED);
		crq.request_capability.number = cpu_to_be64(0);
		ibmvnic_send_crq(adapter, &crq);
	}

	kfree(allqueues);

	return;

rx_failed:
	kfree(adapter->tx_scrq);
	adapter->tx_scrq = NULL;
tx_failed:
	for (i = 0; i < registered_queues; i++)
		release_sub_crq_queue(adapter, allqueues[i]);
	kfree(allqueues);
allqueues_failed:
	ibmvnic_remove(adapter->vdev);
}
static int pending_scrq(struct ibmvnic_adapter *adapter,
			struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry = &scrq->msgs[scrq->cur];

	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP || adapter->closing)
		return 1;
	else
		return 0;
}
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
					struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry;
	unsigned long flags;

	spin_lock_irqsave(&scrq->lock, flags);
	entry = &scrq->msgs[scrq->cur];
	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++scrq->cur == scrq->size)
			scrq->cur = 0;
	} else {
		entry = NULL;
	}
	spin_unlock_irqrestore(&scrq->lock, flags);

	return entry;
}
static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;

	crq = &queue->msgs[queue->cur];
	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++queue->cur == queue->size)
			queue->cur = 0;
	} else {
		crq = NULL;
	}

	return crq;
}
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)sub_crq;
	int rc;

	netdev_dbg(adapter->netdev,
		   "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(remote_handle),
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]),
		   (unsigned long int)cpu_to_be64(u64_crq[2]),
		   (unsigned long int)cpu_to_be64(u64_crq[3]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
				cpu_to_be64(remote_handle),
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]),
				cpu_to_be64(u64_crq[2]),
				cpu_to_be64(u64_crq[3]));

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_err(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}
static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
				u64 remote_handle, u64 ioba, u64 num_entries)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	int rc;

	/* Make sure the hypervisor sees the complete request */
	mb();
	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
				cpu_to_be64(remote_handle),
				ioba, num_entries);

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
	}

	return rc;
}
static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
			    union ibmvnic_crq *crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	int rc;

	netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_CRQ, ua,
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]));

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_warn(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT;
	netdev_dbg(adapter->netdev, "Sending CRQ init\n");

	return ibmvnic_send_crq(adapter, &crq);
}
static int ibmvnic_send_crq_init_complete(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;
	netdev_dbg(adapter->netdev, "Sending CRQ init complete\n");

	return ibmvnic_send_crq(adapter, &crq);
}
static int send_version_xchg(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
	crq.version_exchange.cmd = VERSION_EXCHANGE;
	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);

	return ibmvnic_send_crq(adapter, &crq);
}
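/* The login buffer built by send_login() below is laid out as the
 * fixed ibmvnic_login_buffer header, followed by req_tx_queues u64
 * tx sub-CRQ handles (tx_list_p) and req_rx_queues u64 rx sub-CRQ
 * handles (rx_list_p); the off_*comp_subcrqs fields record the byte
 * offsets of the two arrays so the server can locate them.
 */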
static void send_login(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
	struct ibmvnic_login_buffer *login_buffer;
	struct ibmvnic_inflight_cmd *inflight_cmd;
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t rsp_buffer_token;
	dma_addr_t buffer_token;
	size_t rsp_buffer_size;
	union ibmvnic_crq crq;
	unsigned long flags;
	size_t buffer_size;
	__be64 *tx_list_p;
	__be64 *rx_list_p;
	int i;

	buffer_size =
	    sizeof(struct ibmvnic_login_buffer) +
	    sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues);

	login_buffer = kmalloc(buffer_size, GFP_ATOMIC);
	if (!login_buffer)
		goto buf_alloc_failed;

	buffer_token = dma_map_single(dev, login_buffer, buffer_size,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(dev, buffer_token)) {
		dev_err(dev, "Couldn't map login buffer\n");
		goto buf_map_failed;
	}

	rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
			  sizeof(u64) * adapter->req_tx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;

	login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
	if (!login_rsp_buffer)
		goto buf_rsp_alloc_failed;

	rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
					  rsp_buffer_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, rsp_buffer_token)) {
		dev_err(dev, "Couldn't map login rsp buffer\n");
		goto buf_rsp_map_failed;
	}
	inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
	if (!inflight_cmd) {
		dev_err(dev, "Couldn't allocate inflight_cmd\n");
		goto inflight_alloc_failed;
	}
	adapter->login_buf = login_buffer;
	adapter->login_buf_token = buffer_token;
	adapter->login_buf_sz = buffer_size;
	adapter->login_rsp_buf = login_rsp_buffer;
	adapter->login_rsp_buf_token = rsp_buffer_token;
	adapter->login_rsp_buf_sz = rsp_buffer_size;

	login_buffer->len = cpu_to_be32(buffer_size);
	login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
	login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
	login_buffer->off_txcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
	login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
	login_buffer->off_rxcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
			sizeof(u64) * adapter->req_tx_queues);
	login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
	login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);

	tx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer));
	rx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer) +
			       sizeof(u64) * adapter->req_tx_queues);

	for (i = 0; i < adapter->req_tx_queues; i++) {
		if (adapter->tx_scrq[i]) {
			tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
						   crq_num);
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		if (adapter->rx_scrq[i]) {
			rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
						   crq_num);
		}
	}

	netdev_dbg(adapter->netdev, "Login Buffer:\n");
	for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_buf))[i]);
	}

	memset(&crq, 0, sizeof(crq));
	crq.login.first = IBMVNIC_CRQ_CMD;
	crq.login.cmd = LOGIN;
	crq.login.ioba = cpu_to_be32(buffer_token);
	crq.login.len = cpu_to_be32(buffer_size);

	memcpy(&inflight_cmd->crq, &crq, sizeof(crq));

	spin_lock_irqsave(&adapter->inflight_lock, flags);
	list_add_tail(&inflight_cmd->list, &adapter->inflight);
	spin_unlock_irqrestore(&adapter->inflight_lock, flags);

	ibmvnic_send_crq(adapter, &crq);

	return;

inflight_alloc_failed:
	dma_unmap_single(dev, rsp_buffer_token, rsp_buffer_size,
			 DMA_FROM_DEVICE);
buf_rsp_map_failed:
	kfree(login_rsp_buffer);
buf_rsp_alloc_failed:
	dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
buf_map_failed:
	kfree(login_buffer);
buf_alloc_failed:
	return;
}

static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
			     u32 len, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_map.first = IBMVNIC_CRQ_CMD;
	crq.request_map.cmd = REQUEST_MAP;
	crq.request_map.map_id = map_id;
	crq.request_map.ioba = cpu_to_be32(addr);
	crq.request_map.len = cpu_to_be32(len);
	ibmvnic_send_crq(adapter, &crq);
}
static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_unmap.first = IBMVNIC_CRQ_CMD;
	crq.request_unmap.cmd = REQUEST_UNMAP;
	crq.request_unmap.map_id = map_id;
	ibmvnic_send_crq(adapter, &crq);
}
static void send_map_query(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.query_map.first = IBMVNIC_CRQ_CMD;
	crq.query_map.cmd = QUERY_MAP;
	ibmvnic_send_crq(adapter, &crq);
}

/* Send a series of CRQs requesting various capabilities of the VNIC server */
static void send_cap_queries(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	atomic_set(&adapter->running_cap_queries, 0);
	memset(&crq, 0, sizeof(crq));
	crq.query_capability.first = IBMVNIC_CRQ_CMD;
	crq.query_capability.cmd = QUERY_CAPABILITY;

	crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_MTU);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MTU);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);
}
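
/* Process the QUERY_IP_OFFLOAD response: log the offload capabilities the
 * server reported, translate them into netdev feature flags, and send a
 * CONTROL_IP_OFFLOAD CRQ enabling the checksum offloads we intend to use.
 */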
static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
	union ibmvnic_crq crq;
	int i;

	dma_unmap_single(dev, adapter->ip_offload_tok,
			 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);

	netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
	for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(buf))[i]);

	netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
	netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
		   buf->tcp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
		   buf->tcp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
		   buf->udp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
		   buf->udp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
		   buf->large_tx_ipv4);
	netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
		   buf->large_tx_ipv6);
	netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
		   buf->large_rx_ipv4);
	netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
		   buf->large_rx_ipv6);
	netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
		   buf->max_ipv4_header_size);
	netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
		   buf->max_ipv6_header_size);
	netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
		   buf->max_tcp_header_size);
	netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
		   buf->max_udp_header_size);
	netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
		   buf->max_large_tx_size);
	netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
		   buf->max_large_rx_size);
	netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
		   buf->ipv6_extension_header);
	netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
		   buf->tcp_pseudosum_req);
	netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
		   buf->num_ipv6_ext_headers);
	netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
		   buf->off_ipv6_ext_headers);

	adapter->ip_offload_ctrl_tok =
	    dma_map_single(dev, &adapter->ip_offload_ctrl,
			   sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);

	if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
		dev_err(dev, "Couldn't map ip offload control buffer\n");
		return;
	}

	adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
	adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
	adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
	adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
	adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;

	/* large_tx/rx disabled for now, additional features needed */
	adapter->ip_offload_ctrl.large_tx_ipv4 = 0;
	adapter->ip_offload_ctrl.large_tx_ipv6 = 0;
	adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
	adapter->ip_offload_ctrl.large_rx_ipv6 = 0;

	adapter->netdev->features = NETIF_F_GSO;

	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
		adapter->netdev->features |= NETIF_F_IP_CSUM;

	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
		adapter->netdev->features |= NETIF_F_IPV6_CSUM;

	if ((adapter->netdev->features &
	    (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
		adapter->netdev->features |= NETIF_F_RXCSUM;

	memset(&crq, 0, sizeof(crq));
	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
	crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
	crq.control_ip_offload.len =
	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
	crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
	ibmvnic_send_crq(adapter, &crq);
}
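
/* Process a REQUEST_ERROR_INFO response: look up the error buffer posted
 * for this error id, dump its contents, and release the buffer.
 */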
static void handle_error_info_rsp(union ibmvnic_crq *crq,
				  struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff, *tmp;
	unsigned long flags;
	bool found = false;
	int i;

	/* a non-zero return code means the request for error detail failed */
	if (crq->request_error_rsp.rc.code) {
		dev_info(dev, "Request Error Rsp returned with rc=%x\n",
			 crq->request_error_rsp.rc.code);
		return;
	}

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
		if (error_buff->error_id == crq->request_error_rsp.error_id) {
			found = true;
			list_del(&error_buff->list);
			break;
		}
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	if (!found) {
		dev_err(dev, "Couldn't find error id %x\n",
			crq->request_error_rsp.error_id);
		return;
	}

	dev_err(dev, "Detailed info for error id %x:",
		crq->request_error_rsp.error_id);

	for (i = 0; i < error_buff->len; i++) {
		pr_cont("%02x", (int)error_buff->buff[i]);
		if (i % 8 == 7)
			pr_cont(" ");
	}
	pr_cont("\n");

	dma_unmap_single(dev, error_buff->dma, error_buff->len,
			 DMA_FROM_DEVICE);
	kfree(error_buff->buff);
	kfree(error_buff);
}
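
/* Process a REQUEST_DUMP_SIZE response: allocate and map a buffer of the
 * reported size, then send a REQUEST_DUMP CRQ asking firmware to fill it.
 */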
static void handle_dump_size_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	int len = be32_to_cpu(crq->request_dump_size_rsp.len);
	struct ibmvnic_inflight_cmd *inflight_cmd;
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq newcrq;
	unsigned long flags;

	/* allocate and map buffer */
	adapter->dump_data = kmalloc(len, GFP_KERNEL);
	if (!adapter->dump_data) {
		complete(&adapter->fw_done);
		return;
	}

	adapter->dump_data_token = dma_map_single(dev, adapter->dump_data, len,
						  DMA_BIDIRECTIONAL);

	if (dma_mapping_error(dev, adapter->dump_data_token)) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			dev_err(dev, "Couldn't map dump data\n");
		kfree(adapter->dump_data);
		complete(&adapter->fw_done);
		return;
	}

	inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
	if (!inflight_cmd) {
		dma_unmap_single(dev, adapter->dump_data_token, len,
				 DMA_BIDIRECTIONAL);
		kfree(adapter->dump_data);
		complete(&adapter->fw_done);
		return;
	}

	memset(&newcrq, 0, sizeof(newcrq));
	newcrq.request_dump.first = IBMVNIC_CRQ_CMD;
	newcrq.request_dump.cmd = REQUEST_DUMP;
	newcrq.request_dump.ioba = cpu_to_be32(adapter->dump_data_token);
	newcrq.request_dump.len = cpu_to_be32(adapter->dump_data_size);

	memcpy(&inflight_cmd->crq, &newcrq, sizeof(newcrq));

	spin_lock_irqsave(&adapter->inflight_lock, flags);
	list_add_tail(&inflight_cmd->list, &adapter->inflight);
	spin_unlock_irqrestore(&adapter->inflight_lock, flags);

	ibmvnic_send_crq(adapter, &newcrq);
}
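
/* Handle an unsolicited ERROR_INDICATION: allocate a DMA buffer for the
 * detailed error data and request it from firmware with REQUEST_ERROR_INFO.
 */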
static void handle_error_indication(union ibmvnic_crq *crq,
				    struct ibmvnic_adapter *adapter)
{
	int detail_len = be32_to_cpu(crq->error_indication.detail_error_sz);
	struct ibmvnic_inflight_cmd *inflight_cmd;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff;
	union ibmvnic_crq new_crq;
	unsigned long flags;

	dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
		crq->error_indication.flags & IBMVNIC_FATAL_ERROR ?
		    "FATAL " : "",
		crq->error_indication.error_id,
		crq->error_indication.error_cause);

	error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
	if (!error_buff)
		return;

	error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
	if (!error_buff->buff) {
		kfree(error_buff);
		return;
	}

	error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, error_buff->dma)) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			dev_err(dev, "Couldn't map error buffer\n");
		kfree(error_buff->buff);
		kfree(error_buff);
		return;
	}

	inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
	if (!inflight_cmd) {
		dma_unmap_single(dev, error_buff->dma, detail_len,
				 DMA_FROM_DEVICE);
		kfree(error_buff->buff);
		kfree(error_buff);
		return;
	}

	error_buff->len = detail_len;
	error_buff->error_id = crq->error_indication.error_id;

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_add_tail(&error_buff->list, &adapter->errors);
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	memset(&new_crq, 0, sizeof(new_crq));
	new_crq.request_error_info.first = IBMVNIC_CRQ_CMD;
	new_crq.request_error_info.cmd = REQUEST_ERROR_INFO;
	new_crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
	new_crq.request_error_info.len = cpu_to_be32(detail_len);
	new_crq.request_error_info.error_id = crq->error_indication.error_id;

	/* track the command we actually queued, so the inflight cleanup
	 * can recognize it as a REQUEST_ERROR_INFO
	 */
	memcpy(&inflight_cmd->crq, &new_crq, sizeof(new_crq));

	spin_lock_irqsave(&adapter->inflight_lock, flags);
	list_add_tail(&inflight_cmd->list, &adapter->inflight);
	spin_unlock_irqrestore(&adapter->inflight_lock, flags);

	ibmvnic_send_crq(adapter, &new_crq);
}
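
/* Update dev_addr with the address returned in the CHANGE_MAC_ADDR response */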
static void handle_change_mac_rsp(union ibmvnic_crq *crq,
				  struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->change_mac_addr_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
		return;
	}
	memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
	       ETH_ALEN);
}
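
/* Process a REQUEST_CAPABILITY response. On PARTIALSUCCESS the server
 * granted less than we asked for, so record the granted value and
 * renegotiate the sub-CRQs with the smaller request.
 */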
static void handle_request_cap_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u64 *req_value;
	char *name;

	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
	case REQ_TX_QUEUES:
		req_value = &adapter->req_tx_queues;
		name = "tx";
		break;
	case REQ_RX_QUEUES:
		req_value = &adapter->req_rx_queues;
		name = "rx";
		break;
	case REQ_RX_ADD_QUEUES:
		req_value = &adapter->req_rx_add_queues;
		name = "rx_add";
		break;
	case REQ_TX_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_tx_entries_per_subcrq;
		name = "tx_entries_per_subcrq";
		break;
	case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_rx_add_entries_per_subcrq;
		name = "rx_add_entries_per_subcrq";
		break;
	case REQ_MTU:
		req_value = &adapter->req_mtu;
		name = "mtu";
		break;
	case PROMISC_REQUESTED:
		req_value = &adapter->promisc;
		name = "promisc";
		break;
	default:
		dev_err(dev, "Got invalid cap request rsp %d\n",
			crq->request_capability.capability);
		return;
	}

	switch (crq->request_capability_rsp.rc.code) {
	case SUCCESS:
		break;
	case PARTIALSUCCESS:
		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
			 *req_value,
			 (long int)be32_to_cpu(crq->request_capability_rsp.
					       number), name);
		release_sub_crqs_no_irqs(adapter);
		*req_value = be32_to_cpu(crq->request_capability_rsp.number);
		init_sub_crqs(adapter, 1);
		return;
	default:
		dev_err(dev, "Error %d in request cap rsp\n",
			crq->request_capability_rsp.rc.code);
		return;
	}

	/* Done receiving requested capabilities, query IP offload support */
	if (++adapter->requested_caps == 7) {
		union ibmvnic_crq newcrq;
		int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
		struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
		    &adapter->ip_offload_buf;

		adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
							 buf_sz,
							 DMA_FROM_DEVICE);

		if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "Couldn't map offload buffer\n");
			return;
		}

		memset(&newcrq, 0, sizeof(newcrq));
		newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
		newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
		newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
		newcrq.query_ip_offload.ioba =
		    cpu_to_be32(adapter->ip_offload_tok);

		ibmvnic_send_crq(adapter, &newcrq);
	}
}
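
/* Process the LOGIN response: a non-zero return code means the server could
 * not allocate the requested queues and the login must be renegotiated.
 */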
static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
			    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
	struct ibmvnic_login_buffer *login = adapter->login_buf;
	union ibmvnic_crq crq;
	int i;

	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
			 DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, adapter->login_rsp_buf_token,
			 adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);

	/* If the number of queues requested can't be allocated by the
	 * server, the login response will return with code 1. We will need
	 * to resend the login buffer with fewer queues requested.
	 */
	if (login_rsp_crq->generic.rc.code) {
		adapter->renegotiate = true;
		complete(&adapter->init_done);
		return 0;
	}

	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_rsp_buf))[i]);
	}

	/* Sanity checks */
	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
	     adapter->req_rx_add_queues !=
	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
		ibmvnic_remove(adapter->vdev);
		return -EIO;
	}
	complete(&adapter->init_done);

	memset(&crq, 0, sizeof(crq));
	crq.request_ras_comp_num.first = IBMVNIC_CRQ_CMD;
	crq.request_ras_comp_num.cmd = REQUEST_RAS_COMP_NUM;
	ibmvnic_send_crq(adapter, &crq);

	return 0;
}
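
/* Process a REQUEST_MAP response; on failure, clear the map id recorded in
 * the tx/rx pool that owns the failed long term buffer.
 */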
static void handle_request_map_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u8 map_id = crq->request_map_rsp.map_id;
	int tx_subcrqs;
	int rx_subcrqs;
	long rc;
	int i;

	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	rx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);

	rc = crq->request_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in REQUEST_MAP_RSP\n", rc);
		adapter->map_id--;
		/* need to find and zero tx/rx_pool map_id */
		for (i = 0; i < tx_subcrqs; i++) {
			if (adapter->tx_pool[i].long_term_buff.map_id == map_id)
				adapter->tx_pool[i].long_term_buff.map_id = 0;
		}
		for (i = 0; i < rx_subcrqs; i++) {
			if (adapter->rx_pool[i].long_term_buff.map_id == map_id)
				adapter->rx_pool[i].long_term_buff.map_id = 0;
		}
	}
	complete(&adapter->fw_done);
}

static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
				     struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->request_unmap_rsp.rc.code;
	if (rc)
		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
}

static void handle_query_map_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->query_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
		return;
	}
	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
		   crq->query_map_rsp.free_pages);
}
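
/* Record a QUERY_CAPABILITY response in the adapter structure. Once all
 * outstanding queries have completed, the sub-CRQs can be initialized.
 */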
static void handle_query_cap_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	atomic_dec(&adapter->running_cap_queries);
	netdev_dbg(netdev, "Outstanding queries: %d\n",
		   atomic_read(&adapter->running_cap_queries));
	rc = crq->query_capability.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
		goto out;
	}

	switch (be16_to_cpu(crq->query_capability.capability)) {
	case MIN_TX_QUEUES:
		adapter->min_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_queues = %lld\n",
			   adapter->min_tx_queues);
		break;
	case MIN_RX_QUEUES:
		adapter->min_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_queues = %lld\n",
			   adapter->min_rx_queues);
		break;
	case MIN_RX_ADD_QUEUES:
		adapter->min_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
			   adapter->min_rx_add_queues);
		break;
	case MAX_TX_QUEUES:
		adapter->max_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_queues = %lld\n",
			   adapter->max_tx_queues);
		break;
	case MAX_RX_QUEUES:
		adapter->max_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_queues = %lld\n",
			   adapter->max_rx_queues);
		break;
	case MAX_RX_ADD_QUEUES:
		adapter->max_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
			   adapter->max_rx_add_queues);
		break;
	case MIN_TX_ENTRIES_PER_SUBCRQ:
		adapter->min_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
			   adapter->min_tx_entries_per_subcrq);
		break;
	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->min_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->min_rx_add_entries_per_subcrq);
		break;
	case MAX_TX_ENTRIES_PER_SUBCRQ:
		adapter->max_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
			   adapter->max_tx_entries_per_subcrq);
		break;
	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->max_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->max_rx_add_entries_per_subcrq);
		break;
	case TCP_IP_OFFLOAD:
		adapter->tcp_ip_offload =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
			   adapter->tcp_ip_offload);
		break;
	case PROMISC_SUPPORTED:
		adapter->promisc_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "promisc_supported = %lld\n",
			   adapter->promisc_supported);
		break;
	case MIN_MTU:
		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
		break;
	case MAX_MTU:
		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
		break;
	case MAX_MULTICAST_FILTERS:
		adapter->max_multicast_filters =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
			   adapter->max_multicast_filters);
		break;
	case VLAN_HEADER_INSERTION:
		adapter->vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		if (adapter->vlan_header_insertion)
			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
			   adapter->vlan_header_insertion);
		break;
	case MAX_TX_SG_ENTRIES:
		adapter->max_tx_sg_entries =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
			   adapter->max_tx_sg_entries);
		break;
	case RX_SG_SUPPORTED:
		adapter->rx_sg_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
			   adapter->rx_sg_supported);
		break;
	case OPT_TX_COMP_SUB_QUEUES:
		adapter->opt_tx_comp_sub_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
			   adapter->opt_tx_comp_sub_queues);
		break;
	case OPT_RX_COMP_QUEUES:
		adapter->opt_rx_comp_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
			   adapter->opt_rx_comp_queues);
		break;
	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
		adapter->opt_rx_bufadd_q_per_rx_comp_q =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
		break;
	case OPT_TX_ENTRIES_PER_SUBCRQ:
		adapter->opt_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
			   adapter->opt_tx_entries_per_subcrq);
		break;
	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
		adapter->opt_rxba_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
			   adapter->opt_rxba_entries_per_subcrq);
		break;
	case TX_RX_DESC_REQ:
		adapter->tx_rx_desc_req = crq->query_capability.number;
		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
			   adapter->tx_rx_desc_req);
		break;
	default:
		netdev_err(netdev, "Got invalid cap rsp %d\n",
			   crq->query_capability.capability);
	}

out:
	/* We're done querying the capabilities, initialize sub-crqs */
	if (atomic_read(&adapter->running_cap_queries) == 0)
		init_sub_crqs(adapter, 0);
}
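
/* Apply a CONTROL_RAS response to the firmware component it targets,
 * identified by correlator.
 */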
static void handle_control_ras_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	u8 correlator = crq->control_ras_rsp.correlator;
	struct device *dev = &adapter->vdev->dev;
	bool found = false;
	int i;

	if (crq->control_ras_rsp.rc.code) {
		dev_warn(dev, "Control ras failed rc=%d\n",
			 crq->control_ras_rsp.rc.code);
		return;
	}

	for (i = 0; i < adapter->ras_comp_num; i++) {
		if (adapter->ras_comps[i].correlator == correlator) {
			found = true;
			break;
		}
	}

	if (!found) {
		dev_warn(dev, "Correlator not found on control_ras_rsp\n");
		return;
	}

	switch (crq->control_ras_rsp.op) {
	case IBMVNIC_TRACE_LEVEL:
		adapter->ras_comps[i].trace_level = crq->control_ras.level;
		break;
	case IBMVNIC_ERROR_LEVEL:
		adapter->ras_comps[i].error_check_level =
		    crq->control_ras.level;
		break;
	case IBMVNIC_TRACE_PAUSE:
		adapter->ras_comp_int[i].paused = 1;
		break;
	case IBMVNIC_TRACE_RESUME:
		adapter->ras_comp_int[i].paused = 0;
		break;
	case IBMVNIC_TRACE_ON:
		adapter->ras_comps[i].trace_on = 1;
		break;
	case IBMVNIC_TRACE_OFF:
		adapter->ras_comps[i].trace_on = 0;
		break;
	case IBMVNIC_CHG_TRACE_BUFF_SZ:
		/* trace_buff_sz is 3 bytes, stuff it into an int */
		((u8 *)(&adapter->ras_comps[i].trace_buff_size))[0] = 0;
		((u8 *)(&adapter->ras_comps[i].trace_buff_size))[1] =
		    crq->control_ras_rsp.trace_buff_sz[0];
		((u8 *)(&adapter->ras_comps[i].trace_buff_size))[2] =
		    crq->control_ras_rsp.trace_buff_sz[1];
		((u8 *)(&adapter->ras_comps[i].trace_buff_size))[3] =
		    crq->control_ras_rsp.trace_buff_sz[2];
		break;
	default:
		dev_err(dev, "invalid op %d on control_ras_rsp",
			crq->control_ras_rsp.op);
	}
}
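
/* debugfs read handler for the per-component "trace" file: collects the
 * firmware trace into a DMA buffer and copies it to userspace.
 */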
static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len,
			  loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_fw_trace_entry *trace;
	int num = ras_comp_int->num;
	union ibmvnic_crq crq;
	dma_addr_t trace_tok;

	if (*ppos >= be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
		return 0;

	trace =
	    dma_alloc_coherent(dev,
			       be32_to_cpu(adapter->ras_comps[num].
					   trace_buff_size), &trace_tok,
			       GFP_KERNEL);
	if (!trace) {
		dev_err(dev, "Couldn't alloc trace buffer\n");
		return 0;
	}

	memset(&crq, 0, sizeof(crq));
	crq.collect_fw_trace.first = IBMVNIC_CRQ_CMD;
	crq.collect_fw_trace.cmd = COLLECT_FW_TRACE;
	crq.collect_fw_trace.correlator = adapter->ras_comps[num].correlator;
	crq.collect_fw_trace.ioba = cpu_to_be32(trace_tok);
	crq.collect_fw_trace.len = adapter->ras_comps[num].trace_buff_size;
	ibmvnic_send_crq(adapter, &crq);

	init_completion(&adapter->fw_done);
	wait_for_completion(&adapter->fw_done);

	if (*ppos + len > be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
		len =
		    be32_to_cpu(adapter->ras_comps[num].trace_buff_size) -
		    *ppos;

	copy_to_user(user_buf, &((u8 *)trace)[*ppos], len);

	dma_free_coherent(dev,
			  be32_to_cpu(adapter->ras_comps[num].trace_buff_size),
			  trace, trace_tok);
	*ppos += len;
	return len;
}

static const struct file_operations trace_ops = {
	.owner		= THIS_MODULE,
	.open		= simple_open,
	.read		= trace_read,
};

static ssize_t paused_read(struct file *file, char __user *user_buf,
			   size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	char buff[5]; /*  1 or 0 plus \n and \0 */
	int size;

	size = sprintf(buff, "%d\n", adapter->ras_comp_int[num].paused);

	if (*ppos >= size)
		return 0;

	copy_to_user(user_buf, buff, size);
	*ppos += size;
	return size;
}

static ssize_t paused_write(struct file *file, const char __user *user_buf,
			    size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	union ibmvnic_crq crq;
	unsigned long val;
	char buff[9]; /* decimal max int plus \n and \0 */

	copy_from_user(buff, user_buf, sizeof(buff));
	/* kstrtoul() returns an error code; the value goes via the pointer */
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;

	adapter->ras_comp_int[num].paused = val ? 1 : 0;

	memset(&crq, 0, sizeof(crq));
	crq.control_ras.first = IBMVNIC_CRQ_CMD;
	crq.control_ras.cmd = CONTROL_RAS;
	crq.control_ras.correlator = adapter->ras_comps[num].correlator;
	crq.control_ras.op = val ? IBMVNIC_TRACE_PAUSE : IBMVNIC_TRACE_RESUME;
	ibmvnic_send_crq(adapter, &crq);

	return len;
}

static const struct file_operations paused_ops = {
	.owner		= THIS_MODULE,
	.open		= simple_open,
	.read		= paused_read,
	.write		= paused_write,
};

static ssize_t tracing_read(struct file *file, char __user *user_buf,
			    size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	char buff[5]; /*  1 or 0 plus \n and \0 */
	int size;

	size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_on);

	if (*ppos >= size)
		return 0;

	copy_to_user(user_buf, buff, size);
	*ppos += size;
	return size;
}

static ssize_t tracing_write(struct file *file, const char __user *user_buf,
			     size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	union ibmvnic_crq crq;
	unsigned long val;
	char buff[9]; /* decimal max int plus \n and \0 */

	copy_from_user(buff, user_buf, sizeof(buff));
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;

	memset(&crq, 0, sizeof(crq));
	crq.control_ras.first = IBMVNIC_CRQ_CMD;
	crq.control_ras.cmd = CONTROL_RAS;
	crq.control_ras.correlator = adapter->ras_comps[num].correlator;
	crq.control_ras.op = val ? IBMVNIC_TRACE_ON : IBMVNIC_TRACE_OFF;
	ibmvnic_send_crq(adapter, &crq);

	return len;
}

static const struct file_operations tracing_ops = {
	.owner		= THIS_MODULE,
	.open		= simple_open,
	.read		= tracing_read,
	.write		= tracing_write,
};

static ssize_t error_level_read(struct file *file, char __user *user_buf,
				size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	char buff[5]; /* decimal max char plus \n and \0 */
	int size;

	size = sprintf(buff, "%d\n", adapter->ras_comps[num].error_check_level);

	if (*ppos >= size)
		return 0;

	copy_to_user(user_buf, buff, size);
	*ppos += size;
	return size;
}

static ssize_t error_level_write(struct file *file, const char __user *user_buf,
				 size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	union ibmvnic_crq crq;
	unsigned long val;
	char buff[9]; /* decimal max int plus \n and \0 */

	copy_from_user(buff, user_buf, sizeof(buff));
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;

	if (val > 9)
		val = 9;

	memset(&crq, 0, sizeof(crq));
	crq.control_ras.first = IBMVNIC_CRQ_CMD;
	crq.control_ras.cmd = CONTROL_RAS;
	crq.control_ras.correlator = adapter->ras_comps[num].correlator;
	crq.control_ras.op = IBMVNIC_ERROR_LEVEL;
	crq.control_ras.level = val;
	ibmvnic_send_crq(adapter, &crq);

	return len;
}

static const struct file_operations error_level_ops = {
	.owner		= THIS_MODULE,
	.open		= simple_open,
	.read		= error_level_read,
	.write		= error_level_write,
};

static ssize_t trace_level_read(struct file *file, char __user *user_buf,
				size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	char buff[5]; /* decimal max char plus \n and \0 */
	int size;

	size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_level);

	if (*ppos >= size)
		return 0;

	copy_to_user(user_buf, buff, size);
	*ppos += size;
	return size;
}

static ssize_t trace_level_write(struct file *file, const char __user *user_buf,
				 size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	union ibmvnic_crq crq;
	unsigned long val;
	char buff[9]; /* decimal max int plus \n and \0 */

	copy_from_user(buff, user_buf, sizeof(buff));
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;

	if (val > 9)
		val = 9;

	memset(&crq, 0, sizeof(crq));
	crq.control_ras.first = IBMVNIC_CRQ_CMD;
	crq.control_ras.cmd = CONTROL_RAS;
	crq.control_ras.correlator =
	    adapter->ras_comps[ras_comp_int->num].correlator;
	crq.control_ras.op = IBMVNIC_TRACE_LEVEL;
	crq.control_ras.level = val;
	ibmvnic_send_crq(adapter, &crq);

	return len;
}

static const struct file_operations trace_level_ops = {
	.owner		= THIS_MODULE,
	.open		= simple_open,
	.read		= trace_level_read,
	.write		= trace_level_write,
};

static ssize_t trace_buff_size_read(struct file *file, char __user *user_buf,
				    size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	char buff[9]; /* decimal max int plus \n and \0 */
	int size;

	size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_buff_size);

	if (*ppos >= size)
		return 0;

	copy_to_user(user_buf, buff, size);
	*ppos += size;
	return size;
}

static ssize_t trace_buff_size_write(struct file *file,
				     const char __user *user_buf, size_t len,
				     loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	union ibmvnic_crq crq;
	unsigned long val;
	char buff[9]; /* decimal max int plus \n and \0 */

	copy_from_user(buff, user_buf, sizeof(buff));
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;

	memset(&crq, 0, sizeof(crq));
	crq.control_ras.first = IBMVNIC_CRQ_CMD;
	crq.control_ras.cmd = CONTROL_RAS;
	crq.control_ras.correlator =
	    adapter->ras_comps[ras_comp_int->num].correlator;
	crq.control_ras.op = IBMVNIC_CHG_TRACE_BUFF_SZ;
	/* trace_buff_sz is 3 bytes, stuff an int into it */
	crq.control_ras.trace_buff_sz[0] = ((u8 *)(&val))[5];
	crq.control_ras.trace_buff_sz[1] = ((u8 *)(&val))[6];
	crq.control_ras.trace_buff_sz[2] = ((u8 *)(&val))[7];
	ibmvnic_send_crq(adapter, &crq);

	return len;
}

static const struct file_operations trace_size_ops = {
	.owner		= THIS_MODULE,
	.open		= simple_open,
	.read		= trace_buff_size_read,
	.write		= trace_buff_size_write,
};

static void handle_request_ras_comps_rsp(union ibmvnic_crq *crq,
					 struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct dentry *dir_ent;
	struct dentry *ent;
	int i;

	debugfs_remove_recursive(adapter->ras_comps_ent);

	adapter->ras_comps_ent = debugfs_create_dir("ras_comps",
						    adapter->debugfs_dir);
	if (!adapter->ras_comps_ent || IS_ERR(adapter->ras_comps_ent)) {
		dev_info(dev, "debugfs create ras_comps dir failed\n");
		return;
	}

	for (i = 0; i < adapter->ras_comp_num; i++) {
		dir_ent = debugfs_create_dir(adapter->ras_comps[i].name,
					     adapter->ras_comps_ent);
		if (!dir_ent || IS_ERR(dir_ent)) {
			dev_info(dev, "debugfs create %s dir failed\n",
				 adapter->ras_comps[i].name);
			continue;
		}

		adapter->ras_comp_int[i].adapter = adapter;
		adapter->ras_comp_int[i].num = i;
		adapter->ras_comp_int[i].desc_blob.data =
		    &adapter->ras_comps[i].description;
		adapter->ras_comp_int[i].desc_blob.size =
		    sizeof(adapter->ras_comps[i].description);

		/* Don't need to remember the dentry's because the debugfs dir
		 * gets removed recursively
		 */
		ent = debugfs_create_blob("description", S_IRUGO, dir_ent,
					  &adapter->ras_comp_int[i].desc_blob);
		ent = debugfs_create_file("trace_buf_size", S_IRUGO | S_IWUSR,
					  dir_ent, &adapter->ras_comp_int[i],
					  &trace_size_ops);
		ent = debugfs_create_file("trace_level",
					  S_IRUGO |
					  (adapter->ras_comps[i].trace_level !=
					   0xFF ? S_IWUSR : 0),
					  dir_ent, &adapter->ras_comp_int[i],
					  &trace_level_ops);
		ent = debugfs_create_file("error_level",
					  S_IRUGO |
					  (adapter->
					   ras_comps[i].error_check_level !=
					   0xFF ? S_IWUSR : 0),
					  dir_ent, &adapter->ras_comp_int[i],
					  &error_level_ops);
		ent = debugfs_create_file("tracing", S_IRUGO | S_IWUSR,
					  dir_ent, &adapter->ras_comp_int[i],
					  &tracing_ops);
		ent = debugfs_create_file("paused", S_IRUGO | S_IWUSR,
					  dir_ent, &adapter->ras_comp_int[i],
					  &paused_ops);
		ent = debugfs_create_file("trace", S_IRUGO, dir_ent,
					  &adapter->ras_comp_int[i],
					  &trace_ops);
	}
}

static void handle_request_ras_comp_num_rsp(union ibmvnic_crq *crq,
					    struct ibmvnic_adapter *adapter)
{
	int len = adapter->ras_comp_num * sizeof(struct ibmvnic_fw_component);
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq newcrq;

	adapter->ras_comps = dma_alloc_coherent(dev, len,
						&adapter->ras_comps_tok,
						GFP_KERNEL);
	if (!adapter->ras_comps) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			dev_err(dev, "Couldn't alloc fw comps buffer\n");
		return;
	}

	adapter->ras_comp_int = kmalloc(adapter->ras_comp_num *
					sizeof(struct ibmvnic_fw_comp_internal),
					GFP_KERNEL);
	if (!adapter->ras_comp_int) {
		dma_free_coherent(dev, len, adapter->ras_comps,
				  adapter->ras_comps_tok);
		return;
	}

	memset(&newcrq, 0, sizeof(newcrq));
	newcrq.request_ras_comps.first = IBMVNIC_CRQ_CMD;
	newcrq.request_ras_comps.cmd = REQUEST_RAS_COMPS;
	newcrq.request_ras_comps.ioba = cpu_to_be32(adapter->ras_comps_tok);
	newcrq.request_ras_comps.len = cpu_to_be32(len);
	ibmvnic_send_crq(adapter, &newcrq);
}
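
/* Clean up commands that were still in flight when the CRQ went away,
 * releasing their buffers and completing any waiters.
 */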
static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_inflight_cmd *inflight_cmd, *tmp1;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff, *tmp2;
	unsigned long flags;
	unsigned long flags2;

	spin_lock_irqsave(&adapter->inflight_lock, flags);
	list_for_each_entry_safe(inflight_cmd, tmp1, &adapter->inflight, list) {
		switch (inflight_cmd->crq.generic.cmd) {
		case LOGIN:
			dma_unmap_single(dev, adapter->login_buf_token,
					 adapter->login_buf_sz,
					 DMA_BIDIRECTIONAL);
			dma_unmap_single(dev, adapter->login_rsp_buf_token,
					 adapter->login_rsp_buf_sz,
					 DMA_BIDIRECTIONAL);
			kfree(adapter->login_rsp_buf);
			kfree(adapter->login_buf);
			break;
		case REQUEST_DUMP:
			complete(&adapter->fw_done);
			break;
		case REQUEST_ERROR_INFO:
			spin_lock_irqsave(&adapter->error_list_lock, flags2);
			list_for_each_entry_safe(error_buff, tmp2,
						 &adapter->errors, list) {
				dma_unmap_single(dev, error_buff->dma,
						 error_buff->len,
						 DMA_FROM_DEVICE);
				kfree(error_buff->buff);
				list_del(&error_buff->list);
				kfree(error_buff);
			}
			spin_unlock_irqrestore(&adapter->error_list_lock,
					       flags2);
			break;
		}
		list_del(&inflight_cmd->list);
		kfree(inflight_cmd);
	}
	spin_unlock_irqrestore(&adapter->inflight_lock, flags);
}
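
/* Top-level dispatcher for received CRQ messages */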
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   ((unsigned long int *)crq)[0],
		   ((unsigned long int *)crq)[1]);
	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
		case IBMVNIC_CRQ_INIT:
			dev_info(dev, "Partner initialized\n");
			/* Send back a response */
			rc = ibmvnic_send_crq_init_complete(adapter);
			if (!rc)
				schedule_work(&adapter->vnic_crq_init);
			else
				dev_err(dev, "Can't send initrsp rc=%ld\n", rc);
			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
	case IBMVNIC_CRQ_XPORT_EVENT:
		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
			dev_info(dev, "Re-enabling adapter\n");
			adapter->migrated = true;
			ibmvnic_free_inflight(adapter);
			release_sub_crqs(adapter);
			rc = ibmvnic_reenable_crq_queue(adapter);
			if (rc)
				dev_err(dev, "Error after enable rc=%ld\n", rc);
			adapter->migrated = false;
			rc = ibmvnic_send_crq_init(adapter);
			if (rc)
				dev_err(dev, "Error sending init rc=%ld\n", rc);
		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
			dev_info(dev, "Backing device failover detected\n");
			netif_carrier_off(netdev);
			adapter->failover = true;
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			ibmvnic_free_inflight(adapter);
			release_sub_crqs(adapter);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}

	switch (gen_crq->cmd) {
	case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		dev_info(dev, "Partner protocol version is %d\n",
			 crq->version_exchange_rsp.version);
		if (be16_to_cpu(crq->version_exchange_rsp.version) <
		    ibmvnic_version)
			ibmvnic_version =
			    be16_to_cpu(crq->version_exchange_rsp.version);
		send_cap_queries(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case QUERY_MAP_RSP:
		handle_query_map_rsp(crq, adapter);
		break;
	case REQUEST_MAP_RSP:
		handle_request_map_rsp(crq, adapter);
		break;
	case REQUEST_UNMAP_RSP:
		handle_request_unmap_rsp(crq, adapter);
		break;
	case REQUEST_CAPABILITY_RSP:
		handle_request_cap_rsp(crq, adapter);
		break;
	case LOGIN_RSP:
		netdev_dbg(netdev, "Got Login Response\n");
		handle_login_rsp(crq, adapter);
		break;
	case LOGICAL_LINK_STATE_RSP:
		netdev_dbg(netdev, "Got Logical Link State Response\n");
		adapter->logical_link_state =
		    crq->logical_link_state_rsp.link_state;
		break;
	case LINK_STATE_INDICATION:
		netdev_dbg(netdev, "Got Logical Link State Indication\n");
		adapter->phys_link_state =
		    crq->link_state_indication.phys_link_state;
		adapter->logical_link_state =
		    crq->link_state_indication.logical_link_state;
		break;
	case CHANGE_MAC_ADDR_RSP:
		netdev_dbg(netdev, "Got MAC address change Response\n");
		handle_change_mac_rsp(crq, adapter);
		break;
	case ERROR_INDICATION:
		netdev_dbg(netdev, "Got Error Indication\n");
		handle_error_indication(crq, adapter);
		break;
	case REQUEST_ERROR_RSP:
		netdev_dbg(netdev, "Got Error Detail Response\n");
		handle_error_info_rsp(crq, adapter);
		break;
	case REQUEST_STATISTICS_RSP:
		netdev_dbg(netdev, "Got Statistics Response\n");
		complete(&adapter->stats_done);
		break;
	case REQUEST_DUMP_SIZE_RSP:
		netdev_dbg(netdev, "Got Request Dump Size Response\n");
		handle_dump_size_rsp(crq, adapter);
		break;
	case REQUEST_DUMP_RSP:
		netdev_dbg(netdev, "Got Request Dump Response\n");
		complete(&adapter->fw_done);
		break;
	case QUERY_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Query IP offload Response\n");
		handle_query_ip_offload_rsp(adapter);
		break;
	case MULTICAST_CTRL_RSP:
		netdev_dbg(netdev, "Got multicast control Response\n");
		break;
	case CONTROL_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Control IP offload Response\n");
		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
				 sizeof(adapter->ip_offload_ctrl),
				 DMA_TO_DEVICE);
		/* We're done with the queries, perform the login */
		send_login(adapter);
		break;
	case REQUEST_RAS_COMP_NUM_RSP:
		netdev_dbg(netdev, "Got Request RAS Comp Num Response\n");
		if (crq->request_ras_comp_num_rsp.rc.code == 10) {
			netdev_dbg(netdev,
				   "Request RAS Comp Num not supported\n");
			break;
		}
		adapter->ras_comp_num =
		    be32_to_cpu(crq->request_ras_comp_num_rsp.num_components);
		handle_request_ras_comp_num_rsp(crq, adapter);
		break;
	case REQUEST_RAS_COMPS_RSP:
		netdev_dbg(netdev, "Got Request RAS Comps Response\n");
		handle_request_ras_comps_rsp(crq, adapter);
		break;
	case CONTROL_RAS_RSP:
		netdev_dbg(netdev, "Got Control RAS Response\n");
		handle_control_ras_rsp(crq, adapter);
		break;
	case COLLECT_FW_TRACE_RSP:
		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
		complete(&adapter->fw_done);
		break;
	default:
		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
			   gen_crq->cmd);
	}
}
static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	union ibmvnic_crq *crq;
	unsigned long flags;
	bool done = false;

	spin_lock_irqsave(&queue->lock, flags);
	vio_disable_interrupts(vdev);
	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		}
		vio_enable_interrupts(vdev);
		crq = ibmvnic_next_crq(adapter);
		if (crq) {
			vio_disable_interrupts(vdev);
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		} else {
			done = true;
		}
	}
	spin_unlock_irqrestore(&queue->lock, flags);
	return IRQ_HANDLED;
}

static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}

static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == 2)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}

static void ibmvnic_release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
}

static int ibmvnic_init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */
	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
			 adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	return retrc;

req_irq_failed:
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	return retrc;
}

/* debugfs for dump */
static int ibmvnic_dump_show(struct seq_file *seq, void *v)
{
	struct net_device *netdev = seq->private;
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_dump_size.first = IBMVNIC_CRQ_CMD;
	crq.request_dump_size.cmd = REQUEST_DUMP_SIZE;
	ibmvnic_send_crq(adapter, &crq);

	init_completion(&adapter->fw_done);
	wait_for_completion(&adapter->fw_done);

	seq_write(seq, adapter->dump_data, adapter->dump_data_size);

	dma_unmap_single(dev, adapter->dump_data_token, adapter->dump_data_size,
			 DMA_BIDIRECTIONAL);

	kfree(adapter->dump_data);

	return 0;
}

static int ibmvnic_dump_open(struct inode *inode, struct file *file)
{
	return single_open(file, ibmvnic_dump_show, inode->i_private);
}

static const struct file_operations ibmvnic_dump_ops = {
	.owner		= THIS_MODULE,
	.open		= ibmvnic_dump_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static void handle_crq_init_rsp(struct work_struct *work)
{
	struct ibmvnic_adapter *adapter = container_of(work,
						       struct ibmvnic_adapter,
						       vnic_crq_init);
	struct device *dev = &adapter->vdev->dev;
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(30000);
	bool restart = false;
	int rc;

	if (adapter->failover) {
		release_sub_crqs(adapter);
		if (netif_running(netdev)) {
			netif_tx_disable(netdev);
			ibmvnic_close(netdev);
			restart = true;
		}
	}

	send_version_xchg(adapter);
	reinit_completion(&adapter->init_done);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Passive init timeout\n");
		goto task_failed;
	}

	do {
		if (adapter->renegotiate) {
			adapter->renegotiate = false;
			release_sub_crqs_no_irqs(adapter);
			send_cap_queries(adapter);

			reinit_completion(&adapter->init_done);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				dev_err(dev, "Passive init timeout\n");
				goto task_failed;
			}
		}
	} while (adapter->renegotiate);
	rc = init_sub_crq_irqs(adapter);

	if (rc)
		goto task_failed;

	netdev->real_num_tx_queues = adapter->req_tx_queues;

	if (adapter->failover) {
		adapter->failover = false;
		if (restart) {
			rc = ibmvnic_open(netdev);
			if (rc)
				goto restart_failed;
		}
		netif_carrier_on(netdev);
		return;
	}

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(dev,
			"failed to register netdev rc=%d\n", rc);
		goto register_failed;
	}
	dev_info(dev, "ibmvnic registered\n");
	return;

restart_failed:
	dev_err(dev, "Failed to restart ibmvnic, rc=%d\n", rc);
register_failed:
	release_sub_crqs(adapter);
task_failed:
	dev_err(dev, "Passive initialization was not successful\n");
}

static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	unsigned long timeout = msecs_to_jiffies(30000);
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	struct dentry *ent;
	char buf[16]; /* debugfs name buf */
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_TX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->failover = false;

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp);

	spin_lock_init(&adapter->stats_lock);

	rc = ibmvnic_init_crq_queue(adapter);
	if (rc) {
		dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n", rc);
		goto free_netdev;
	}

	INIT_LIST_HEAD(&adapter->errors);
	INIT_LIST_HEAD(&adapter->inflight);
	spin_lock_init(&adapter->error_list_lock);
	spin_lock_init(&adapter->inflight_lock);

	adapter->stats_token = dma_map_single(&dev->dev, &adapter->stats,
					      sizeof(struct ibmvnic_statistics),
					      DMA_FROM_DEVICE);
	if (dma_mapping_error(&dev->dev, adapter->stats_token)) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			dev_err(&dev->dev, "Couldn't map stats buffer\n");
		rc = -ENOMEM;
		goto free_crq;
	}

	snprintf(buf, sizeof(buf), "ibmvnic_%x", dev->unit_address);
	ent = debugfs_create_dir(buf, NULL);
	if (!ent || IS_ERR(ent)) {
		dev_info(&dev->dev, "debugfs create directory failed\n");
		adapter->debugfs_dir = NULL;
	} else {
		adapter->debugfs_dir = ent;
		ent = debugfs_create_file("dump", S_IRUGO, adapter->debugfs_dir,
					  netdev, &ibmvnic_dump_ops);
		if (!ent || IS_ERR(ent)) {
			dev_info(&dev->dev,
				 "debugfs create dump file failed\n");
			adapter->debugfs_dump = NULL;
		} else {
			adapter->debugfs_dump = ent;
		}
	}
	ibmvnic_send_crq_init(adapter);

	init_completion(&adapter->init_done);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout))
		return 0;

	do {
		if (adapter->renegotiate) {
			adapter->renegotiate = false;
			release_sub_crqs_no_irqs(adapter);
			send_cap_queries(adapter);

			reinit_completion(&adapter->init_done);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout))
				return 0;
		}
	} while (adapter->renegotiate);

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(&dev->dev, "failed to initialize sub crq irqs\n");
		goto free_debugfs;
	}

	netdev->real_num_tx_queues = adapter->req_tx_queues;

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		goto free_sub_crqs;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	return 0;

free_sub_crqs:
	release_sub_crqs(adapter);
free_debugfs:
	if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
		debugfs_remove_recursive(adapter->debugfs_dir);
free_crq:
	ibmvnic_release_crq_queue(adapter);
free_netdev:
	free_netdev(netdev);
	return rc;
}

static int ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	unregister_netdev(netdev);

	release_sub_crqs(adapter);

	ibmvnic_release_crq_queue(adapter);

	if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
		debugfs_remove_recursive(adapter->debugfs_dir);

	if (adapter->ras_comps)
		dma_free_coherent(&dev->dev,
				  adapter->ras_comp_num *
				  sizeof(struct ibmvnic_fw_component),
				  adapter->ras_comps, adapter->ras_comps_tok);

	kfree(adapter->ras_comp_int);

	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below*/
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += adapter->bounce_buffer_size;
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		ret += adapter->rx_pool[i].size *
		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}

static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i;

	/* kick the interrupt handlers just in case we lost an interrupt */
	for (i = 0; i < adapter->req_rx_queues; i++)
		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
				     adapter->rx_scrq[i]);

	return 0;
}

static struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table       = ibmvnic_device_table,
	.probe          = ibmvnic_probe,
	.remove         = ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name           = ibmvnic_driver_name,
	.pm             = &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);