Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
[deliverable/linux.git] / drivers/net/ehea/ehea_main.c
index 190fb691d20b107619e1fde16ebb1b204393426f..bb7d306fb44656c3070fa3c20e2ef7b0759c73fc 100644 (file)
@@ -533,8 +533,15 @@ static inline void ehea_fill_skb(struct net_device *dev,
        int length = cqe->num_bytes_transfered - 4;     /*remove CRC */
 
        skb_put(skb, length);
-       skb->ip_summed = CHECKSUM_UNNECESSARY;
        skb->protocol = eth_type_trans(skb, dev);
+
+       /* The packet was not an IPV4 packet so a complemented checksum was
+          calculated. The value is found in the Internet Checksum field. */
+       if (cqe->status & EHEA_CQE_BLIND_CKSUM) {
+               skb->ip_summed = CHECKSUM_COMPLETE;
+               skb->csum = csum_unfold(~cqe->inet_checksum_value);
+       } else
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
 }
 
 static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
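
The hunk above stops blindly marking every received frame CHECKSUM_UNNECESSARY: when the adapter sets EHEA_CQE_BLIND_CKSUM (the frame was not IPv4, so it could not validate the checksum itself), the driver instead forwards the adapter's complemented Internet checksum to the stack as CHECKSUM_COMPLETE. A minimal sketch of this receive-checksum pattern, with illustrative names (fill_rx_checksum and HW_RAW_CSUM are not ehea identifiers, and the status bit value is made up):

	#include <linux/skbuff.h>
	#include <net/checksum.h>

	#define HW_RAW_CSUM 0x0400	/* illustrative status bit, not EHEA_CQE_BLIND_CKSUM */

	static void fill_rx_checksum(struct sk_buff *skb, u32 status, u16 hw_csum)
	{
		if (status & HW_RAW_CSUM) {
			/* Hardware handed back the complemented one's-complement
			 * sum of the payload; undo the complement, unfold it,
			 * and let the stack do the verification. */
			skb->ip_summed = CHECKSUM_COMPLETE;
			skb->csum = csum_unfold((__force __sum16)~hw_csum);
		} else {
			/* Hardware already verified the checksum itself. */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}
	}
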
@@ -786,6 +793,7 @@ static void reset_sq_restart_flag(struct ehea_port *port)
                struct ehea_port_res *pr = &port->port_res[i];
                pr->sq_restart_flag = 0;
        }
+       wake_up(&port->restart_wq);
 }
 
 static void check_sqs(struct ehea_port *port)
@@ -796,6 +804,7 @@ static void check_sqs(struct ehea_port *port)
 
        for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
                struct ehea_port_res *pr = &port->port_res[i];
+               int ret;
                k = 0;
                swqe = ehea_get_swqe(pr->qp, &swqe_index);
                memset(swqe, 0, SWQE_HEADER_SIZE);
@@ -809,13 +818,14 @@ static void check_sqs(struct ehea_port *port)
 
                ehea_post_swqe(pr->qp, swqe);
 
-               while (pr->sq_restart_flag == 0) {
-                       msleep(5);
-                       if (++k == 100) {
-                               ehea_error("HW/SW queues out of sync");
-                               ehea_schedule_port_reset(pr->port);
-                               return;
-                       }
+               ret = wait_event_timeout(port->restart_wq,
+                                        pr->sq_restart_flag == 0,
+                                        msecs_to_jiffies(100));
+
+               if (!ret) {
+                       ehea_error("HW/SW queues out of sync");
+                       ehea_schedule_port_reset(pr->port);
+                       return;
                }
        }
 }
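
The reset_sq_restart_flag() and check_sqs() hunks above convert a 5 ms polling loop (up to 100 iterations) into a wait queue: check_sqs() now sleeps in wait_event_timeout() until reset_sq_restart_flag() clears the per-queue flags and calls wake_up(), or until 100 ms elapse, in which case wait_event_timeout() returns 0 and the port reset is scheduled. Both wait-queue heads are initialized in the ehea_open() hunk below. A minimal sketch of the wake/wait pair, with illustrative names (restart_pending, restart_done and wait_for_restart are not from the driver):

	#include <linux/wait.h>
	#include <linux/jiffies.h>
	#include <linux/errno.h>

	static DECLARE_WAIT_QUEUE_HEAD(restart_wq);	/* stands in for port->restart_wq */
	static int restart_pending;

	/* Completion side: clear the flag first, then wake the sleeper,
	 * mirroring reset_sq_restart_flag(). */
	static void restart_done(void)
	{
		restart_pending = 0;
		wake_up(&restart_wq);
	}

	/* Waiting side: sleep until the flag is cleared or 100 ms pass.
	 * wait_event_timeout() returns 0 only when the condition is still
	 * false at expiry, so !ret maps to the out-of-sync error path. */
	static int wait_for_restart(void)
	{
		if (!wait_event_timeout(restart_wq, restart_pending == 0,
					msecs_to_jiffies(100)))
			return -ETIMEDOUT;
		return 0;
	}
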
@@ -888,6 +898,7 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
                pr->queue_stopped = 0;
        }
        spin_unlock_irqrestore(&pr->netif_queue, flags);
+       wake_up(&pr->port->swqe_avail_wq);
 
        return cqe;
 }
@@ -1914,7 +1925,7 @@ static void ehea_promiscuous(struct net_device *dev, int enable)
        struct hcp_ehea_port_cb7 *cb7;
        u64 hret;
 
-       if ((enable && port->promisc) || (!enable && !port->promisc))
+       if (enable == port->promisc)
                return;
 
        cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
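
The rewritten guard in ehea_promiscuous() relies on enable and port->promisc holding only 0 or 1: "(enable && promisc) || (!enable && !promisc)" is true exactly when the two flags have the same truth value, which under that normalization is simply "enable == promisc". A quick exhaustive check of the equivalence (plain userspace C, assuming the 0/1 normalization):

	#include <assert.h>

	int main(void)
	{
		/* Walk all four 0/1 combinations and compare both forms. */
		for (int enable = 0; enable <= 1; enable++)
			for (int promisc = 0; promisc <= 1; promisc++)
				assert(((enable && promisc) ||
					(!enable && !promisc)) ==
				       (enable == promisc));
		return 0;
	}
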
@@ -2268,7 +2279,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
        }
        pr->swqe_id_counter += 1;
 
-       if (port->vgrp && vlan_tx_tag_present(skb)) {
+       if (vlan_tx_tag_present(skb)) {
                swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
                swqe->vlan_tag = vlan_tx_tag_get(skb);
        }
@@ -2652,6 +2663,9 @@ static int ehea_open(struct net_device *dev)
                netif_start_queue(dev);
        }
 
+       init_waitqueue_head(&port->swqe_avail_wq);
+       init_waitqueue_head(&port->restart_wq);
+
        mutex_unlock(&port->port_lock);
 
        return ret;
@@ -2724,13 +2738,15 @@ static void ehea_flush_sq(struct ehea_port *port)
        for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
                struct ehea_port_res *pr = &port->port_res[i];
                int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
-               int k = 0;
-               while (atomic_read(&pr->swqe_avail) < swqe_max) {
-                       msleep(5);
-                       if (++k == 20) {
-                               ehea_error("WARNING: sq not flushed completely");
-                               break;
-                       }
+               int ret;
+
+               ret = wait_event_timeout(port->swqe_avail_wq,
+                        atomic_read(&pr->swqe_avail) >= swqe_max,
+                        msecs_to_jiffies(100));
+
+               if (!ret) {
+                       ehea_error("WARNING: sq not flushed completely");
+                       break;
                }
        }
 }
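
The ehea_flush_sq() hunk applies the same polling-to-waitqueue conversion to an atomic counter: instead of re-reading swqe_avail every 5 ms (up to 20 times), the flusher sleeps on swqe_avail_wq, which ehea_proc_cqes() (hunk above) wakes each time it reclaims send-queue entries. A sketch of that counter flavour, with illustrative names (avail, reclaim_entries and wait_for_entries are not ehea's):

	#include <linux/wait.h>
	#include <linux/atomic.h>
	#include <linux/jiffies.h>
	#include <linux/errno.h>

	static DECLARE_WAIT_QUEUE_HEAD(avail_wq);	/* stands in for port->swqe_avail_wq */
	static atomic_t avail = ATOMIC_INIT(0);

	/* Completion path: return entries to the pool, then wake any flusher. */
	static void reclaim_entries(int n)
	{
		atomic_add(n, &avail);
		wake_up(&avail_wq);
	}

	/* Flush path: sleep until enough entries are free or 100 ms pass;
	 * the condition is re-evaluated on every wake-up. */
	static int wait_for_entries(int want)
	{
		if (!wait_event_timeout(avail_wq, atomic_read(&avail) >= want,
					msecs_to_jiffies(100)))
			return -ETIMEDOUT;
		return 0;
	}
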