Merge branch 'for-davem' of git://git.kernel.org/pub/scm/linux/kernel/git/linville...
author David S. Miller <davem@davemloft.net>
Wed, 16 May 2012 05:03:54 +0000 (01:03 -0400)
committer David S. Miller <davem@davemloft.net>
Wed, 16 May 2012 05:04:07 +0000 (01:04 -0400)
John Linville says:

Here are three more fixes that some of my developers are desperate to
see included in 3.4...

Johan Hedberg went to some length justifying the inclusion of these two
Bluetooth fixes:

"The device_connected fix should be quite self-explanatory, but it's
actually a wider issue than just for keyboards. All profiles that do
incoming connection authorization (e.g. headsets) will break without it
on specific hardware. The reason it wasn't caught earlier is that it
only occurs with specific Bluetooth adapters.

As for the security level patch, this fixes L2CAP socket based security
level elevation during a connection. The HID profile needs this (for
keyboards) and it is the only way to achieve the security level
elevation when using the management interface to talk to the kernel
(hence the management enabling patch being the one that exposes this)."
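
For context, the elevation Johan refers to is what an L2CAP socket user
(such as the HID code) requests through the BT_SECURITY socket option
before connecting.  A minimal user-space sketch, assuming the standard
BlueZ socket headers; the PSM and peer address are placeholders, not
taken from the patches:

/* Illustrative only -- not part of these patches.  Elevate the
 * security level on an L2CAP socket before connecting; this is the
 * elevation path the fix restores when the kernel is driven through
 * the management interface. */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/l2cap.h>

static int l2cap_connect_secure(const bdaddr_t *dst, unsigned short psm)
{
	struct sockaddr_l2 addr;
	struct bt_security sec;
	int sk;

	sk = socket(AF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);
	if (sk < 0)
		return -1;

	/* Ask for MEDIUM security (encryption) up front; the broken
	 * path failed to honour this during connection setup. */
	memset(&sec, 0, sizeof(sec));
	sec.level = BT_SECURITY_MEDIUM;
	if (setsockopt(sk, SOL_BLUETOOTH, BT_SECURITY,
		       &sec, sizeof(sec)) < 0)
		goto fail;

	memset(&addr, 0, sizeof(addr));
	addr.l2_family = AF_BLUETOOTH;
	addr.l2_psm = htobs(psm);	/* e.g. 0x11, the HID control PSM */
	bacpy(&addr.l2_bdaddr, dst);

	if (connect(sk, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		goto fail;

	return sk;
fail:
	close(sk);
	return -1;
}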

The rtlwifi fix addresses a regression related to firmware loading,
as described in kernel.org bug 43187.  It basically just moves a hunk
of code to a more appropriate place.
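
On the failure mode behind that bug: state touched by a firmware
completion callback must be fully set up before the asynchronous load
is started, because a cached image can make the callback run
immediately.  A generic sketch of that ordering, not the rtlwifi code
itself (my_priv, my_fw_cb and "my_fw.bin" are hypothetical names):

/* Generic sketch: everything the completion callback touches must be
 * initialized before the load is kicked off, since a cached firmware
 * image can invoke the callback before request_firmware_nowait()
 * returns. */
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/gfp.h>
#include <linux/module.h>

struct my_priv {
	struct device *dev;
	const struct firmware *fw;	/* NULL until loaded */
	struct completion fw_ready;
};

static void my_fw_cb(const struct firmware *fw, void *context)
{
	struct my_priv *priv = context;

	priv->fw = fw;			/* NULL on failure */
	complete(&priv->fw_ready);
}

static int my_load_fw(struct my_priv *priv)
{
	/* Initialize the state the callback uses *first*... */
	init_completion(&priv->fw_ready);

	/* ...and only then start the async load; doing setup after
	 * this call is the sort of ordering bug being fixed here.
	 * The caller must release_firmware(priv->fw) when done. */
	return request_firmware_nowait(THIS_MODULE, true, "my_fw.bin",
				       priv->dev, GFP_KERNEL, priv,
				       my_fw_cb);
}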

Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
drivers/net/usb/cdc_ether.c
drivers/net/usb/usbnet.c
include/linux/usb/usbnet.h

index dd14915f54bb8581516329c36b3fed3d0204cfae..ba781747d17421d7a0fff044b74c79303e6a8fa3 100644
@@ -584,7 +584,6 @@ struct pch_gbe_hw_stats {
 /**
  * struct pch_gbe_adapter - board specific private data structure
  * @stats_lock:        Spinlock structure for status
- * @tx_queue_lock:     Spinlock structure for transmit
  * @ethtool_lock:      Spinlock structure for ethtool
  * @irq_sem:           Semaphore for interrupt
  * @netdev:            Pointer of network device structure
@@ -609,7 +608,6 @@ struct pch_gbe_hw_stats {
 
 struct pch_gbe_adapter {
        spinlock_t stats_lock;
-       spinlock_t tx_queue_lock;
        spinlock_t ethtool_lock;
        atomic_t irq_sem;
        struct net_device *netdev;
index 8035e5ff6e060d4c208208857159c7e32c0484f0..1e38d502a06202d381b14d50964712896da4c67e 100644
@@ -640,14 +640,11 @@ static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw)
  */
 static int pch_gbe_alloc_queues(struct pch_gbe_adapter *adapter)
 {
-       int size;
-
-       size = (int)sizeof(struct pch_gbe_tx_ring);
-       adapter->tx_ring = kzalloc(size, GFP_KERNEL);
+       adapter->tx_ring = kzalloc(sizeof(*adapter->tx_ring), GFP_KERNEL);
        if (!adapter->tx_ring)
                return -ENOMEM;
-       size = (int)sizeof(struct pch_gbe_rx_ring);
-       adapter->rx_ring = kzalloc(size, GFP_KERNEL);
+
+       adapter->rx_ring = kzalloc(sizeof(*adapter->rx_ring), GFP_KERNEL);
        if (!adapter->rx_ring) {
                kfree(adapter->tx_ring);
                return -ENOMEM;
@@ -1162,7 +1159,6 @@ static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
        struct sk_buff *tmp_skb;
        unsigned int frame_ctrl;
        unsigned int ring_num;
-       unsigned long flags;
 
        /*-- Set frame control --*/
        frame_ctrl = 0;
@@ -1211,14 +1207,14 @@ static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
                        }
                }
        }
-       spin_lock_irqsave(&tx_ring->tx_lock, flags);
+
        ring_num = tx_ring->next_to_use;
        if (unlikely((ring_num + 1) == tx_ring->count))
                tx_ring->next_to_use = 0;
        else
                tx_ring->next_to_use = ring_num + 1;
 
-       spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+
        buffer_info = &tx_ring->buffer_info[ring_num];
        tmp_skb = buffer_info->skb;
 
@@ -1518,7 +1514,7 @@ pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
                                                &rx_ring->rx_buff_pool_logic,
                                                GFP_KERNEL);
        if (!rx_ring->rx_buff_pool) {
-               pr_err("Unable to allocate memory for the receive poll buffer\n");
+               pr_err("Unable to allocate memory for the receive pool buffer\n");
                return -ENOMEM;
        }
        memset(rx_ring->rx_buff_pool, 0, size);
@@ -1637,15 +1633,17 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
        pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n",
                 cleaned_count);
        /* Recover from running out of Tx resources in xmit_frame */
+       spin_lock(&tx_ring->tx_lock);
        if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev)))) {
                netif_wake_queue(adapter->netdev);
                adapter->stats.tx_restart_count++;
                pr_debug("Tx wake queue\n");
        }
-       spin_lock(&adapter->tx_queue_lock);
+
        tx_ring->next_to_clean = i;
-       spin_unlock(&adapter->tx_queue_lock);
+
        pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
+       spin_unlock(&tx_ring->tx_lock);
        return cleaned;
 }
 
@@ -2037,7 +2035,6 @@ static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
                return -ENOMEM;
        }
        spin_lock_init(&adapter->hw.miim_lock);
-       spin_lock_init(&adapter->tx_queue_lock);
        spin_lock_init(&adapter->stats_lock);
        spin_lock_init(&adapter->ethtool_lock);
        atomic_set(&adapter->irq_sem, 0);
@@ -2142,10 +2139,10 @@ static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
                         tx_ring->next_to_use, tx_ring->next_to_clean);
                return NETDEV_TX_BUSY;
        }
-       spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
 
        /* CRC,ITAG no support */
        pch_gbe_tx_queue(adapter, tx_ring, skb);
+       spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
        return NETDEV_TX_OK;
 }
 
index 00880edba04801bc9b686e17182bc09cc0531564..425e201f597c7f009d052859e65568d51b2c464f 100644
@@ -485,6 +485,7 @@ static const struct driver_info wwan_info = {
 /*-------------------------------------------------------------------------*/
 
 #define HUAWEI_VENDOR_ID       0x12D1
+#define NOVATEL_VENDOR_ID      0x1410
 
 static const struct usb_device_id      products [] = {
 /*
@@ -602,6 +603,21 @@ static const struct usb_device_id  products [] = {
  * because of bugs/quirks in a given product (like Zaurus, above).
  */
 {
+       /* Novatel USB551L */
+       /* This match must come *before* the generic CDC-ETHER match so that
+        * we get FLAG_WWAN set on the device, since its descriptors are
+        * generic CDC-ETHER.
+        */
+       .match_flags    =   USB_DEVICE_ID_MATCH_VENDOR
+                | USB_DEVICE_ID_MATCH_PRODUCT
+                | USB_DEVICE_ID_MATCH_INT_INFO,
+       .idVendor               = NOVATEL_VENDOR_ID,
+       .idProduct              = 0xB001,
+       .bInterfaceClass        = USB_CLASS_COMM,
+       .bInterfaceSubClass     = USB_CDC_SUBCLASS_ETHERNET,
+       .bInterfaceProtocol     = USB_CDC_PROTO_NONE,
+       .driver_info = (unsigned long)&wwan_info,
+}, {
        USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
                        USB_CDC_PROTO_NONE),
        .driver_info = (unsigned long) &cdc_info,
index 2d927fb4adf4ea6805b2aeb02d06c8cb5108f496..b38db48b1ce09b380d8cdea0ea444be0d97573d0 100644
@@ -282,17 +282,32 @@ int usbnet_change_mtu (struct net_device *net, int new_mtu)
 }
 EXPORT_SYMBOL_GPL(usbnet_change_mtu);
 
+/* The caller must hold list->lock */
+static void __usbnet_queue_skb(struct sk_buff_head *list,
+                       struct sk_buff *newsk, enum skb_state state)
+{
+       struct skb_data *entry = (struct skb_data *) newsk->cb;
+
+       __skb_queue_tail(list, newsk);
+       entry->state = state;
+}
+
 /*-------------------------------------------------------------------------*/
 
 /* some LK 2.4 HCDs oopsed if we freed or resubmitted urbs from
  * completion callbacks.  2.5 should have fixed those bugs...
  */
 
-static void defer_bh(struct usbnet *dev, struct sk_buff *skb, struct sk_buff_head *list)
+static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
+               struct sk_buff_head *list, enum skb_state state)
 {
        unsigned long           flags;
+       enum skb_state          old_state;
+       struct skb_data *entry = (struct skb_data *) skb->cb;
 
        spin_lock_irqsave(&list->lock, flags);
+       old_state = entry->state;
+       entry->state = state;
        __skb_unlink(skb, list);
        spin_unlock(&list->lock);
        spin_lock(&dev->done.lock);
@@ -300,6 +315,7 @@ static void defer_bh(struct usbnet *dev, struct sk_buff *skb, struct sk_buff_head *list)
        if (dev->done.qlen == 1)
                tasklet_schedule(&dev->bh);
        spin_unlock_irqrestore(&dev->done.lock, flags);
+       return old_state;
 }
 
 /* some work can't be done in tasklets, so we use keventd
@@ -340,7 +356,6 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
        entry = (struct skb_data *) skb->cb;
        entry->urb = urb;
        entry->dev = dev;
-       entry->state = rx_start;
        entry->length = 0;
 
        usb_fill_bulk_urb (urb, dev->udev, dev->in,
@@ -372,7 +387,7 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
                        tasklet_schedule (&dev->bh);
                        break;
                case 0:
-                       __skb_queue_tail (&dev->rxq, skb);
+                       __usbnet_queue_skb(&dev->rxq, skb, rx_start);
                }
        } else {
                netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
@@ -423,16 +438,17 @@ static void rx_complete (struct urb *urb)
        struct skb_data         *entry = (struct skb_data *) skb->cb;
        struct usbnet           *dev = entry->dev;
        int                     urb_status = urb->status;
+       enum skb_state          state;
 
        skb_put (skb, urb->actual_length);
-       entry->state = rx_done;
+       state = rx_done;
        entry->urb = NULL;
 
        switch (urb_status) {
        /* success */
        case 0:
                if (skb->len < dev->net->hard_header_len) {
-                       entry->state = rx_cleanup;
+                       state = rx_cleanup;
                        dev->net->stats.rx_errors++;
                        dev->net->stats.rx_length_errors++;
                        netif_dbg(dev, rx_err, dev->net,
@@ -471,7 +487,7 @@ static void rx_complete (struct urb *urb)
                                  "rx throttle %d\n", urb_status);
                }
 block:
-               entry->state = rx_cleanup;
+               state = rx_cleanup;
                entry->urb = urb;
                urb = NULL;
                break;
@@ -482,17 +498,18 @@ block:
                // FALLTHROUGH
 
        default:
-               entry->state = rx_cleanup;
+               state = rx_cleanup;
                dev->net->stats.rx_errors++;
                netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
                break;
        }
 
-       defer_bh(dev, skb, &dev->rxq);
+       state = defer_bh(dev, skb, &dev->rxq, state);
 
        if (urb) {
                if (netif_running (dev->net) &&
-                   !test_bit (EVENT_RX_HALT, &dev->flags)) {
+                   !test_bit (EVENT_RX_HALT, &dev->flags) &&
+                   state != unlink_start) {
                        rx_submit (dev, urb, GFP_ATOMIC);
                        usb_mark_last_busy(dev->udev);
                        return;
@@ -579,16 +596,23 @@ EXPORT_SYMBOL_GPL(usbnet_purge_paused_rxq);
 static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
 {
        unsigned long           flags;
-       struct sk_buff          *skb, *skbnext;
+       struct sk_buff          *skb;
        int                     count = 0;
 
        spin_lock_irqsave (&q->lock, flags);
-       skb_queue_walk_safe(q, skb, skbnext) {
+       while (!skb_queue_empty(q)) {
                struct skb_data         *entry;
                struct urb              *urb;
                int                     retval;
 
-               entry = (struct skb_data *) skb->cb;
+               skb_queue_walk(q, skb) {
+                       entry = (struct skb_data *) skb->cb;
+                       if (entry->state != unlink_start)
+                               goto found;
+               }
+               break;
+found:
+               entry->state = unlink_start;
                urb = entry->urb;
 
                /*
@@ -1039,8 +1063,7 @@ static void tx_complete (struct urb *urb)
        }
 
        usb_autopm_put_interface_async(dev->intf);
-       entry->state = tx_done;
-       defer_bh(dev, skb, &dev->txq);
+       (void) defer_bh(dev, skb, &dev->txq, tx_done);
 }
 
 /*-------------------------------------------------------------------------*/
@@ -1096,7 +1119,6 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
        entry = (struct skb_data *) skb->cb;
        entry->urb = urb;
        entry->dev = dev;
-       entry->state = tx_start;
        entry->length = length;
 
        usb_fill_bulk_urb (urb, dev->udev, dev->out,
@@ -1155,7 +1177,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
                break;
        case 0:
                net->trans_start = jiffies;
-               __skb_queue_tail (&dev->txq, skb);
+               __usbnet_queue_skb(&dev->txq, skb, tx_start);
                if (dev->txq.qlen >= TX_QLEN (dev))
                        netif_stop_queue (net);
        }
index 605b0aa8d852f63d4d0c7a3cf18add1f518e2acd..76f439647c4bdbe8a81c8518be0f5194ed43af8f 100644
@@ -191,7 +191,8 @@ extern void usbnet_cdc_status(struct usbnet *, struct urb *);
 enum skb_state {
        illegal = 0,
        tx_start, tx_done,
-       rx_start, rx_done, rx_cleanup
+       rx_start, rx_done, rx_cleanup,
+       unlink_start
 };
 
 struct skb_data {      /* skb->cb is one of these */