#define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg))
#define RTL_R8(reg) readb (ioaddr + (reg))
#define RTL_R16(reg) readw (ioaddr + (reg))
-#define RTL_R32(reg) ((unsigned long) readl (ioaddr + (reg)))
+#define RTL_R32(reg) readl (ioaddr + (reg))
enum mac_version {
RTL_GIGA_MAC_NONE = 0x00,
{ 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_26 },
/* 8168C family. */
- { 0x7cf00000, 0x3ca00000, RTL_GIGA_MAC_VER_24 },
+ { 0x7cf00000, 0x3cb00000, RTL_GIGA_MAC_VER_24 },
{ 0x7cf00000, 0x3c900000, RTL_GIGA_MAC_VER_23 },
{ 0x7cf00000, 0x3c800000, RTL_GIGA_MAC_VER_18 },
{ 0x7c800000, 0x3c800000, RTL_GIGA_MAC_VER_24 },
*/
rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON);
+ /*
+ * Start watchdog monitoring.
+ */
+ rt2x00link_start_watchdog(rt2x00dev);
+
/*
* Start the TX queues.
*/
ieee80211_stop_queues(rt2x00dev->hw);
rt2x00queue_stop_queues(rt2x00dev);
+ /*
+ * Stop watchdog monitoring.
+ */
+ rt2x00link_stop_watchdog(rt2x00dev);
+
/*
* Disable RX.
*/
/*
* Interrupt context handlers.
*/
-static void rt2x00lib_beacondone_iter(void *data, u8 *mac,
- struct ieee80211_vif *vif)
+static void rt2x00lib_bc_buffer_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
{
- struct rt2x00_intf *intf = vif_to_intf(vif);
+ struct rt2x00_dev *rt2x00dev = data;
+ struct sk_buff *skb;
+
+ /*
+ * Only AP mode interfaces do broad- and multicast buffering
+ */
+ if (vif->type != NL80211_IFTYPE_AP)
+ return;
+
+ /*
+ * Send out buffered broad- and multicast frames
+ */
+ skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif);
+ while (skb) {
+ rt2x00mac_tx(rt2x00dev->hw, skb);
+ skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif);
+ }
+}
+
+static void rt2x00lib_beaconupdate_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct rt2x00_dev *rt2x00dev = data;
if (vif->type != NL80211_IFTYPE_AP &&
vif->type != NL80211_IFTYPE_ADHOC &&
vif->type != NL80211_IFTYPE_WDS)
return;
- spin_lock(&intf->lock);
- intf->delayed_flags |= DELAYED_UPDATE_BEACON;
- spin_unlock(&intf->lock);
+ rt2x00queue_update_beacon(rt2x00dev, vif, true);
}
void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
return;
- ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw,
- rt2x00lib_beacondone_iter,
- rt2x00dev);
+ /* send buffered bc/mc frames out for every bssid */
+ ieee80211_iterate_active_interfaces(rt2x00dev->hw,
+ rt2x00lib_bc_buffer_iter,
+ rt2x00dev);
+ /*
+ * Devices with pre tbtt interrupt don't need to update the beacon
+ * here as they will fetch the next beacon directly prior to
+ * transmission.
+ */
+ if (test_bit(DRIVER_SUPPORT_PRE_TBTT_INTERRUPT, &rt2x00dev->flags))
+ return;
- ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->intf_work);
+ /* fetch next beacon */
+ ieee80211_iterate_active_interfaces(rt2x00dev->hw,
+ rt2x00lib_beaconupdate_iter,
+ rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2x00lib_beacondone);
+void rt2x00lib_pretbtt(struct rt2x00_dev *rt2x00dev)
+{
+ if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+ return;
+
+ /* fetch next beacon */
+ ieee80211_iterate_active_interfaces(rt2x00dev->hw,
+ rt2x00lib_beaconupdate_iter,
+ rt2x00dev);
+}
+EXPORT_SYMBOL_GPL(rt2x00lib_pretbtt);
+
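For orientation, a minimal sketch of how a driver interrupt path could feed these two hooks; everything below except rt2x00lib_pretbtt() and rt2x00lib_beacondone() is hypothetical and only illustrates the intended call flow.

/* Hypothetical interrupt handler; register and bit names are made up. */
static irqreturn_t mydrv_interrupt(int irq, void *dev_instance)
{
        struct rt2x00_dev *rt2x00dev = dev_instance;
        u32 cause = mydrv_read_irq_status(rt2x00dev);

        if (cause & MYDRV_INT_PRE_TBTT)
                rt2x00lib_pretbtt(rt2x00dev);    /* upload next beacon early */

        if (cause & MYDRV_INT_TBTT)
                rt2x00lib_beacondone(rt2x00dev); /* flush buffered bc/mc frames */

        return IRQ_HANDLED;
}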
void rt2x00lib_txdone(struct queue_entry *entry,
struct txdone_entry_desc *txdesc)
{
*/
rt2x00queue_unmap_skb(rt2x00dev, entry->skb);
+ /*
+ * Remove the extra tx headroom from the skb.
+ */
+ skb_pull(entry->skb, rt2x00dev->ops->extra_tx_headroom);
+
+ /*
+ * Signal that the TX descriptor is no longer in the skb.
+ */
+ skbdesc->flags &= ~SKBDESC_DESC_IN_SKB;
+
/*
* Remove L2 padding which was added during
*/
/*
* If the IV/EIV data was stripped from the frame before it was
* passed to the hardware, we should now reinsert it again because
- * mac80211 will expect the the same data to be present it the
+ * mac80211 will expect the same data to be present in the
* frame as it was passed to us.
*/
if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags))
*/
success =
test_bit(TXDONE_SUCCESS, &txdesc->flags) ||
- test_bit(TXDONE_UNKNOWN, &txdesc->flags) ||
- test_bit(TXDONE_FALLBACK, &txdesc->flags);
+ test_bit(TXDONE_UNKNOWN, &txdesc->flags);
/*
* Update TX statistics.
/*
* Frame was sent with retries, hardware tried
* different rates to send out the frame, at each
- * retry it lowered the rate 1 step.
+ * retry it lowered the rate 1 step except when the
+ * lowest rate was used.
*/
for (i = 0; i < retry_rates && i < IEEE80211_TX_MAX_RATES; i++) {
tx_info->status.rates[i].idx = rate_idx - i;
tx_info->status.rates[i].flags = rate_flags;
+
+ if (rate_idx - i == 0) {
+ /*
+ * The lowest rate (index 0) was used until the
+ * number of max retries was reached.
+ */
+ tx_info->status.rates[i].count = retry_rates - i;
+ i++;
+ break;
+ }
tx_info->status.rates[i].count = 1;
}
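        /*
         * Editorial worked example (not part of the patch): with
         * retry_rates = 4 and rate_idx = 2 the loop above fills
         *
         *      rates[0] = { .idx = 2, .count = 1 }
         *      rates[1] = { .idx = 1, .count = 1 }
         *      rates[2] = { .idx = 0, .count = 2 }
         *
         * so the counts sum to retry_rates, with the remaining attempts
         * charged to the lowest rate once index 0 is reached.
         */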
if (i < (IEEE80211_TX_MAX_RATES - 1))
rt2x00dev->low_level_stats.dot11ACKFailureCount++;
}
+ /*
+ * Every single frame has its own tx status, hence report
+ * every frame as ampdu of size 1.
+ *
+ * TODO: if we can find out how many frames were aggregated
+ * by the hw we could provide the real ampdu_len to mac80211
+ * which would allow the rc algorithm to better decide on
+ * which rates are suitable.
+ */
+ if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
+ tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
+ tx_info->status.ampdu_len = 1;
+ tx_info->status.ampdu_ack_len = success ? 1 : 0;
+ }
+
if (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
if (success)
rt2x00dev->low_level_stats.dot11RTSSuccessCount++;
* send the status report back.
*/
if (!(skbdesc_flags & SKBDESC_NOT_MAC80211))
- ieee80211_tx_status_irqsafe(rt2x00dev->hw, entry->skb);
+ /*
+ * Only PCI and SOC devices process the tx status in process
+ * context. Hence use ieee80211_tx_status for PCI and SOC
+ * devices and stick to ieee80211_tx_status_irqsafe for USB.
+ */
+ if (rt2x00_is_usb(rt2x00dev))
+ ieee80211_tx_status_irqsafe(rt2x00dev->hw, entry->skb);
+ else
+ ieee80211_tx_status(rt2x00dev->hw, entry->skb);
else
- dev_kfree_skb_irq(entry->skb);
+ dev_kfree_skb_any(entry->skb);
/*
* Make this entry available for reuse.
*/
rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_RXDONE, entry->skb);
memcpy(IEEE80211_SKB_RXCB(entry->skb), rx_status, sizeof(*rx_status));
- ieee80211_rx_irqsafe(rt2x00dev->hw, entry->skb);
+
+ /*
+ * Currently only PCI and SOC devices handle rx interrupts in process
+ * context. Hence, use ieee80211_rx_irqsafe for USB and ieee80211_rx_ni
+ * for PCI and SOC devices.
+ */
+ if (rt2x00_is_usb(rt2x00dev))
+ ieee80211_rx_irqsafe(rt2x00dev->hw, entry->skb);
+ else
+ ieee80211_rx_ni(rt2x00dev->hw, entry->skb);
/*
* Replace the skb with the freshly allocated one.
BIT(NL80211_IFTYPE_MESH_POINT) |
BIT(NL80211_IFTYPE_WDS);
+ /*
+ * Initialize configuration work.
+ */
+ INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled);
+
/*
* Let the driver probe the device to detect the capabilities.
*/
goto exit;
}
- /*
- * Initialize configuration work.
- */
- INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled);
-
/*
* Allocate queue array.
*/
break;
}
if (err != len)
- pr_err("Truncated TX packet: "
- " len %d != %zd\n", err, len);
+ pr_debug("Truncated TX packet: "
+ " len %d != %zd\n", err, len);
vhost_add_used_and_signal(&net->dev, vq, head, 0);
total_len += len;
if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
}
/* TODO: Should check and handle checksum. */
if (err > len) {
- pr_err("Discarded truncated rx packet: "
- " len %d > %zd\n", err, len);
+ pr_debug("Discarded truncated rx packet: "
+ " len %d > %zd\n", err, len);
vhost_discard_vq_desc(vq);
continue;
}
/* start polling new socket */
oldsock = vq->private_data;
- if (sock == oldsock)
- goto done;
+ if (sock != oldsock) {
+ vhost_net_disable_vq(n, vq);
+ rcu_assign_pointer(vq->private_data, sock);
+ vhost_net_enable_vq(n, vq);
+ }
- vhost_net_disable_vq(n, vq);
- rcu_assign_pointer(vq->private_data, sock);
- vhost_net_enable_vq(n, vq);
-done:
+ mutex_unlock(&vq->mutex);
+
if (oldsock) {
vhost_net_flush_vq(n, index);
fput(oldsock->file);
}
+ mutex_unlock(&n->dev.mutex);
+ return 0;
+
err_vq:
mutex_unlock(&vq->mutex);
err:
}
#endif
-const static struct file_operations vhost_net_fops = {
+static const struct file_operations vhost_net_fops = {
.owner = THIS_MODULE,
.release = vhost_net_release,
.unlocked_ioctl = vhost_net_ioctl,
unsigned short sk_ack_backlog;
unsigned short sk_max_ack_backlog;
__u32 sk_priority;
- struct ucred sk_peercred;
+ struct pid *sk_peer_pid;
+ const struct cred *sk_peer_cred;
long sk_rcvtimeo;
long sk_sndtimeo;
struct sk_filter *sk_filter;
int *sysctl_wmem;
int *sysctl_rmem;
int max_header;
+ bool no_autobind;
struct kmem_cache *slab;
unsigned int obj_size;
static inline int sk_tx_queue_get(const struct sock *sk)
{
- return sk->sk_tx_queue_mapping;
- }
-
- static inline bool sk_tx_queue_recorded(const struct sock *sk)
- {
- return (sk && sk->sk_tx_queue_mapping >= 0);
+ return sk ? sk->sk_tx_queue_mapping : -1;
}
static inline void sk_set_socket(struct sock *sk, struct socket *sock)
static inline
struct net *sock_net(const struct sock *sk)
{
-#ifdef CONFIG_NET_NS
- return sk->sk_net;
-#else
- return &init_net;
-#endif
+ return read_pnet(&sk->sk_net);
}
static inline
void sock_net_set(struct sock *sk, struct net *net)
{
-#ifdef CONFIG_NET_NS
- sk->sk_net = net;
-#endif
+ write_pnet(&sk->sk_net, net);
}
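The two accessors now defer to the pnet helpers from include/net/net_namespace.h, which behave roughly as sketched below (reconstructed from memory, not part of this patch):

#ifdef CONFIG_NET_NS
static inline struct net *read_pnet(struct net * const *pnet)
{
        return *pnet;
}

static inline void write_pnet(struct net **pnet, struct net *net)
{
        *pnet = net;
}
#else
/* Without namespaces the field does not exist, so the argument must not
 * be evaluated; every socket implicitly belongs to init_net.
 */
#define read_pnet(pnet)         (&init_net)
#define write_pnet(pnet, net)   do { (void)(net); } while (0)
#endif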
/*
EXPORT_SYMBOL(dev_getfirstbyhwtype);
/**
- * dev_get_by_flags - find any device with given flags
+ * dev_get_by_flags_rcu - find any device with given flags
* @net: the applicable net namespace
* @if_flags: IFF_* values
* @mask: bitmask of bits in if_flags to check
*
* Search for any interface with the given flags. Returns NULL if a device
- * is not found or a pointer to the device. The device returned has
- * had a reference added and the pointer is safe until the user calls
- * dev_put to indicate they have finished with it.
+ * is not found or a pointer to the device. Must be called inside
+ * rcu_read_lock(), and result refcount is unchanged.
*/
-struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags,
+struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
unsigned short mask)
{
struct net_device *dev, *ret;
ret = NULL;
- rcu_read_lock();
for_each_netdev_rcu(net, dev) {
if (((dev->flags ^ if_flags) & mask) == 0) {
- dev_hold(dev);
ret = dev;
break;
}
}
- rcu_read_unlock();
return ret;
}
-EXPORT_SYMBOL(dev_get_by_flags);
+EXPORT_SYMBOL(dev_get_by_flags_rcu);
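A usage sketch of the new contract (the caller below is hypothetical): the lookup runs under rcu_read_lock(), and the caller must take its own reference if it keeps the device past the critical section.

struct net_device *dev;

rcu_read_lock();
dev = dev_get_by_flags_rcu(net, IFF_UP, IFF_UP);
if (dev)
        dev_hold(dev);          /* our own reference; the helper takes none */
rcu_read_unlock();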
/**
* dev_valid_name - check if name is okay for network device
if (net_ratelimit())
printk(KERN_CRIT "protocol %04x is "
"buggy, dev %s\n",
- skb2->protocol, dev->name);
+ ntohs(skb2->protocol),
+ dev->name);
skb_reset_network_header(skb2);
}
void dev_kfree_skb_irq(struct sk_buff *skb)
{
- if (atomic_dec_and_test(&skb->users)) {
+ if (!skb->destructor)
+ dev_kfree_skb(skb);
+ else if (atomic_dec_and_test(&skb->users)) {
struct softnet_data *sd;
unsigned long flags;
*/
static inline void skb_orphan_try(struct sk_buff *skb)
{
- if (!skb_tx(skb)->flags)
+ struct sock *sk = skb->sk;
+
+ if (sk && !skb_tx(skb)->flags) {
+ /* skb_tx_hash() won't be able to get sk.
+ * We copy sk_hash into skb->rxhash
+ */
+ if (!skb->rxhash)
+ skb->rxhash = sk->sk_hash;
skb_orphan(skb);
+ }
}
+/*
+ * Returns true if either:
+ * 1. skb has frag_list and the device doesn't support FRAGLIST, or
+ * 2. skb is fragmented and the device does not support SG, or if
+ * at least one of fragments is in highmem and device does not
+ * support DMA from it.
+ */
+static inline int skb_needs_linearize(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ return skb_is_nonlinear(skb) &&
+ ((skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
+ (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
+ illegal_highdma(dev, skb))));
+}
+
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq)
{
goto out_kfree_skb;
if (skb->next)
goto gso;
+ } else {
+ if (skb_needs_linearize(skb, dev) &&
+ __skb_linearize(skb))
+ goto out_kfree_skb;
+
+ /* If packet is not checksummed and device does not
+ * support checksumming for this protocol, complete
+ * checksumming here.
+ */
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ skb_set_transport_header(skb, skb->csum_start -
+ skb_headroom(skb));
+ if (!dev_can_checksum(dev, skb) &&
+ skb_checksum_help(skb))
+ goto out_kfree_skb;
+ }
}
rc = ops->ndo_start_xmit(skb, dev);
if (skb->sk && skb->sk->sk_hash)
hash = skb->sk->sk_hash;
else
- hash = (__force u16) skb->protocol;
-
+ hash = (__force u16) skb->protocol ^ skb->rxhash;
hash = jhash_1word(hash, hashrnd);
return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
static struct netdev_queue *dev_pick_tx(struct net_device *dev,
struct sk_buff *skb)
{
- u16 queue_index;
+ int queue_index;
struct sock *sk = skb->sk;
- if (sk_tx_queue_recorded(sk)) {
- queue_index = sk_tx_queue_get(sk);
- } else {
+ queue_index = sk_tx_queue_get(sk);
+ if (queue_index < 0) {
const struct net_device_ops *ops = dev->netdev_ops;
if (ops->ndo_select_queue) {
struct netdev_queue *txq)
{
spinlock_t *root_lock = qdisc_lock(q);
+ bool contended = qdisc_is_running(q);
int rc;
+ /*
+ * Heuristic to force contended enqueues to serialize on a
+ * separate lock before trying to get qdisc main lock.
+ * This permits __QDISC_STATE_RUNNING owner to get the lock more often
+ * and dequeue packets faster.
+ */
+ if (unlikely(contended))
+ spin_lock(&q->busylock);
+
spin_lock(root_lock);
if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
kfree_skb(skb);
rc = NET_XMIT_DROP;
} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
- !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) {
+ qdisc_run_begin(q)) {
/*
* This is a work-conserving queue; there are no old skbs
* waiting to be sent out; and the qdisc is not running -
if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
skb_dst_force(skb);
__qdisc_update_bstats(q, skb->len);
- if (sch_direct_xmit(skb, q, dev, txq, root_lock))
+ if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
+ if (unlikely(contended)) {
+ spin_unlock(&q->busylock);
+ contended = false;
+ }
__qdisc_run(q);
- else
- clear_bit(__QDISC_STATE_RUNNING, &q->state);
+ } else
+ qdisc_run_end(q);
rc = NET_XMIT_SUCCESS;
} else {
skb_dst_force(skb);
rc = qdisc_enqueue_root(skb, q);
- qdisc_run(q);
+ if (qdisc_run_begin(q)) {
+ if (unlikely(contended)) {
+ spin_unlock(&q->busylock);
+ contended = false;
+ }
+ __qdisc_run(q);
+ }
}
spin_unlock(root_lock);
-
+ if (unlikely(contended))
+ spin_unlock(&q->busylock);
return rc;
}
-/*
- * Returns true if either:
- * 1. skb has frag_list and the device doesn't support FRAGLIST, or
- * 2. skb is fragmented and the device does not support SG, or if
- * at least one of fragments is in highmem and device does not
- * support DMA from it.
- */
-static inline int skb_needs_linearize(struct sk_buff *skb,
- struct net_device *dev)
-{
- return (skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
- (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
- illegal_highdma(dev, skb)));
-}
-
/**
* dev_queue_xmit - transmit a buffer
* @skb: buffer to transmit
struct Qdisc *q;
int rc = -ENOMEM;
- /* GSO will handle the following emulations directly. */
- if (netif_needs_gso(dev, skb))
- goto gso;
-
- /* Convert a paged skb to linear, if required */
- if (skb_needs_linearize(skb, dev) && __skb_linearize(skb))
- goto out_kfree_skb;
-
- /* If packet is not checksummed and device does not support
- * checksumming for this protocol, complete checksumming here.
- */
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- skb_set_transport_header(skb, skb->csum_start -
- skb_headroom(skb));
- if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
- goto out_kfree_skb;
- }
-
-gso:
/* Disable soft irqs for various locks below. Also
* stops preemption for RCU.
*/
rc = -ENETDOWN;
rcu_read_unlock_bh();
-out_kfree_skb:
kfree_skb(skb);
return rc;
out:
return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}
-#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
-
-#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
+#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
+ (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
/* This hook is defined here for ATM LANE */
int (*br_fdb_test_addr_hook)(struct net_device *dev,
unsigned char *addr) __read_mostly;
EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
#endif
-/*
- * If bridge module is loaded call bridging hook.
- * returns NULL if packet was consumed.
- */
-struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
- struct sk_buff *skb) __read_mostly;
-EXPORT_SYMBOL_GPL(br_handle_frame_hook);
-
-static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
- struct packet_type **pt_prev, int *ret,
- struct net_device *orig_dev)
-{
- struct net_bridge_port *port;
-
- if (skb->pkt_type == PACKET_LOOPBACK ||
- (port = rcu_dereference(skb->dev->br_port)) == NULL)
- return skb;
-
- if (*pt_prev) {
- *ret = deliver_skb(skb, *pt_prev, orig_dev);
- *pt_prev = NULL;
- }
-
- return br_handle_frame_hook(port, skb);
-}
-#else
-#define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
-#endif
-
-#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
-struct sk_buff *(*macvlan_handle_frame_hook)(struct macvlan_port *p,
- struct sk_buff *skb) __read_mostly;
-EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
-
-static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
- struct packet_type **pt_prev,
- int *ret,
- struct net_device *orig_dev)
-{
- struct macvlan_port *port;
-
- port = rcu_dereference(skb->dev->macvlan_port);
- if (!port)
- return skb;
-
- if (*pt_prev) {
- *ret = deliver_skb(skb, *pt_prev, orig_dev);
- *pt_prev = NULL;
- }
- return macvlan_handle_frame_hook(port, skb);
-}
-#else
-#define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
-#endif
-
#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
* when CONFIG_NET_CLS_ACT is? otherwise some useless instructions
if (*pt_prev) {
*ret = deliver_skb(skb, *pt_prev, orig_dev);
*pt_prev = NULL;
- } else {
- /* Huh? Why does turning on AF_PACKET affect this? */
- skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
}
switch (ing_filter(skb)) {
rcu_read_unlock();
}
+/**
+ * netdev_rx_handler_register - register receive handler
+ * @dev: device to register a handler for
+ * @rx_handler: receive handler to register
+ * @rx_handler_data: data pointer that is used by rx handler
+ *
+ * Register a receive handler for a device. This handler will then be
+ * called from __netif_receive_skb. A negative errno code is returned
+ * on a failure.
+ *
+ * The caller must hold the rtnl_mutex.
+ */
+int netdev_rx_handler_register(struct net_device *dev,
+ rx_handler_func_t *rx_handler,
+ void *rx_handler_data)
+{
+ ASSERT_RTNL();
+
+ if (dev->rx_handler)
+ return -EBUSY;
+
+ rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
+ rcu_assign_pointer(dev->rx_handler, rx_handler);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
+
+/**
+ * netdev_rx_handler_unregister - unregister receive handler
+ * @dev: device to unregister a handler from
+ *
+ * Unregister a receive handler from a device.
+ *
+ * The caller must hold the rtnl_mutex.
+ */
+void netdev_rx_handler_unregister(struct net_device *dev)
+{
+
+ ASSERT_RTNL();
+ rcu_assign_pointer(dev->rx_handler, NULL);
+ rcu_assign_pointer(dev->rx_handler_data, NULL);
+}
+EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
+
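As a hedged illustration (the handler and attach function below are hypothetical), a user such as bridge or macvlan would hook in like this; returning NULL from the handler tells __netif_receive_skb that the frame was consumed.

/* Hypothetical pass-through receive handler. */
static struct sk_buff *my_rx_handler(struct sk_buff *skb)
{
        /* inspect, redirect or steal the skb here; NULL means consumed */
        return skb;
}

static int my_attach(struct net_device *dev, void *priv)
{
        int err;

        rtnl_lock();
        err = netdev_rx_handler_register(dev, my_rx_handler, priv);
        rtnl_unlock();

        return err;
}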
static inline void skb_bond_set_mac_by_master(struct sk_buff *skb,
struct net_device *master)
{
if (master->priv_flags & IFF_MASTER_ARPMON)
dev->last_rx = jiffies;
- if ((master->priv_flags & IFF_MASTER_ALB) && master->br_port) {
+ if ((master->priv_flags & IFF_MASTER_ALB) &&
+ (master->priv_flags & IFF_BRIDGE_PORT)) {
/* Do address unmangle. The local destination address
* will be always the one master has. Provides the right
* functionality in a bridge.
static int __netif_receive_skb(struct sk_buff *skb)
{
struct packet_type *ptype, *pt_prev;
+ rx_handler_func_t *rx_handler;
struct net_device *orig_dev;
struct net_device *master;
struct net_device *null_or_orig;
skb->dev = master;
}
- __get_cpu_var(softnet_data).processed++;
-
+ __this_cpu_inc(softnet_data.processed);
skb_reset_network_header(skb);
skb_reset_transport_header(skb);
skb->mac_len = skb->network_header - skb->mac_header;
ncls:
#endif
- skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
- if (!skb)
- goto out;
- skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
- if (!skb)
- goto out;
+ /* Handle special case of bridge or macvlan */
+ rx_handler = rcu_dereference(skb->dev->rx_handler);
+ if (rx_handler) {
+ if (pt_prev) {
+ ret = deliver_skb(skb, pt_prev, orig_dev);
+ pt_prev = NULL;
+ }
+ skb = rx_handler(skb);
+ if (!skb)
+ goto out;
+ }
/*
* Make sure frames received on VLAN interfaces stacked on
if (netdev_tstamp_prequeue)
net_timestamp_check(skb);
+ if (skb_defer_rx_timestamp(skb))
+ return NET_RX_SUCCESS;
+
#ifdef CONFIG_RPS
{
struct rps_dev_flow voidflow, *rflow = &voidflow;
static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
{
- const struct net_device_stats *stats = dev_get_stats(dev);
+ struct rtnl_link_stats64 temp;
+ const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
- seq_printf(seq, "%6s: %7lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
- "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
+ "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
dev->name, stats->rx_bytes, stats->rx_packets,
stats->rx_errors,
stats->rx_dropped + stats->rx_missed_errors,
/**
* dev_txq_stats_fold - fold tx_queues stats
* @dev: device to get statistics from
- * @stats: struct net_device_stats to hold results
+ * @stats: struct rtnl_link_stats64 to hold results
*/
void dev_txq_stats_fold(const struct net_device *dev,
- struct net_device_stats *stats)
+ struct rtnl_link_stats64 *stats)
{
- unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
+ u64 tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
unsigned int i;
struct netdev_queue *txq;
for (i = 0; i < dev->num_tx_queues; i++) {
txq = netdev_get_tx_queue(dev, i);
+ spin_lock_bh(&txq->_xmit_lock);
tx_bytes += txq->tx_bytes;
tx_packets += txq->tx_packets;
tx_dropped += txq->tx_dropped;
+ spin_unlock_bh(&txq->_xmit_lock);
}
if (tx_bytes || tx_packets || tx_dropped) {
stats->tx_bytes = tx_bytes;
}
EXPORT_SYMBOL(dev_txq_stats_fold);
+/* Convert net_device_stats to rtnl_link_stats64. They have the same
+ * fields in the same order, with only the type differing.
+ */
+static void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
+ const struct net_device_stats *netdev_stats)
+{
+#if BITS_PER_LONG == 64
+ BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
+ memcpy(stats64, netdev_stats, sizeof(*stats64));
+#else
+ size_t i, n = sizeof(*stats64) / sizeof(u64);
+ const unsigned long *src = (const unsigned long *)netdev_stats;
+ u64 *dst = (u64 *)stats64;
+
+ BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
+ sizeof(*stats64) / sizeof(u64));
+ for (i = 0; i < n; i++)
+ dst[i] = src[i];
+#endif
+}
+
/**
* dev_get_stats - get network device statistics
* @dev: device to get statistics from
+ * @storage: place to store stats
*
- * Get network statistics from device. The device driver may provide
- * its own method by setting dev->netdev_ops->get_stats; otherwise
- * the internal statistics structure is used.
+ * Get network statistics from device. Return @storage.
+ * The device driver may provide its own method by setting
+ * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
+ * otherwise the internal statistics structure is used.
*/
-const struct net_device_stats *dev_get_stats(struct net_device *dev)
+struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *storage)
{
const struct net_device_ops *ops = dev->netdev_ops;
- if (ops->ndo_get_stats)
- return ops->ndo_get_stats(dev);
-
- dev_txq_stats_fold(dev, &dev->stats);
- return &dev->stats;
+ if (ops->ndo_get_stats64) {
+ memset(storage, 0, sizeof(*storage));
+ return ops->ndo_get_stats64(dev, storage);
+ }
+ if (ops->ndo_get_stats) {
+ netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
+ return storage;
+ }
+ netdev_stats_to_stats64(storage, &dev->stats);
+ dev_txq_stats_fold(dev, storage);
+ return storage;
}
EXPORT_SYMBOL(dev_get_stats);
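A short sketch of the new calling convention (hypothetical caller): counters now land in caller-provided storage instead of a pointer into the device.

struct rtnl_link_stats64 stats;

dev_get_stats(dev, &stats);
pr_info("%s: rx %llu packets, tx %llu packets\n",
        dev->name, stats.rx_packets, stats.tx_packets);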
return buffer;
}
+static int __netdev_printk(const char *level, const struct net_device *dev,
+ struct va_format *vaf)
+{
+ int r;
+
+ if (dev && dev->dev.parent)
+ r = dev_printk(level, dev->dev.parent, "%s: %pV",
+ netdev_name(dev), vaf);
+ else if (dev)
+ r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
+ else
+ r = printk("%s(NULL net_device): %pV", level, vaf);
+
+ return r;
+}
+
+int netdev_printk(const char *level, const struct net_device *dev,
+ const char *format, ...)
+{
+ struct va_format vaf;
+ va_list args;
+ int r;
+
+ va_start(args, format);
+
+ vaf.fmt = format;
+ vaf.va = &args;
+
+ r = __netdev_printk(level, dev, &vaf);
+ va_end(args);
+
+ return r;
+}
+EXPORT_SYMBOL(netdev_printk);
+
+#define define_netdev_printk_level(func, level) \
+int func(const struct net_device *dev, const char *fmt, ...) \
+{ \
+ int r; \
+ struct va_format vaf; \
+ va_list args; \
+ \
+ va_start(args, fmt); \
+ \
+ vaf.fmt = fmt; \
+ vaf.va = &args; \
+ \
+ r = __netdev_printk(level, dev, &vaf); \
+ va_end(args); \
+ \
+ return r; \
+} \
+EXPORT_SYMBOL(func);
+
+define_netdev_printk_level(netdev_emerg, KERN_EMERG);
+define_netdev_printk_level(netdev_alert, KERN_ALERT);
+define_netdev_printk_level(netdev_crit, KERN_CRIT);
+define_netdev_printk_level(netdev_err, KERN_ERR);
+define_netdev_printk_level(netdev_warn, KERN_WARNING);
+define_netdev_printk_level(netdev_notice, KERN_NOTICE);
+define_netdev_printk_level(netdev_info, KERN_INFO);
+
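A hedged example of the resulting driver-facing helpers (the function and variables below are made up):

static void mydrv_report_link(struct net_device *netdev, bool up,
                              unsigned int speed)
{
        if (up)
                netdev_info(netdev, "link up, %u Mb/s\n", speed);
        else
                netdev_warn(netdev, "link down\n");
}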
static void __net_exit netdev_exit(struct net *net)
{
kfree(net->dev_name_head);
int err;
err = ipmr_fib_lookup(net, &fl, &mrt);
- if (err < 0)
+ if (err < 0) {
+ kfree_skb(skb);
return err;
+ }
read_lock(&mrt_lock);
dev->stats.tx_bytes += skb->len;
goto out_free;
}
- dev = rt->u.dst.dev;
+ dev = rt->dst.dev;
- if (skb->len+encap > dst_mtu(&rt->u.dst) && (ntohs(iph->frag_off) & IP_DF)) {
+ if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
/* Do not fragment multicasts. Alas, IPv4 does not
allow to send ICMP, so that packets will disappear
to blackhole.
goto out_free;
}
- encap += LL_RESERVED_SPACE(dev) + rt->u.dst.header_len;
+ encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len;
if (skb_cow(skb, encap)) {
ip_rt_put(rt);
vif->bytes_out += skb->len;
skb_dst_drop(skb);
- skb_dst_set(skb, &rt->u.dst);
+ skb_dst_set(skb, &rt->dst);
ip_decrease_ttl(ip_hdr(skb));
/* FIXME: forward and output firewalls used to be called here.
goto dont_forward;
err = ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt);
- if (err < 0)
+ if (err < 0) {
+ kfree_skb(skb);
return err;
+ }
if (!local) {
if (IPCB(skb)->opt.router_alert) {
* is strict, actions are advisory and have some latency.
*/
int tcp_memory_pressure __read_mostly;
-
EXPORT_SYMBOL(tcp_memory_pressure);
void tcp_enter_memory_pressure(struct sock *sk)
tcp_memory_pressure = 1;
}
}
-
EXPORT_SYMBOL(tcp_enter_memory_pressure);
/* Convert seconds to retransmits based on initial and max timeout */
}
return mask;
}
+EXPORT_SYMBOL(tcp_poll);
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
return put_user(answ, (int __user *)arg);
}
+EXPORT_SYMBOL(tcp_ioctl);
static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
- TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
+ TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
tp->pushed_seq = tp->write_seq;
}
skb->csum = 0;
tcb->seq = tcb->end_seq = tp->write_seq;
- tcb->flags = TCPCB_FLAG_ACK;
+ tcb->flags = TCPHDR_ACK;
tcb->sacked = 0;
skb_header_release(skb);
tcp_add_write_queue_tail(sk, skb);
ssize_t spliced;
int ret;
+ sock_rps_record_flow(sk);
/*
* We can't seek on a socket input
*/
return ret;
}
+EXPORT_SYMBOL(tcp_splice_read);
struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
{
skb_shinfo(skb)->gso_segs = 0;
if (!copied)
- TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
+ TCP_SKB_CB(skb)->flags &= ~TCPHDR_PSH;
copied += copy;
poffset += copy;
return sk_stream_error(sk, flags, err);
}
-ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
- size_t size, int flags)
+int tcp_sendpage(struct sock *sk, struct page *page, int offset,
+ size_t size, int flags)
{
ssize_t res;
- struct sock *sk = sock->sk;
if (!(sk->sk_route_caps & NETIF_F_SG) ||
!(sk->sk_route_caps & NETIF_F_ALL_CSUM))
- return sock_no_sendpage(sock, page, offset, size, flags);
+ return sock_no_sendpage(sk->sk_socket, page, offset, size,
+ flags);
lock_sock(sk);
TCP_CHECK_TIMER(sk);
release_sock(sk);
return res;
}
+EXPORT_SYMBOL(tcp_sendpage);
#define TCP_PAGE(sk) (sk->sk_sndmsg_page)
#define TCP_OFF(sk) (sk->sk_sndmsg_off)
return tmp;
}
-int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
+int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t size)
{
- struct sock *sk = sock->sk;
struct iovec *iov;
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
}
if (!copied)
- TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
+ TCP_SKB_CB(skb)->flags &= ~TCPHDR_PSH;
tp->write_seq += copy;
TCP_SKB_CB(skb)->end_seq += copy;
release_sock(sk);
return err;
}
+EXPORT_SYMBOL(tcp_sendmsg);
/*
* Handle reading urgent data. BSD has very simple semantics for
tcp_cleanup_rbuf(sk, copied);
return copied;
}
+EXPORT_SYMBOL(tcp_read_sock);
/*
* This routine copies from a sock struct into the user buffer.
err = tcp_recv_urg(sk, msg, len, flags);
goto out;
}
+EXPORT_SYMBOL(tcp_recvmsg);
void tcp_set_state(struct sock *sk, int state)
{
tcp_send_fin(sk);
}
}
+EXPORT_SYMBOL(tcp_shutdown);
void tcp_close(struct sock *sk, long timeout)
{
sk_mem_reclaim(sk);
+ /* If socket has been already reset (e.g. in tcp_reset()) - kill it. */
+ if (sk->sk_state == TCP_CLOSE)
+ goto adjudge_to_death;
+
/* As outlined in RFC 2525, section 2.17, we send a RST here because
* data was lost. To witness the awful effects of the old behavior of
* always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
local_bh_enable();
sock_put(sk);
}
+EXPORT_SYMBOL(tcp_close);
/* These states need RST on ABORT according to RFC793 */
sk->sk_error_report(sk);
return err;
}
+EXPORT_SYMBOL(tcp_disconnect);
/*
* Socket option code for TCP.
optval, optlen);
return do_tcp_setsockopt(sk, level, optname, optval, optlen);
}
+EXPORT_SYMBOL(tcp_setsockopt);
#ifdef CONFIG_COMPAT
int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
optval, optlen);
return do_tcp_setsockopt(sk, level, optname, optval, optlen);
}
-
EXPORT_SYMBOL(compat_tcp_setsockopt);
#endif
info->tcpi_total_retrans = tp->total_retrans;
}
-
EXPORT_SYMBOL_GPL(tcp_get_info);
static int do_tcp_getsockopt(struct sock *sk, int level,
optval, optlen);
return do_tcp_getsockopt(sk, level, optname, optval, optlen);
}
+EXPORT_SYMBOL(tcp_getsockopt);
#ifdef CONFIG_COMPAT
int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
optval, optlen);
return do_tcp_getsockopt(sk, level, optname, optval, optlen);
}
-
EXPORT_SYMBOL(compat_tcp_getsockopt);
#endif
if (pool)
__tcp_free_md5sig_pool(pool);
}
-
EXPORT_SYMBOL(tcp_free_md5sig_pool);
static struct tcp_md5sig_pool * __percpu *
}
return pool;
}
-
EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
spin_unlock(&tcp_md5sig_pool_lock);
if (p)
- return *per_cpu_ptr(p, smp_processor_id());
+ return *this_cpu_ptr(p);
local_bh_enable();
return NULL;
th->check = old_checksum;
return err;
}
-
EXPORT_SYMBOL(tcp_md5_hash_header);
int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
const unsigned head_data_len = skb_headlen(skb) > header_len ?
skb_headlen(skb) - header_len : 0;
const struct skb_shared_info *shi = skb_shinfo(skb);
+ struct sk_buff *frag_iter;
sg_init_table(&sg, 1);
return 1;
}
+ skb_walk_frags(skb, frag_iter)
+ if (tcp_md5_hash_skb_data(hp, frag_iter, 0))
+ return 1;
+
return 0;
}
-
EXPORT_SYMBOL(tcp_md5_hash_skb_data);
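For reference, skb_walk_frags() used above is the frag_list iterator from linux/skbuff.h, roughly (quoted from memory, not part of this patch):

#define skb_walk_frags(skb, iter) \
        for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)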
int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, struct tcp_md5sig_key *key)
sg_init_one(&sg, key->key, key->keylen);
return crypto_hash_update(&hp->md5_desc, &sg, key->keylen);
}
-
EXPORT_SYMBOL(tcp_md5_hash_key);
#endif
tcp_secret_retiring = &tcp_secret_two;
tcp_secret_secondary = &tcp_secret_two;
}
-
-EXPORT_SYMBOL(tcp_close);
-EXPORT_SYMBOL(tcp_disconnect);
-EXPORT_SYMBOL(tcp_getsockopt);
-EXPORT_SYMBOL(tcp_ioctl);
-EXPORT_SYMBOL(tcp_poll);
-EXPORT_SYMBOL(tcp_read_sock);
-EXPORT_SYMBOL(tcp_recvmsg);
-EXPORT_SYMBOL(tcp_sendmsg);
-EXPORT_SYMBOL(tcp_splice_read);
-EXPORT_SYMBOL(tcp_sendpage);
-EXPORT_SYMBOL(tcp_setsockopt);
-EXPORT_SYMBOL(tcp_shutdown);
/* Set the clamp no higher than max representable value */
(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
}
+EXPORT_SYMBOL(tcp_select_initial_window);
/* Chose a new window to advertise, update state in tcp_sock for the
* socket, and return result with RFC1323 scaling applied. The return
/* Packet ECN state for a SYN-ACK */
static inline void TCP_ECN_send_synack(struct tcp_sock *tp, struct sk_buff *skb)
{
- TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_CWR;
+ TCP_SKB_CB(skb)->flags &= ~TCPHDR_CWR;
if (!(tp->ecn_flags & TCP_ECN_OK))
- TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_ECE;
+ TCP_SKB_CB(skb)->flags &= ~TCPHDR_ECE;
}
/* Packet ECN state for a SYN. */
tp->ecn_flags = 0;
if (sysctl_tcp_ecn == 1) {
- TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE | TCPCB_FLAG_CWR;
+ TCP_SKB_CB(skb)->flags |= TCPHDR_ECE | TCPHDR_CWR;
tp->ecn_flags = TCP_ECN_OK;
}
}
skb_shinfo(skb)->gso_type = 0;
TCP_SKB_CB(skb)->seq = seq;
- if (flags & (TCPCB_FLAG_SYN | TCPCB_FLAG_FIN))
+ if (flags & (TCPHDR_SYN | TCPHDR_FIN))
seq++;
TCP_SKB_CB(skb)->end_seq = seq;
}
tcb = TCP_SKB_CB(skb);
memset(&opts, 0, sizeof(opts));
- if (unlikely(tcb->flags & TCPCB_FLAG_SYN))
+ if (unlikely(tcb->flags & TCPHDR_SYN))
tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
else
tcp_options_size = tcp_established_options(sk, skb, &opts,
*(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) |
tcb->flags);
- if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
+ if (unlikely(tcb->flags & TCPHDR_SYN)) {
/* RFC1323: The window in SYN & SYN/ACK segments
* is never scaled.
*/
}
tcp_options_write((__be32 *)(th + 1), tp, &opts);
- if (likely((tcb->flags & TCPCB_FLAG_SYN) == 0))
+ if (likely((tcb->flags & TCPHDR_SYN) == 0))
TCP_ECN_send(sk, skb, tcp_header_size);
#ifdef CONFIG_TCP_MD5SIG
icsk->icsk_af_ops->send_check(sk, skb);
- if (likely(tcb->flags & TCPCB_FLAG_ACK))
+ if (likely(tcb->flags & TCPHDR_ACK))
tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
if (skb->len != tcp_header_size)
/* PSH and FIN should only be set in the second packet. */
flags = TCP_SKB_CB(skb)->flags;
- TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN | TCPCB_FLAG_PSH);
+ TCP_SKB_CB(skb)->flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
TCP_SKB_CB(buff)->flags = flags;
TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss);
icsk->icsk_mtup.probe_size = 0;
}
+EXPORT_SYMBOL(tcp_mtup_init);
/* This function synchronize snd mss to current pmtu/exthdr set.
return mss_now;
}
+EXPORT_SYMBOL(tcp_sync_mss);
/* Compute the current effective MSS, taking SACKs and IP options,
* and even PMTU discovery events into account.
u32 in_flight, cwnd;
/* Don't be strict about the congestion window for the final FIN. */
- if ((TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
- tcp_skb_pcount(skb) == 1)
+ if ((TCP_SKB_CB(skb)->flags & TCPHDR_FIN) && tcp_skb_pcount(skb) == 1)
return 1;
in_flight = tcp_packets_in_flight(tp);
* Nagle can be ignored during F-RTO too (see RFC4138).
*/
if (tcp_urg_mode(tp) || (tp->frto_counter == 2) ||
- (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN))
+ (TCP_SKB_CB(skb)->flags & TCPHDR_FIN))
return 1;
if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
* packet has never been sent out before (and thus is not cloned).
*/
static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
- unsigned int mss_now)
+ unsigned int mss_now, gfp_t gfp)
{
struct sk_buff *buff;
int nlen = skb->len - len;
if (skb->len != skb->data_len)
return tcp_fragment(sk, skb, len, mss_now);
- buff = sk_stream_alloc_skb(sk, 0, GFP_ATOMIC);
+ buff = sk_stream_alloc_skb(sk, 0, gfp);
if (unlikely(buff == NULL))
return -ENOMEM;
/* PSH and FIN should only be set in the second packet. */
flags = TCP_SKB_CB(skb)->flags;
- TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN | TCPCB_FLAG_PSH);
+ TCP_SKB_CB(skb)->flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
TCP_SKB_CB(buff)->flags = flags;
/* This packet was never sent out yet, so no SACK bits. */
const struct inet_connection_sock *icsk = inet_csk(sk);
u32 send_win, cong_win, limit, in_flight;
- if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
+ if (TCP_SKB_CB(skb)->flags & TCPHDR_FIN)
goto send_now;
if (icsk->icsk_ca_state != TCP_CA_Open)
TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
- TCP_SKB_CB(nskb)->flags = TCPCB_FLAG_ACK;
+ TCP_SKB_CB(nskb)->flags = TCPHDR_ACK;
TCP_SKB_CB(nskb)->sacked = 0;
nskb->csum = 0;
nskb->ip_summed = skb->ip_summed;
sk_wmem_free_skb(sk, skb);
} else {
TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags &
- ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
+ ~(TCPHDR_FIN|TCPHDR_PSH);
if (!skb_shinfo(skb)->nr_frags) {
skb_pull(skb, copy);
if (skb->ip_summed != CHECKSUM_PARTIAL)
cwnd_quota);
if (skb->len > limit &&
- unlikely(tso_fragment(sk, skb, limit, mss_now)))
+ unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
break;
TCP_SKB_CB(skb)->when = tcp_time_stamp;
if (!sysctl_tcp_retrans_collapse)
return;
- if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN)
+ if (TCP_SKB_CB(skb)->flags & TCPHDR_SYN)
return;
tcp_for_write_queue_from_safe(skb, tmp, sk) {
* since it is cheap to do so and saves bytes on the network.
*/
if (skb->len > 0 &&
- (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
+ (TCP_SKB_CB(skb)->flags & TCPHDR_FIN) &&
tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
if (!pskb_trim(skb, 0)) {
/* Reuse, even though it does some unnecessary work */
int mib_idx;
int fwd_rexmitting = 0;
+ if (!tp->packets_out)
+ return;
+
if (!tp->lost_out)
tp->retransmit_high = tp->snd_una;
mss_now = tcp_current_mss(sk);
if (tcp_send_head(sk) != NULL) {
- TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN;
+ TCP_SKB_CB(skb)->flags |= TCPHDR_FIN;
TCP_SKB_CB(skb)->end_seq++;
tp->write_seq++;
} else {
skb_reserve(skb, MAX_TCP_HEADER);
/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
tcp_init_nondata_skb(skb, tp->write_seq,
- TCPCB_FLAG_ACK | TCPCB_FLAG_FIN);
+ TCPHDR_ACK | TCPHDR_FIN);
tcp_queue_skb(sk, skb);
}
__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
/* Reserve space for headers and prepare control bits. */
skb_reserve(skb, MAX_TCP_HEADER);
tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
- TCPCB_FLAG_ACK | TCPCB_FLAG_RST);
+ TCPHDR_ACK | TCPHDR_RST);
/* Send it off. */
TCP_SKB_CB(skb)->when = tcp_time_stamp;
if (tcp_transmit_skb(sk, skb, 0, priority))
struct sk_buff *skb;
skb = tcp_write_queue_head(sk);
- if (skb == NULL || !(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN)) {
+ if (skb == NULL || !(TCP_SKB_CB(skb)->flags & TCPHDR_SYN)) {
printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n");
return -EFAULT;
}
- if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_ACK)) {
+ if (!(TCP_SKB_CB(skb)->flags & TCPHDR_ACK)) {
if (skb_cloned(skb)) {
struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
if (nskb == NULL)
skb = nskb;
}
- TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ACK;
+ TCP_SKB_CB(skb)->flags |= TCPHDR_ACK;
TCP_ECN_send_synack(tcp_sk(sk), skb);
}
TCP_SKB_CB(skb)->when = tcp_time_stamp;
* not even correctly set)
*/
tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn,
- TCPCB_FLAG_SYN | TCPCB_FLAG_ACK);
+ TCPHDR_SYN | TCPHDR_ACK);
if (OPTION_COOKIE_EXTENSION & opts.options) {
if (s_data_desired) {
return skb;
}
+EXPORT_SYMBOL(tcp_make_synack);
/* Do all connect socket setups that can be done AF independent. */
static void tcp_connect_init(struct sock *sk)
skb_reserve(buff, MAX_TCP_HEADER);
tp->snd_nxt = tp->write_seq;
- tcp_init_nondata_skb(buff, tp->write_seq++, TCPCB_FLAG_SYN);
+ tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
TCP_ECN_send_syn(sk, buff);
/* Send it off. */
inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
return 0;
}
+EXPORT_SYMBOL(tcp_connect);
/* Send out a delayed ack, the caller does the policy checking
* to see if we should even be here. See tcp_input.c:tcp_ack_snd_check()
/* Reserve space for headers and prepare control bits. */
skb_reserve(buff, MAX_TCP_HEADER);
- tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPCB_FLAG_ACK);
+ tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
/* Send it off, this clears delayed acks for us. */
TCP_SKB_CB(buff)->when = tcp_time_stamp;
* end to send an ack. Don't queue or clone SKB, just
* send it.
*/
- tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPCB_FLAG_ACK);
+ tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
TCP_SKB_CB(skb)->when = tcp_time_stamp;
return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
}
if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
skb->len > mss) {
seg_size = min(seg_size, mss);
- TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
+ TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
if (tcp_fragment(sk, skb, seg_size, mss))
return -1;
} else if (!tcp_skb_pcount(skb))
tcp_set_skb_tso_segs(sk, skb, mss);
- TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
+ TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
TCP_SKB_CB(skb)->when = tcp_time_stamp;
err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
if (!err)
TCP_RTO_MAX);
}
}
-
-EXPORT_SYMBOL(tcp_select_initial_window);
-EXPORT_SYMBOL(tcp_connect);
-EXPORT_SYMBOL(tcp_make_synack);
-EXPORT_SYMBOL(tcp_simple_retransmit);
-EXPORT_SYMBOL(tcp_sync_mss);
-EXPORT_SYMBOL(tcp_mtup_init);
{
struct icmphdr *icmph;
- if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph)))
+ if (!pskb_may_pull(skb, ihl + sizeof(*icmph)))
goto drop;
icmph = (void *)(skb_network_header(skb) + ihl);
(icmph->type != ICMP_PARAMETERPROB))
break;
+ if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph)))
+ goto drop;
+
iph = (void *)(icmph + 1);
if (egress)
addr = iph->daddr;
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_nat *p = a->priv;
- struct tc_nat *opt;
+ struct tc_nat opt;
struct tcf_t t;
- int s;
- s = sizeof(*opt);
+ opt.old_addr = p->old_addr;
+ opt.new_addr = p->new_addr;
+ opt.mask = p->mask;
+ opt.flags = p->flags;
- /* netlink spinlocks held above us - must use ATOMIC */
- opt = kzalloc(s, GFP_ATOMIC);
- if (unlikely(!opt))
- return -ENOBUFS;
+ opt.index = p->tcf_index;
+ opt.action = p->tcf_action;
+ opt.refcnt = p->tcf_refcnt - ref;
+ opt.bindcnt = p->tcf_bindcnt - bind;
- opt->old_addr = p->old_addr;
- opt->new_addr = p->new_addr;
- opt->mask = p->mask;
- opt->flags = p->flags;
-
- opt->index = p->tcf_index;
- opt->action = p->tcf_action;
- opt->refcnt = p->tcf_refcnt - ref;
- opt->bindcnt = p->tcf_bindcnt - bind;
-
- NLA_PUT(skb, TCA_NAT_PARMS, s, opt);
+ NLA_PUT(skb, TCA_NAT_PARMS, sizeof(opt), &opt);
t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
NLA_PUT(skb, TCA_NAT_TM, sizeof(t), &t);
- kfree(opt);
-
return skb->len;
nla_put_failure:
nlmsg_trim(skb, b);
- kfree(opt);
return -1;
}
/* Try to instantiate a bundle */
err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
- if (err < 0) {
- if (err != -EAGAIN)
+ if (err <= 0) {
+ if (err != 0 && err != -EAGAIN)
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
return ERR_PTR(err);
}
goto make_dummy_bundle;
dst_hold(&xdst->u.dst);
return oldflo;
+ } else if (new_xdst == NULL) {
+ num_xfrms = 0;
+ if (oldflo == NULL)
+ goto make_dummy_bundle;
+ xdst->num_xfrms = 0;
+ dst_hold(&xdst->u.dst);
+ return oldflo;
}
/* Kill the previous bundle */
xfrm_pols_put(pols, num_pols);
err = PTR_ERR(xdst);
goto dropdst;
+ } else if (xdst == NULL) {
+ num_xfrms = 0;
+ drop_pols = num_pols;
+ goto no_transform;
}
spin_lock_bh(&xfrm_policy_sk_bundle_lock);
int rv;
if (snmp_mib_init((void __percpu **)net->mib.xfrm_statistics,
- sizeof(struct linux_xfrm_mib)) < 0)
+ sizeof(struct linux_xfrm_mib),
+ __alignof__(struct linux_xfrm_mib)) < 0)
return -ENOMEM;
rv = xfrm_proc_init(net);
if (rv < 0)