/*********************************************************************
 *
 * Filename:      irlan_eth.c
 * Version:
 * Description:
 * Status:        Experimental.
 * Author:        Dag Brattli <dagb@cs.uit.no>
 * Created at:    Thu Oct 15 08:37:58 1998
 * Modified at:   Tue Mar 21 09:06:41 2000
 * Modified by:   Dag Brattli <dagb@cs.uit.no>
 * Sources:       skeleton.c by Donald Becker <becker@CESDIS.gsfc.nasa.gov>
 *                slip.c by Laurence Culhane, <loz@holmes.demon.co.uk>
 *                Fred N. van Kempen, <waltje@uwalt.nl.mugnet.org>
 *
 *     Copyright (c) 1998-2000 Dag Brattli, All Rights Reserved.
 *
 *     This program is free software; you can redistribute it and/or
 *     modify it under the terms of the GNU General Public License as
 *     published by the Free Software Foundation; either version 2 of
 *     the License, or (at your option) any later version.
 *
 *     Neither Dag Brattli nor University of Tromsø admit liability nor
 *     provide warranty for any of this software. This material is
 *     provided "AS-IS" and at no charge.
 *
 ********************************************************************/

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_arp.h>
#include <linux/module.h>
#include <net/arp.h>

#include <net/irda/irda.h>
#include <net/irda/irmod.h>
#include <net/irda/irlan_common.h>
#include <net/irda/irlan_client.h>
#include <net/irda/irlan_event.h>
#include <net/irda/irlan_eth.h>

static int  irlan_eth_open(struct net_device *dev);
static int  irlan_eth_close(struct net_device *dev);
static int  irlan_eth_xmit(struct sk_buff *skb, struct net_device *dev);
static void irlan_eth_set_multicast_list(struct net_device *dev);
static struct net_device_stats *irlan_eth_get_stats(struct net_device *dev);

/*
 * Function irlan_eth_setup (dev)
 *
 *    The network device initialization function.
 *
 */
static void irlan_eth_setup(struct net_device *dev)
{
	dev->open               = irlan_eth_open;
	dev->stop               = irlan_eth_close;
	dev->hard_start_xmit    = irlan_eth_xmit;
	dev->get_stats          = irlan_eth_get_stats;
	dev->set_multicast_list = irlan_eth_set_multicast_list;
	dev->destructor         = free_netdev;

	SET_MODULE_OWNER(dev);

	ether_setup(dev);

	/*
	 * Let's do all queueing in IrTTP instead of this device driver.
	 * Queueing here as well can introduce some strange latency
	 * problems, which we will avoid by setting the queue size to 0.
	 */
	/*
	 * The bugs in IrTTP and IrLAN that created this latency issue
	 * have now been fixed, and we can propagate flow control properly
	 * to the network layer. However, this requires a minimal queue of
	 * packets for the device.
	 * Without flow control, the Tx Queue is 14 (ttp) + 0 (dev) = 14
	 * With flow control, the Tx Queue is 7 (ttp) + 4 (dev) = 11
	 * See irlan_eth_flow_indication()...
	 * Note : this number was randomly selected and would need to
	 * be adjusted.
	 * Jean II */
	dev->tx_queue_len = 4;
}

/*
 * Function alloc_irlandev
 *
 *    Allocate network device and control block
 *
 */
struct net_device *alloc_irlandev(const char *name)
{
	return alloc_netdev(sizeof(struct irlan_cb), name,
			    irlan_eth_setup);
}
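
/*
 * Typical caller pattern (a sketch only, not the verbatim call site):
 * the IrLAN core allocates the device and then registers it, while the
 * destructor set up in irlan_eth_setup() takes care of freeing it:
 *
 *	struct net_device *dev = alloc_irlandev("irlan%d");
 *	if (!dev)
 *		return NULL;
 *	if (register_netdev(dev)) {
 *		free_netdev(dev);
 *		return NULL;
 *	}
 *
 * The interface name template used by the real caller may differ.
 */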

/*
 * Function irlan_eth_open (dev)
 *
 *    Network device has been opened by user
 *
 */
static int irlan_eth_open(struct net_device *dev)
{
	struct irlan_cb *self = netdev_priv(dev);

	IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );

	/* Ready to play! */
	netif_stop_queue(dev); /* Wait until data link is ready */

	/* We are now open, so time to do some work */
	self->disconnect_reason = 0;
	irlan_client_wakeup(self, self->saddr, self->daddr);

	/* Make sure we have a hardware address before we return,
	   so DHCP clients get happy */
	return wait_event_interruptible(self->open_wait,
					!self->tsap_data->connected);
}
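
/*
 * Note that the transmit queue is deliberately left stopped here; it is
 * expected to be started again once the data channel is up (see also
 * the FLOW_START case in irlan_eth_flow_indication() below, which wakes
 * the queue when IrTTP drains).
 */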

/*
 * Function irlan_eth_close (dev)
 *
 *    Stop the Ethernet network device. This function will usually be
 *    called by ifconfig down. We should now disconnect the link. We
 *    start the close timer, so that the instance will be removed if we
 *    are unable to discover the remote device after the disconnect.
 */
static int irlan_eth_close(struct net_device *dev)
{
	struct irlan_cb *self = netdev_priv(dev);

	IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );

	/* Stop device */
	netif_stop_queue(dev);

	irlan_close_data_channel(self);
	irlan_close_tsaps(self);

	irlan_do_client_event(self, IRLAN_LMP_DISCONNECT, NULL);
	irlan_do_provider_event(self, IRLAN_LMP_DISCONNECT, NULL);

	/* Remove frames queued on the control channel */
	skb_queue_purge(&self->client.txq);

	self->client.tx_busy = 0;

	return 0;
}

/*
 * Function irlan_eth_xmit (skb, dev)
 *
 *    Transmits Ethernet frames over the IrDA link.
 *
 */
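/*
 * As a hard_start_xmit handler this function always consumes the skb
 * and returns 0; back-pressure is applied through the queue instead,
 * with irlan_eth_flow_indication() stopping it when IrTTP fills up.
 */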
static int irlan_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct irlan_cb *self = netdev_priv(dev);
	unsigned int len;
	int ret;

	/* skb headroom large enough to contain all IrDA-headers? */
	if ((skb_headroom(skb) < self->max_header_size) || (skb_shared(skb))) {
		struct sk_buff *new_skb =
			skb_realloc_headroom(skb, self->max_header_size);

		/* We have to free the original skb anyway */
		dev_kfree_skb(skb);

		/* Did the realloc succeed? */
		if (new_skb == NULL)
			return 0;

		/* Use the new skb instead */
		skb = new_skb;
	}

	dev->trans_start = jiffies;

	/* Remember the packet length; IrTTP takes ownership of the skb
	   below, so we must not dereference it afterwards */
	len = skb->len;

	/* Now queue the packet in the transport layer */
	if (self->use_udata)
		ret = irttp_udata_request(self->tsap_data, skb);
	else
		ret = irttp_data_request(self->tsap_data, skb);

	if (ret < 0) {
		/*
		 * IrTTP's tx queue is full, so we just have to
		 * drop the frame! You might think that we should
		 * just return -1 and not deallocate the frame,
		 * but that is dangerous since it's possible that
		 * we have replaced the original skb with a new
		 * one with larger headroom, and that would really
		 * confuse do_dev_queue_xmit() in dev.c! I have
		 * tried :-) DB
		 */
		/* irttp_data_request has already freed the packet */
		self->stats.tx_dropped++;
	} else {
		self->stats.tx_packets++;
		self->stats.tx_bytes += len;
	}

	return 0;
}

/*
 * Function irlan_eth_receive (handle, skb)
 *
 *    This function gets the data that is received on the data channel
 *
 */
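/*
 * eth_type_trans() below pulls the Ethernet header off the skb and
 * fills in skb->dev, skb->pkt_type and skb->protocol, so the frame is
 * handed to netif_rx() exactly as if it had arrived on a real Ethernet
 * device. Note that rx_bytes is therefore accounted without the
 * 14-byte Ethernet header.
 */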
int irlan_eth_receive(void *instance, void *sap, struct sk_buff *skb)
{
	struct irlan_cb *self = instance;

	if (skb == NULL) {
		++self->stats.rx_dropped;
		return 0;
	}
	if (skb->len < ETH_HLEN) {
		IRDA_DEBUG(0, "%s() : IrLAN frame too short (%d)\n",
			   __FUNCTION__, skb->len);
		++self->stats.rx_dropped;
		dev_kfree_skb(skb);
		return 0;
	}

	/*
	 * Adopt this frame! Important to set all these fields since they
	 * might have been previously set by the low level IrDA network
	 * device driver
	 */
	skb->protocol = eth_type_trans(skb, self->dev); /* Remove eth header */

	self->stats.rx_packets++;
	self->stats.rx_bytes += skb->len;

	netif_rx(skb);   /* Eat it! */

	return 0;
}

/*
 * Function irlan_eth_flow_indication (instance, sap, flow)
 *
 *    Do flow control between IP/Ethernet and IrLAN/IrTTP. This is done by
 *    controlling the queue stop/start.
 *
 *    The IrDA link layer has the advantage of having flow control, and
 *    IrTTP now properly handles that. Flow controlling the higher layers
 *    prevents us from dropping Tx packets here (up to 15% for a TCP socket,
 *    more for a UDP socket).
 *    Also, this allows us to reduce the overall transmit queue, which means
 *    less latency in case of mixed traffic.
 *    Jean II
 */
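/*
 * FLOW_STOP maps to netif_stop_queue() and FLOW_START to
 * netif_wake_queue(), which is what makes the small dev->tx_queue_len
 * chosen in irlan_eth_setup() workable: packets accumulate in IrTTP
 * rather than being dropped here.
 */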
void irlan_eth_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
{
	struct irlan_cb *self;
	struct net_device *dev;

	self = (struct irlan_cb *) instance;

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);

	dev = self->dev;

	IRDA_ASSERT(dev != NULL, return;);

	IRDA_DEBUG(0, "%s() : flow %s ; running %d\n", __FUNCTION__,
		   flow == FLOW_STOP ? "FLOW_STOP" : "FLOW_START",
		   netif_running(dev));

	switch (flow) {
	case FLOW_STOP:
		/* IrTTP is full, stop higher layers */
		netif_stop_queue(dev);
		break;
	case FLOW_START:
	default:
		/* Tell upper layers that it's time to transmit frames again */
		/* Schedule network layer */
		netif_wake_queue(dev);
		break;
	}
}

/*
 * Function irlan_eth_send_gratuitous_arp (dev)
 *
 *    Send a gratuitous ARP to announce that we have changed our
 *    hardware address, so that all peers update their ARP tables
 */
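/*
 * A gratuitous ARP is simply an ARP request in which our own address
 * appears as both the sender and the target protocol address, which is
 * why ifa_address is passed twice to arp_send() below.
 */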
void irlan_eth_send_gratuitous_arp(struct net_device *dev)
{
	struct in_device *in_dev;

	/*
	 * When we get a new MAC address do a gratuitous ARP. This
	 * is useful if we have changed access points on the same
	 * subnet.
	 */
#ifdef CONFIG_INET
	IRDA_DEBUG(4, "IrLAN: Sending gratuitous ARP\n");
	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);
	if (in_dev == NULL)
		goto out;
	if (in_dev->ifa_list)
		arp_send(ARPOP_REQUEST, ETH_P_ARP,
			 in_dev->ifa_list->ifa_address,
			 dev,
			 in_dev->ifa_list->ifa_address,
			 NULL, dev->dev_addr, NULL);
out:
	rcu_read_unlock();
#endif /* CONFIG_INET */
}

/*
 * Function irlan_eth_set_multicast_list (dev)
 *
 *    Configure the filtering of the device
 *
 */
#define HW_MAX_ADDRS 4 /* Must query to get it! */
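/*
 * IrLAN has no promiscuous mode, so IFF_PROMISC only triggers a warning.
 * With IFF_ALLMULTI, or with more multicast addresses than the (assumed)
 * HW_MAX_ADDRS hardware slots, we simply turn the multicast filter on
 * wholesale instead of programming individual addresses.
 */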
static void irlan_eth_set_multicast_list(struct net_device *dev)
{
	struct irlan_cb *self = netdev_priv(dev);

	IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );

	/* Check if data channel has been connected yet */
	if (self->client.state != IRLAN_DATA) {
		IRDA_DEBUG(1, "%s(), delaying!\n", __FUNCTION__ );
		return;
	}

	if (dev->flags & IFF_PROMISC) {
		/* Enable promiscuous mode */
		IRDA_WARNING("Promiscuous mode not implemented by IrLAN!\n");
	}
	else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > HW_MAX_ADDRS) {
		/* Disable promiscuous mode, use normal mode. */
		IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __FUNCTION__ );
		/* hardware_set_filter(NULL); */

		irlan_set_multicast_filter(self, TRUE);
	}
	else if (dev->mc_count) {
		IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __FUNCTION__ );
		/* Walk the address list, and load the filter */
		/* hardware_set_filter(dev->mc_list); */

		irlan_set_multicast_filter(self, TRUE);
	}
	else {
		IRDA_DEBUG(4, "%s(), Clearing multicast filter\n", __FUNCTION__ );
		irlan_set_multicast_filter(self, FALSE);
	}

	if (dev->flags & IFF_BROADCAST)
		irlan_set_broadcast_filter(self, TRUE);
	else
		irlan_set_broadcast_filter(self, FALSE);
}

/*
 * Function irlan_eth_get_stats (dev)
 *
 *    Get the current statistics for this device
 *
 */
static struct net_device_stats *irlan_eth_get_stats(struct net_device *dev)
{
	struct irlan_cb *self = netdev_priv(dev);

	return &self->stats;
}