/*********************************************************************
 *
 * Filename:      irlap.c
 * Version:       1.0
 * Description:   IrLAP implementation for Linux
 * Status:        Stable
 * Author:        Dag Brattli <dagb@cs.uit.no>
 * Created at:    Mon Aug 4 20:40:53 1997
 * Modified at:   Tue Dec 14 09:26:44 1999
 * Modified by:   Dag Brattli <dagb@cs.uit.no>
 *
 *     Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
 *     Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com>
 *
 *     This program is free software; you can redistribute it and/or
 *     modify it under the terms of the GNU General Public License as
 *     published by the Free Software Foundation; either version 2 of
 *     the License, or (at your option) any later version.
 *
 *     This program is distributed in the hope that it will be useful,
 *     but WITHOUT ANY WARRANTY; without even the implied warranty of
 *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *     GNU General Public License for more details.
 *
 *     You should have received a copy of the GNU General Public License
 *     along with this program; if not, write to the Free Software
 *     Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 *     MA 02111-1307 USA
 *
 ********************************************************************/

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/module.h>
#include <linux/seq_file.h>

#include <net/irda/irda.h>
#include <net/irda/irda_device.h>
#include <net/irda/irqueue.h>
#include <net/irda/irlmp.h>
#include <net/irda/irlmp_frame.h>
#include <net/irda/irlap_frame.h>
#include <net/irda/irlap.h>
#include <net/irda/timer.h>
#include <net/irda/qos.h>

static hashbin_t *irlap = NULL;
int sysctl_slot_timeout = SLOT_TIMEOUT * 1000 / HZ;

/* This is the delay of missed pf period before generating an event
 * to the application. The spec mandates 3 seconds, but in some cases
 * it's way too long. - Jean II */
int sysctl_warn_noreply_time = 3;
extern void irlap_queue_xmit(struct irlap_cb *self, struct sk_buff *skb);
static void __irlap_close(struct irlap_cb *self);
static void irlap_init_qos_capabilities(struct irlap_cb *self,
					struct qos_info *qos_user);

#ifdef CONFIG_IRDA_DEBUG
static char *lap_reasons[] = {
	"ERROR, NOT USED",
	"LAP_DISC_INDICATION",
	"LAP_NO_RESPONSE",
	"LAP_RESET_INDICATION",
	"LAP_FOUND_NONE",
	"LAP_MEDIA_BUSY",
	"LAP_PRIMARY_CONFLICT",
	"ERROR, NOT USED",
};
#endif	/* CONFIG_IRDA_DEBUG */

int __init irlap_init(void)
{
	/* Check if the compiler did its job properly.
	 * May happen on some ARM configuration, check with Russell King. */
	IRDA_ASSERT(sizeof(struct xid_frame) == 14, ;);
	IRDA_ASSERT(sizeof(struct test_frame) == 10, ;);
	IRDA_ASSERT(sizeof(struct ua_frame) == 10, ;);
	IRDA_ASSERT(sizeof(struct snrm_frame) == 11, ;);

	/* Allocate master array */
	irlap = hashbin_new(HB_LOCK);
	if (irlap == NULL) {
		IRDA_ERROR("%s: can't allocate irlap hashbin!\n",
			   __FUNCTION__);
		return -ENOMEM;
	}

	return 0;
}

void __exit irlap_cleanup(void)
{
	IRDA_ASSERT(irlap != NULL, return;);

	hashbin_delete(irlap, (FREE_FUNC) __irlap_close);
}

/*
 * Function irlap_open (driver)
 *
 *    Initialize IrLAP layer
 *
 */
struct irlap_cb *irlap_open(struct net_device *dev, struct qos_info *qos,
			    const char *hw_name)
{
	struct irlap_cb *self;

	IRDA_DEBUG(4, "%s()\n", __FUNCTION__);

	/* Initialize the irlap structure. */
	self = kmalloc(sizeof(struct irlap_cb), GFP_KERNEL);
	if (self == NULL)
		return NULL;

	memset(self, 0, sizeof(struct irlap_cb));
	self->magic = LAP_MAGIC;

	/* Make a binding between the layers */
	self->netdev = dev;
	self->qos_dev = qos;
	/* Copy hardware name */
	if (hw_name != NULL) {
		strlcpy(self->hw_name, hw_name, sizeof(self->hw_name));
	} else {
		self->hw_name[0] = '\0';
	}

	/* FIXME: should we get our own field? */
	dev->atalk_ptr = self;

	self->state = LAP_OFFLINE;

	/* Initialize transmit queue */
	skb_queue_head_init(&self->txq);
	skb_queue_head_init(&self->txq_ultra);
	skb_queue_head_init(&self->wx_list);

	/* My unique IrLAP device address! */
	/* We don't want the broadcast address, nor the NULL address
	 * (most often used to signify "invalid"), and we don't want an
	 * address already in use (otherwise connect won't be able
	 * to select the proper link). - Jean II */
	do {
		get_random_bytes(&self->saddr, sizeof(self->saddr));
	} while ((self->saddr == 0x0) || (self->saddr == BROADCAST) ||
		 (hashbin_lock_find(irlap, self->saddr, NULL)));
	/* Copy to the driver */
	memcpy(dev->dev_addr, &self->saddr, 4);

	init_timer(&self->slot_timer);
	init_timer(&self->query_timer);
	init_timer(&self->discovery_timer);
	init_timer(&self->final_timer);
	init_timer(&self->poll_timer);
	init_timer(&self->wd_timer);
	init_timer(&self->backoff_timer);
	init_timer(&self->media_busy_timer);

	irlap_apply_default_connection_parameters(self);

	self->N3 = 3; /* # connection attempts to try before giving up */

	self->state = LAP_NDM;

	hashbin_insert(irlap, (irda_queue_t *) self, self->saddr, NULL);

	irlmp_register_link(self, self->saddr, &self->notify);

	return self;
}
EXPORT_SYMBOL(irlap_open);

/*
 * Function __irlap_close (self)
 *
 *    Remove IrLAP and all allocated memory. Stop any pending timers.
 *
 */
static void __irlap_close(struct irlap_cb *self)
{
	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	/* Stop timers */
	del_timer(&self->slot_timer);
	del_timer(&self->query_timer);
	del_timer(&self->discovery_timer);
	del_timer(&self->final_timer);
	del_timer(&self->poll_timer);
	del_timer(&self->wd_timer);
	del_timer(&self->backoff_timer);
	del_timer(&self->media_busy_timer);

	irlap_flush_all_queues(self);

	self->magic = 0;

	kfree(self);
}

/*
 * Function irlap_close (self)
 *
 *    Remove IrLAP instance
 *
 */
void irlap_close(struct irlap_cb *self)
{
	struct irlap_cb *lap;

	IRDA_DEBUG(4, "%s()\n", __FUNCTION__);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	/* We used to send a LAP_DISC_INDICATION here, but this was
	 * racy. This has been moved into irlmp_unregister_link()
	 * itself. Jean II */

	/* Kill the LAP and all LSAPs on top of it */
	irlmp_unregister_link(self->saddr);
	self->notify.instance = NULL;

	/* Be sure that we manage to remove ourselves from the hash */
	lap = hashbin_remove(irlap, self->saddr, NULL);
	if (!lap) {
		IRDA_DEBUG(1, "%s(), Didn't find myself!\n", __FUNCTION__);
		return;
	}
	__irlap_close(lap);
}
EXPORT_SYMBOL(irlap_close);

/*
 * Function irlap_connect_indication (self, skb)
 *
 *    Another device is attempting to make a connection
 *
 */
void irlap_connect_indication(struct irlap_cb *self, struct sk_buff *skb)
{
	IRDA_DEBUG(4, "%s()\n", __FUNCTION__);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	irlap_init_qos_capabilities(self, NULL); /* No user QoS! */

	irlmp_link_connect_indication(self->notify.instance, self->saddr,
				      self->daddr, &self->qos_tx, skb);
}

/*
 * Function irlap_connect_response (self, skb)
 *
 *    Service user has accepted incoming connection
 *
 */
void irlap_connect_response(struct irlap_cb *self, struct sk_buff *userdata)
{
	IRDA_DEBUG(4, "%s()\n", __FUNCTION__);

	irlap_do_event(self, CONNECT_RESPONSE, userdata, NULL);
}

/*
 * Function irlap_connect_request (self, daddr, qos_user, sniff)
 *
 *    Request connection with another device, sniffing is not implemented
 *    yet.
 *
 */
void irlap_connect_request(struct irlap_cb *self, __u32 daddr,
			   struct qos_info *qos_user, int sniff)
{
	IRDA_DEBUG(3, "%s(), daddr=0x%08x\n", __FUNCTION__, daddr);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	self->daddr = daddr;

	/*
	 * If the service user specifies QoS values for this connection,
	 * then use them
	 */
	irlap_init_qos_capabilities(self, qos_user);

	if ((self->state == LAP_NDM) && !self->media_busy)
		irlap_do_event(self, CONNECT_REQUEST, NULL, NULL);
	else
		self->connect_pending = TRUE;
}

/*
 * Function irlap_connect_confirm (self, skb)
 *
 *    Connection request has been accepted
 *
 */
void irlap_connect_confirm(struct irlap_cb *self, struct sk_buff *skb)
{
	IRDA_DEBUG(4, "%s()\n", __FUNCTION__);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	irlmp_link_connect_confirm(self->notify.instance, &self->qos_tx, skb);
}

/*
 * Function irlap_data_indication (self, skb)
 *
 *    Received data frames from IR-port, so we just pass them up to
 *    IrLMP for further processing
 *
 */
void irlap_data_indication(struct irlap_cb *self, struct sk_buff *skb,
			   int unreliable)
{
	/* Hide LAP header from IrLMP layer */
	skb_pull(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);

	irlmp_link_data_indication(self->notify.instance, skb, unreliable);
}


/*
 * Function irlap_data_request (self, skb)
 *
 *    Queue data for transmission, must wait until XMIT state
 *
 */
void irlap_data_request(struct irlap_cb *self, struct sk_buff *skb,
			int unreliable)
{
	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	IRDA_DEBUG(3, "%s()\n", __FUNCTION__);

	IRDA_ASSERT(skb_headroom(skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER),
		    return;);
	skb_push(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);

	/*
	 * Must set frame format now so that the rest of the code knows
	 * if it's dealing with an I or a UI frame
	 */
	if (unreliable)
		skb->data[1] = UI_FRAME;
	else
		skb->data[1] = I_FRAME;

	/* Don't forget to refcount it - see irlmp_connect_request(). */
	skb_get(skb);

	/* Add at the end of the queue (keep ordering) - Jean II */
	skb_queue_tail(&self->txq, skb);

	/*
	 * Send an event for this frame only if we are in the right state
	 * FIXME: udata should be sent first! (skb_queue_head?)
	 */
	if ((self->state == LAP_XMIT_P) || (self->state == LAP_XMIT_S)) {
		/* If we are not already processing the Tx queue, trigger
		 * transmission immediately - Jean II */
		if ((skb_queue_len(&self->txq) <= 1) && (!self->local_busy))
			irlap_do_event(self, DATA_REQUEST, skb, NULL);
		/* Otherwise, the packets will be sent normally at the
		 * next pf-poll - Jean II */
	}
}

/*
 * Function irlap_unitdata_request (self, skb)
 *
 *    Send Ultra data. This is data that must be sent outside any connection
 *
 */
#ifdef CONFIG_IRDA_ULTRA
void irlap_unitdata_request(struct irlap_cb *self, struct sk_buff *skb)
{
	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	IRDA_DEBUG(3, "%s()\n", __FUNCTION__);

	IRDA_ASSERT(skb_headroom(skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER),
		    return;);
	skb_push(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);

	skb->data[0] = CBROADCAST;
	skb->data[1] = UI_FRAME;

	/* Don't need to refcount, see irlmp_connless_data_request() */

	skb_queue_tail(&self->txq_ultra, skb);

	irlap_do_event(self, SEND_UI_FRAME, NULL, NULL);
}
#endif /* CONFIG_IRDA_ULTRA */

/*
 * Function irlap_unitdata_indication (self, skb)
 *
 *    Receive Ultra data. This is data that is received outside any connection
 *
 */
#ifdef CONFIG_IRDA_ULTRA
void irlap_unitdata_indication(struct irlap_cb *self, struct sk_buff *skb)
{
	IRDA_DEBUG(1, "%s()\n", __FUNCTION__);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
	IRDA_ASSERT(skb != NULL, return;);

	/* Hide LAP header from IrLMP layer */
	skb_pull(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);

	irlmp_link_unitdata_indication(self->notify.instance, skb);
}
#endif /* CONFIG_IRDA_ULTRA */

/*
 * Function irlap_disconnect_request (void)
 *
 *    Request to disconnect connection by service user
 */
void irlap_disconnect_request(struct irlap_cb *self)
{
	IRDA_DEBUG(3, "%s()\n", __FUNCTION__);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	/* Don't disconnect until all data frames are successfully sent */
	if (!skb_queue_empty(&self->txq)) {
		self->disconnect_pending = TRUE;
		return;
	}

	/* Check if we are in the right state for disconnecting */
	switch (self->state) {
	case LAP_XMIT_P:        /* FALLTHROUGH */
	case LAP_XMIT_S:        /* FALLTHROUGH */
	case LAP_CONN:          /* FALLTHROUGH */
	case LAP_RESET_WAIT:    /* FALLTHROUGH */
	case LAP_RESET_CHECK:
		irlap_do_event(self, DISCONNECT_REQUEST, NULL, NULL);
		break;
	default:
		IRDA_DEBUG(2, "%s(), disconnect pending!\n", __FUNCTION__);
		self->disconnect_pending = TRUE;
		break;
	}
}

/*
 * Function irlap_disconnect_indication (void)
 *
 *    Disconnect request from other device
 *
 */
void irlap_disconnect_indication(struct irlap_cb *self, LAP_REASON reason)
{
	IRDA_DEBUG(1, "%s(), reason=%s\n", __FUNCTION__, lap_reasons[reason]);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	/* Flush queues */
	irlap_flush_all_queues(self);

	switch (reason) {
	case LAP_RESET_INDICATION:
		IRDA_DEBUG(1, "%s(), Sending reset request!\n", __FUNCTION__);
		irlap_do_event(self, RESET_REQUEST, NULL, NULL);
		break;
	case LAP_NO_RESPONSE:      /* FALLTHROUGH */
	case LAP_DISC_INDICATION:  /* FALLTHROUGH */
	case LAP_FOUND_NONE:       /* FALLTHROUGH */
	case LAP_MEDIA_BUSY:
		irlmp_link_disconnect_indication(self->notify.instance, self,
						 reason, NULL);
		break;
	default:
		IRDA_ERROR("%s: Unknown reason %d\n", __FUNCTION__, reason);
	}
}

/*
 * Function irlap_discovery_request (gen_addr_bit)
 *
 *    Start one single discovery operation.
 *
 */
void irlap_discovery_request(struct irlap_cb *self, discovery_t *discovery)
{
	struct irlap_info info;

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
	IRDA_ASSERT(discovery != NULL, return;);

	IRDA_DEBUG(4, "%s(), nslots = %d\n", __FUNCTION__, discovery->nslots);

	IRDA_ASSERT((discovery->nslots == 1) || (discovery->nslots == 6) ||
		    (discovery->nslots == 8) || (discovery->nslots == 16),
		    return;);

	/* Discovery is only possible in NDM mode */
	if (self->state != LAP_NDM) {
		IRDA_DEBUG(4, "%s(), discovery only possible in NDM mode\n",
			   __FUNCTION__);
		irlap_discovery_confirm(self, NULL);
		/* Note : in theory, if we are not in NDM, we could postpone
		 * the discovery like we do for connection request.
		 * In practice, it's not worth it. If the media was busy,
		 * it's likely next time around it won't be busy. If we are
		 * in REPLY state, we will get passive discovery info & event.
		 * Jean II */
		return;
	}

	/* Check if last discovery request finished in time, or if
	 * it was aborted due to the media busy flag. */
	if (self->discovery_log != NULL) {
		hashbin_delete(self->discovery_log, (FREE_FUNC) kfree);
		self->discovery_log = NULL;
	}

	/* All operations will occur at predictable times, no need to lock */
	self->discovery_log = hashbin_new(HB_NOLOCK);

	if (self->discovery_log == NULL) {
		IRDA_WARNING("%s(), Unable to allocate discovery log!\n",
			     __FUNCTION__);
		return;
	}

	info.S = discovery->nslots; /* Number of slots */
	info.s = 0; /* Current slot */

	self->discovery_cmd = discovery;
	info.discovery = discovery;

	/* sysctl_slot_timeout bounds are checked in irsysctl.c - Jean II */
	self->slot_timeout = sysctl_slot_timeout * HZ / 1000;
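	/* Worked example (added for clarity, hypothetical values): the sysctl
	 * value is kept in milliseconds, so the line above converts it back
	 * to jiffies. With HZ = 100, a 90 ms slot timeout would become
	 * 90 * 100 / 1000 = 9 jiffies. */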

	irlap_do_event(self, DISCOVERY_REQUEST, NULL, &info);
}

/*
 * Function irlap_discovery_confirm (log)
 *
 *    A device has been discovered in front of this station, we
 *    report directly to LMP.
 */
void irlap_discovery_confirm(struct irlap_cb *self, hashbin_t *discovery_log)
{
	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	IRDA_ASSERT(self->notify.instance != NULL, return;);

	/*
	 * Check for successful discovery, since we are then allowed to clear
	 * the media busy condition (IrLAP 6.13.4 - p.94). This should allow
	 * us to make connection attempts much faster and easier (i.e. no
	 * collisions).
	 * Setting media busy to false will also generate an event allowing
	 * to process pending events in NDM state machine.
	 * Note : the spec doesn't define what a successful discovery is.
	 * If we want Ultra to work, it's successful even if there is
	 * nobody discovered - Jean II
	 */
	if (discovery_log)
		irda_device_set_media_busy(self->netdev, FALSE);

	/* Inform IrLMP */
	irlmp_link_discovery_confirm(self->notify.instance, discovery_log);
}

/*
 * Function irlap_discovery_indication (log)
 *
 *    Somebody is trying to discover us!
 *
 */
void irlap_discovery_indication(struct irlap_cb *self, discovery_t *discovery)
{
	IRDA_DEBUG(4, "%s()\n", __FUNCTION__);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
	IRDA_ASSERT(discovery != NULL, return;);

	IRDA_ASSERT(self->notify.instance != NULL, return;);

	/* A device is very likely to connect immediately after it performs
	 * a successful discovery. This means that in our case, we are much
	 * more likely to receive a connection request over the medium.
	 * So, we backoff to avoid collisions.
	 * IrLAP spec 6.13.4 suggests 100ms...
	 * Note : this little trick actually makes a *BIG* difference. If I set
	 * my Linux box with discovery enabled and one Ultra frame sent every
	 * second, my Palm has no trouble connecting to it every time!
	 * Jean II */
	irda_device_set_media_busy(self->netdev, SMALL);

	irlmp_link_discovery_indication(self->notify.instance, discovery);
}

/*
 * Function irlap_status_indication (quality_of_link)
 */
void irlap_status_indication(struct irlap_cb *self, int quality_of_link)
{
	switch (quality_of_link) {
	case STATUS_NO_ACTIVITY:
		IRDA_MESSAGE("IrLAP, no activity on link!\n");
		break;
	case STATUS_NOISY:
		IRDA_MESSAGE("IrLAP, noisy link!\n");
		break;
	default:
		break;
	}
	irlmp_status_indication(self->notify.instance,
				quality_of_link, LOCK_NO_CHANGE);
}

/*
 * Function irlap_reset_indication (void)
 */
void irlap_reset_indication(struct irlap_cb *self)
{
	IRDA_DEBUG(1, "%s()\n", __FUNCTION__);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	if (self->state == LAP_RESET_WAIT)
		irlap_do_event(self, RESET_REQUEST, NULL, NULL);
	else
		irlap_do_event(self, RESET_RESPONSE, NULL, NULL);
}

/*
 * Function irlap_reset_confirm (void)
 */
void irlap_reset_confirm(void)
{
	IRDA_DEBUG(1, "%s()\n", __FUNCTION__);
}

/*
 * Function irlap_generate_rand_time_slot (S, s)
 *
 *    Generate a random time slot between s and S-1 where
 *    S = Number of slots (0 -> S-1)
 *    s = Current slot
 */
int irlap_generate_rand_time_slot(int S, int s)
{
	static int rand;
	int slot;

	IRDA_ASSERT((S - s) > 0, return 0;);

	rand += jiffies;
	rand ^= (rand << 12);
	rand ^= (rand >> 20);

	slot = s + rand % (S-s);

	IRDA_ASSERT((slot >= s) && (slot < S), return 0;);

	return slot;
}
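
/* Worked example (added for clarity, hypothetical values): with S = 6 slots
 * and current slot s = 2, (S - s) = 4, so rand % 4 yields an offset of 0..3
 * (assuming the accumulated 'rand' value is non-negative) and the returned
 * slot lies in 2..5, i.e. between the current slot and slot S-1. This is a
 * cheap pseudo-random pick, not a uniformly distributed one. */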

/*
 * Function irlap_update_nr_received (nr)
 *
 *    Remove all acknowledged frames in current window queue. This code is
 *    not intuitive and you should not try to change it. If you think it
 *    contains bugs, please mail a patch to the author instead.
 */
void irlap_update_nr_received(struct irlap_cb *self, int nr)
{
	struct sk_buff *skb = NULL;
	int count = 0;

	/*
	 * Remove all the ack-ed frames from the window queue.
	 */

	/*
	 * Optimize for the common case. It is most likely that the receiver
	 * will acknowledge all the frames we have sent! So in that case we
	 * delete all frames stored in window.
	 */
	if (nr == self->vs) {
		while ((skb = skb_dequeue(&self->wx_list)) != NULL) {
			dev_kfree_skb(skb);
		}
		/* The last acked frame is the next to send minus one */
		self->va = nr - 1;
	} else {
		/* Remove all acknowledged frames in current window */
		while ((skb_peek(&self->wx_list) != NULL) &&
		       (((self->va+1) % 8) != nr))
		{
			skb = skb_dequeue(&self->wx_list);
			dev_kfree_skb(skb);

			self->va = (self->va + 1) % 8;
			count++;
		}
	}

	/* Advance window */
	self->window = self->window_size - skb_queue_len(&self->wx_list);
}
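
/* Worked example (added for clarity, hypothetical values): suppose va = 2
 * (last acked), vs = 5 (next to send), so frames 3 and 4 sit in wx_list.
 * If the peer sends nr = 4 it acknowledges everything up to frame 3: the
 * loop above dequeues frame 3 and advances va to 3, leaving frame 4 queued
 * for possible retransmission. If instead nr == vs (here 5), the fast path
 * frees the whole window at once. */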

/*
 * Function irlap_validate_ns_received (ns)
 *
 *    Validate the next to send (ns) field from received frame.
 */
int irlap_validate_ns_received(struct irlap_cb *self, int ns)
{
	/* ns as expected? */
	if (ns == self->vr)
		return NS_EXPECTED;
	/*
	 * Stations are allowed to treat invalid NS as unexpected NS
	 * IrLAP, Recv ... with-invalid-Ns. p. 84
	 */
	return NS_UNEXPECTED;

	/* return NR_INVALID; */
}
/*
 * Function irlap_validate_nr_received (nr)
 *
 *    Validate the next to receive (nr) field from received frame.
 *
 */
int irlap_validate_nr_received(struct irlap_cb *self, int nr)
{
	/* nr as expected? */
	if (nr == self->vs) {
		IRDA_DEBUG(4, "%s(), expected!\n", __FUNCTION__);
		return NR_EXPECTED;
	}

	/*
	 * unexpected nr? (but within current window), first we check if the
	 * ns numbers of the frames in the current window wrap.
	 */
	if (self->va < self->vs) {
		if ((nr >= self->va) && (nr <= self->vs))
			return NR_UNEXPECTED;
	} else {
		if ((nr >= self->va) || (nr <= self->vs))
			return NR_UNEXPECTED;
	}

	/* Invalid nr! */
	return NR_INVALID;
}
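
/* Worked example (added for clarity, hypothetical values): with va = 6 and
 * vs = 2 the window has wrapped past 7, so the else-branch above treats nr
 * values such as 7, 0 or 1 (inside the wrapped window) as NR_UNEXPECTED,
 * while something like nr = 4 falls outside the window and is reported as
 * NR_INVALID. nr == vs is caught earlier and returned as NR_EXPECTED. */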

/*
 * Function irlap_initiate_connection_state ()
 *
 *    Initialize the connection state parameters
 *
 */
void irlap_initiate_connection_state(struct irlap_cb *self)
{
	IRDA_DEBUG(4, "%s()\n", __FUNCTION__);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	/* Next to send and next to receive */
	self->vs = self->vr = 0;

	/* Last frame which got acked (0 - 1) % 8 */
	self->va = 7;

	self->window = 1;

	self->remote_busy = FALSE;
	self->retry_count = 0;
}

/*
 * Function irlap_wait_min_turn_around (self, qos)
 *
 *    Wait negotiated minimum turn around time, this function actually sets
 *    the number of XBOFs that must be sent before the next transmitted
 *    frame in order to delay for the specified amount of time. This is
 *    done to avoid using timers, and the forbidden udelay!
 */
void irlap_wait_min_turn_around(struct irlap_cb *self, struct qos_info *qos)
{
	__u32 min_turn_time;
	__u32 speed;

	/* Get QoS values. */
	speed = qos->baud_rate.value;
	min_turn_time = qos->min_turn_time.value;

	/* No need to calculate XBOFs for speeds over 115200 bps */
	if (speed > 115200) {
		self->mtt_required = min_turn_time;
		return;
	}

	/*
	 * Send additional BOF's for the next frame for the requested
	 * min turn time, so now we must calculate how many chars (XBOF's) we
	 * must send for the requested time period (min turn time)
	 */
	self->xbofs_delay = irlap_min_turn_time_in_bytes(speed, min_turn_time);
}
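
/* Rough example (added for clarity, exact rounding is left to
 * irlap_min_turn_time_in_bytes()): at 9600 bps a character takes about 1 ms
 * on the wire (10 bits per transmitted byte), so a negotiated min turn time
 * of 10000 us translates into roughly 10 XBOF padding characters sent ahead
 * of the next frame. */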

/*
 * Function irlap_flush_all_queues (void)
 *
 *    Flush all queues
 *
 */
void irlap_flush_all_queues(struct irlap_cb *self)
{
	struct sk_buff *skb;

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	/* Free transmission queue */
	while ((skb = skb_dequeue(&self->txq)) != NULL)
		dev_kfree_skb(skb);

	while ((skb = skb_dequeue(&self->txq_ultra)) != NULL)
		dev_kfree_skb(skb);

	/* Free sliding window buffered packets */
	while ((skb = skb_dequeue(&self->wx_list)) != NULL)
		dev_kfree_skb(skb);
}

/*
 * Function irlap_change_speed (self, speed, now)
 *
 *    Change the speed of the IrDA port
 *
 */
static void irlap_change_speed(struct irlap_cb *self, __u32 speed, int now)
{
	struct sk_buff *skb;

	IRDA_DEBUG(0, "%s(), setting speed to %d\n", __FUNCTION__, speed);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	self->speed = speed;

	/* Change speed now, or just piggyback speed on frames */
	if (now) {
		/* Send down empty frame to trigger speed change */
		skb = dev_alloc_skb(0);
		if (skb)
			irlap_queue_xmit(self, skb);
	}
}

/*
 * Function irlap_init_qos_capabilities (self, qos)
 *
 *    Initialize QoS for this IrLAP session. What we do is compute the
 *    intersection of the QoS capabilities for the user, driver and for
 *    IrLAP itself. Normally, IrLAP will not specify any values, but it can
 *    be used to restrict certain values.
 */
static void irlap_init_qos_capabilities(struct irlap_cb *self,
					struct qos_info *qos_user)
{
	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
	IRDA_ASSERT(self->netdev != NULL, return;);

	/* Start out with the maximum QoS support possible */
	irda_init_max_qos_capabilies(&self->qos_rx);

	/* Apply the driver's QoS capabilities */
	irda_qos_compute_intersection(&self->qos_rx, self->qos_dev);

	/*
	 * Check for user supplied QoS parameters. The service user is only
	 * allowed to supply these values. We check each parameter since the
	 * user may not have set all of them.
	 */
	if (qos_user) {
		IRDA_DEBUG(1, "%s(), Found user specified QoS!\n", __FUNCTION__);

		if (qos_user->baud_rate.bits)
			self->qos_rx.baud_rate.bits &= qos_user->baud_rate.bits;

		if (qos_user->max_turn_time.bits)
			self->qos_rx.max_turn_time.bits &= qos_user->max_turn_time.bits;
		if (qos_user->data_size.bits)
			self->qos_rx.data_size.bits &= qos_user->data_size.bits;

		if (qos_user->link_disc_time.bits)
			self->qos_rx.link_disc_time.bits &= qos_user->link_disc_time.bits;
	}

	/* Use 500ms in IrLAP for now */
	self->qos_rx.max_turn_time.bits &= 0x01;
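	/* Note (added for clarity, assumption about the QoS bit tables): bit 0
	 * of the max_turn_time bit field is believed to map to the 500 ms
	 * entry, so masking with 0x01 restricts the advertised maximum turn
	 * around time to 500 ms, matching the comment above. */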

	/* Set data size */
	/*self->qos_rx.data_size.bits &= 0x03;*/

	irda_qos_bits_to_value(&self->qos_rx);
}

/*
 * Function irlap_apply_default_connection_parameters (void, now)
 *
 *    Use the default connection and transmission parameters
 */
void irlap_apply_default_connection_parameters(struct irlap_cb *self)
{
	IRDA_DEBUG(4, "%s()\n", __FUNCTION__);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	/* xbofs : Default value in NDM */
	self->next_bofs = 12;
	self->bofs_count = 12;

	/* NDM Speed is 9600 */
	irlap_change_speed(self, 9600, TRUE);

	/* Set mbusy when going to NDM state */
	irda_device_set_media_busy(self->netdev, TRUE);

	/*
	 * Generate random connection address for this session, which must
	 * be 7 bits wide and different from 0x00 and 0xfe
	 */
	while ((self->caddr == 0x00) || (self->caddr == 0xfe)) {
		get_random_bytes(&self->caddr, sizeof(self->caddr));
		self->caddr &= 0xfe;
	}
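
	/* Note (added for clarity, assumption about the framing code): masking
	 * with 0xfe keeps the connection address in bits 7..1 and leaves bit 0
	 * clear; the C/R (command/response) bit is expected to be OR-ed into
	 * bit 0 when the address byte is actually sent on the wire. */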

	/* Use default values until connection has been negotiated */
	self->slot_timeout = sysctl_slot_timeout;
	self->final_timeout = FINAL_TIMEOUT;
	self->poll_timeout = POLL_TIMEOUT;
	self->wd_timeout = WD_TIMEOUT;

	/* Set some default values */
	self->qos_tx.baud_rate.value = 9600;
	self->qos_rx.baud_rate.value = 9600;
	self->qos_tx.max_turn_time.value = 0;
	self->qos_rx.max_turn_time.value = 0;
	self->qos_tx.min_turn_time.value = 0;
	self->qos_rx.min_turn_time.value = 0;
	self->qos_tx.data_size.value = 64;
	self->qos_rx.data_size.value = 64;
	self->qos_tx.window_size.value = 1;
	self->qos_rx.window_size.value = 1;
	self->qos_tx.additional_bofs.value = 12;
	self->qos_rx.additional_bofs.value = 12;
	self->qos_tx.link_disc_time.value = 0;
	self->qos_rx.link_disc_time.value = 0;

	irlap_flush_all_queues(self);

	self->disconnect_pending = FALSE;
	self->connect_pending = FALSE;
}

/*
 * Function irlap_apply_connection_parameters (qos, now)
 *
 *    Initialize IrLAP with the negotiated QoS values
 *
 * If 'now' is false, the speed and xbofs will be changed after the next
 * frame is sent.
 * If 'now' is true, the speed and xbofs are changed immediately
 */
void irlap_apply_connection_parameters(struct irlap_cb *self, int now)
{
	IRDA_DEBUG(4, "%s()\n", __FUNCTION__);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	/* Set the negotiated xbofs value */
	self->next_bofs = self->qos_tx.additional_bofs.value;
	if (now)
		self->bofs_count = self->next_bofs;

	/* Set the negotiated link speed (may need the new xbofs value) */
	irlap_change_speed(self, self->qos_tx.baud_rate.value, now);

	self->window_size = self->qos_tx.window_size.value;
	self->window = self->qos_tx.window_size.value;

#ifdef CONFIG_IRDA_DYNAMIC_WINDOW
	/*
	 * Calculate how many bytes it is possible to transmit before the
	 * link must be turned around
	 */
	self->line_capacity =
		irlap_max_line_capacity(self->qos_tx.baud_rate.value,
					self->qos_tx.max_turn_time.value);
	self->bytes_left = self->line_capacity;
#endif /* CONFIG_IRDA_DYNAMIC_WINDOW */


	/*
	 * Initialize timeout values, some of the rules are listed on
	 * page 92 in IrLAP.
	 */
	IRDA_ASSERT(self->qos_tx.max_turn_time.value != 0, return;);
	IRDA_ASSERT(self->qos_rx.max_turn_time.value != 0, return;);
	/* The poll timeout applies only to the primary station.
	 * It defines the maximum time the primary stays in XMIT mode
	 * before timing out and turning the link around (sending a RR).
	 * Or, this is how long we can keep the pf bit in primary mode.
	 * Therefore, it must be less than or equal to our *OWN* max turn around.
	 * Jean II */
	self->poll_timeout = self->qos_tx.max_turn_time.value * HZ / 1000;
	/* The Final timeout applies only to the primary station.
	 * It defines the maximum time the primary waits (mostly in RECV mode)
	 * for an answer from the secondary station before polling it again.
	 * Therefore, it must be greater than or equal to our *PARTNER*
	 * max turn around time - Jean II */
	self->final_timeout = self->qos_rx.max_turn_time.value * HZ / 1000;
	/* The Watchdog Bit timeout applies only to the secondary station.
	 * It defines the maximum time the secondary waits (mostly in RECV mode)
	 * for a poll from the primary station before getting annoyed.
	 * Therefore, it must be greater than or equal to our *PARTNER*
	 * max turn around time - Jean II */
	self->wd_timeout = self->final_timeout * 2;

	/*
	 * N1 and N2 are maximum retry count for *both* the final timer
	 * and the wd timer (with a factor 2) as defined above.
	 * After N1 retries of a timer, we give a warning to the user.
	 * After N2 retries, we consider the link dead and disconnect it.
	 * Jean II
	 */

	/*
	 * Set N1 to 0 if Link Disconnect/Threshold Time = 3 and set it to
	 * 3 seconds otherwise. See page 71 in IrLAP for more details.
	 * Actually, it's not always 3 seconds, as we allow to set
	 * it via sysctl... Max maxtt is 500ms, and N1 needs to be a multiple
	 * of 2, so 1 second is the minimum we can allow. - Jean II
	 */
	if (self->qos_tx.link_disc_time.value == sysctl_warn_noreply_time)
		/*
		 * If we set N1 to 0, it will trigger immediately, which is
		 * not what we want. What we really want is to disable it,
		 * Jean II
		 */
		self->N1 = -2; /* Disable - Needs to be a multiple of 2 */
	else
		self->N1 = sysctl_warn_noreply_time * 1000 /
			self->qos_rx.max_turn_time.value;

	IRDA_DEBUG(4, "Setting N1 = %d\n", self->N1);

	/* Set N2 to match our own disconnect time */
	self->N2 = self->qos_tx.link_disc_time.value * 1000 /
		self->qos_rx.max_turn_time.value;
	IRDA_DEBUG(4, "Setting N2 = %d\n", self->N2);
}
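
/* Worked example (added for clarity, hypothetical values): with a negotiated
 * max turn time of 500 ms, sysctl_warn_noreply_time = 3 s and a link
 * disconnect time of 12 s, the code above gives N1 = 3000 / 500 = 6 (warn
 * the user after about 3 s without a reply) and N2 = 12000 / 500 = 24
 * (consider the link dead after about 12 s). */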

#ifdef CONFIG_PROC_FS
struct irlap_iter_state {
	int id;
};

static void *irlap_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct irlap_iter_state *iter = seq->private;
	struct irlap_cb *self;

	/* Protect our access to the irlap list */
	spin_lock_irq(&irlap->hb_spinlock);
	iter->id = 0;

	for (self = (struct irlap_cb *) hashbin_get_first(irlap);
	     self; self = (struct irlap_cb *) hashbin_get_next(irlap)) {
		if (iter->id == *pos)
			break;
		++iter->id;
	}

	return self;
}

static void *irlap_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct irlap_iter_state *iter = seq->private;

	++*pos;
	++iter->id;
	return (void *) hashbin_get_next(irlap);
}

static void irlap_seq_stop(struct seq_file *seq, void *v)
{
	spin_unlock_irq(&irlap->hb_spinlock);
}

static int irlap_seq_show(struct seq_file *seq, void *v)
{
	const struct irlap_iter_state *iter = seq->private;
	const struct irlap_cb *self = v;

	IRDA_ASSERT(self->magic == LAP_MAGIC, return -EINVAL;);

	seq_printf(seq, "irlap%d ", iter->id);
	seq_printf(seq, "state: %s\n",
		   irlap_state[self->state]);

	seq_printf(seq, "  device name: %s, ",
		   (self->netdev) ? self->netdev->name : "bug");
	seq_printf(seq, "hardware name: %s\n", self->hw_name);

	seq_printf(seq, "  caddr: %#02x, ", self->caddr);
	seq_printf(seq, "saddr: %#08x, ", self->saddr);
	seq_printf(seq, "daddr: %#08x\n", self->daddr);

	seq_printf(seq, "  win size: %d, ",
		   self->window_size);
	seq_printf(seq, "win: %d, ", self->window);
#ifdef CONFIG_IRDA_DYNAMIC_WINDOW
	seq_printf(seq, "line capacity: %d, ",
		   self->line_capacity);
	seq_printf(seq, "bytes left: %d\n", self->bytes_left);
#endif /* CONFIG_IRDA_DYNAMIC_WINDOW */
	seq_printf(seq, "  tx queue len: %d ",
		   skb_queue_len(&self->txq));
	seq_printf(seq, "win queue len: %d ",
		   skb_queue_len(&self->wx_list));
	seq_printf(seq, "rbusy: %s", self->remote_busy ?
		   "TRUE" : "FALSE");
	seq_printf(seq, " mbusy: %s\n", self->media_busy ?
		   "TRUE" : "FALSE");

	seq_printf(seq, "  retrans: %d ", self->retry_count);
	seq_printf(seq, "vs: %d ", self->vs);
	seq_printf(seq, "vr: %d ", self->vr);
	seq_printf(seq, "va: %d\n", self->va);

	seq_printf(seq, "  qos\tbps\tmaxtt\tdsize\twinsize\taddbofs\tmintt\tldisc\tcomp\n");

	seq_printf(seq, "  tx\t%d\t",
		   self->qos_tx.baud_rate.value);
	seq_printf(seq, "%d\t",
		   self->qos_tx.max_turn_time.value);
	seq_printf(seq, "%d\t",
		   self->qos_tx.data_size.value);
	seq_printf(seq, "%d\t",
		   self->qos_tx.window_size.value);
	seq_printf(seq, "%d\t",
		   self->qos_tx.additional_bofs.value);
	seq_printf(seq, "%d\t",
		   self->qos_tx.min_turn_time.value);
	seq_printf(seq, "%d\t",
		   self->qos_tx.link_disc_time.value);
	seq_printf(seq, "\n");

	seq_printf(seq, "  rx\t%d\t",
		   self->qos_rx.baud_rate.value);
	seq_printf(seq, "%d\t",
		   self->qos_rx.max_turn_time.value);
	seq_printf(seq, "%d\t",
		   self->qos_rx.data_size.value);
	seq_printf(seq, "%d\t",
		   self->qos_rx.window_size.value);
	seq_printf(seq, "%d\t",
		   self->qos_rx.additional_bofs.value);
	seq_printf(seq, "%d\t",
		   self->qos_rx.min_turn_time.value);
	seq_printf(seq, "%d\n",
		   self->qos_rx.link_disc_time.value);

	return 0;
}

static struct seq_operations irlap_seq_ops = {
	.start  = irlap_seq_start,
	.next   = irlap_seq_next,
	.stop   = irlap_seq_stop,
	.show   = irlap_seq_show,
};

static int irlap_seq_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	int rc = -ENOMEM;
	struct irlap_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		goto out;

	if (irlap == NULL) {
		rc = -EINVAL;
		goto out_kfree;
	}

	rc = seq_open(file, &irlap_seq_ops);
	if (rc)
		goto out_kfree;

	seq = file->private_data;
	seq->private = s;
	memset(s, 0, sizeof(*s));
out:
	return rc;
out_kfree:
	kfree(s);
	goto out;
}

struct file_operations irlap_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= irlap_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

#endif /* CONFIG_PROC_FS */