1 /*********************************************************************
2 *
3 * Filename: irlap.c
4 * Version: 1.0
5 * Description: IrLAP implementation for Linux
6 * Status: Stable
7 * Author: Dag Brattli <dagb@cs.uit.no>
8 * Created at: Mon Aug 4 20:40:53 1997
9 * Modified at: Tue Dec 14 09:26:44 1999
10 * Modified by: Dag Brattli <dagb@cs.uit.no>
11 *
12 * Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
13 * Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com>
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License as
17 * published by the Free Software Foundation; either version 2 of
18 * the License, or (at your option) any later version.
19 *
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details.
24 *
25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, write to the Free Software
27 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
28 * MA 02111-1307 USA
29 *
30 ********************************************************************/
31
32 #include <linux/config.h>
33 #include <linux/slab.h>
34 #include <linux/string.h>
35 #include <linux/skbuff.h>
36 #include <linux/delay.h>
37 #include <linux/proc_fs.h>
38 #include <linux/init.h>
39 #include <linux/random.h>
40 #include <linux/module.h>
41 #include <linux/seq_file.h>
42
43 #include <net/irda/irda.h>
44 #include <net/irda/irda_device.h>
45 #include <net/irda/irqueue.h>
46 #include <net/irda/irlmp.h>
47 #include <net/irda/irlmp_frame.h>
48 #include <net/irda/irlap_frame.h>
49 #include <net/irda/irlap.h>
50 #include <net/irda/timer.h>
51 #include <net/irda/qos.h>
52
53 static hashbin_t *irlap = NULL;
54 int sysctl_slot_timeout = SLOT_TIMEOUT * 1000 / HZ;
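/* Note (illustrative, not part of the original code): sysctl_slot_timeout is
 * exposed in milliseconds and converted back to jiffies in
 * irlap_discovery_request(). Worked example: with HZ=100 a slot timeout of
 * e.g. 90 ms becomes 9 jiffies, while with HZ=1000 it stays at 90 jiffies. */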
55
56 /* This is the delay of missed pf periods before generating an event
57  * to the application. The spec mandates 3 seconds, but in some cases
58 * it's way too long. - Jean II */
59 int sysctl_warn_noreply_time = 3;
60
61 extern void irlap_queue_xmit(struct irlap_cb *self, struct sk_buff *skb);
62 static void __irlap_close(struct irlap_cb *self);
63 static void irlap_init_qos_capabilities(struct irlap_cb *self,
64 struct qos_info *qos_user);
65
66 #ifdef CONFIG_IRDA_DEBUG
67 static char *lap_reasons[] = {
68 "ERROR, NOT USED",
69 "LAP_DISC_INDICATION",
70 "LAP_NO_RESPONSE",
71 "LAP_RESET_INDICATION",
72 "LAP_FOUND_NONE",
73 "LAP_MEDIA_BUSY",
74 "LAP_PRIMARY_CONFLICT",
75 "ERROR, NOT USED",
76 };
77 #endif /* CONFIG_IRDA_DEBUG */
78
79 int __init irlap_init(void)
80 {
81 /* Check if the compiler did its job properly.
82          * May happen on some ARM configurations, check with Russell King. */
83 IRDA_ASSERT(sizeof(struct xid_frame) == 14, ;);
84 IRDA_ASSERT(sizeof(struct test_frame) == 10, ;);
85 IRDA_ASSERT(sizeof(struct ua_frame) == 10, ;);
86 IRDA_ASSERT(sizeof(struct snrm_frame) == 11, ;);
87
88 /* Allocate master array */
89 irlap = hashbin_new(HB_LOCK);
90 if (irlap == NULL) {
91 IRDA_ERROR("%s: can't allocate irlap hashbin!\n",
92 __FUNCTION__);
93 return -ENOMEM;
94 }
95
96 return 0;
97 }
98
99 void __exit irlap_cleanup(void)
100 {
101 IRDA_ASSERT(irlap != NULL, return;);
102
103 hashbin_delete(irlap, (FREE_FUNC) __irlap_close);
104 }
105
106 /*
107 * Function irlap_open (driver)
108 *
109 * Initialize IrLAP layer
110 *
111 */
112 struct irlap_cb *irlap_open(struct net_device *dev, struct qos_info *qos,
113 const char *hw_name)
114 {
115 struct irlap_cb *self;
116
117 IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
118
119 /* Initialize the irlap structure. */
120 self = kmalloc(sizeof(struct irlap_cb), GFP_KERNEL);
121 if (self == NULL)
122 return NULL;
123
124 memset(self, 0, sizeof(struct irlap_cb));
125 self->magic = LAP_MAGIC;
126
127 /* Make a binding between the layers */
128 self->netdev = dev;
129 self->qos_dev = qos;
130 /* Copy hardware name */
131         if (hw_name != NULL) {
132 strlcpy(self->hw_name, hw_name, sizeof(self->hw_name));
133 } else {
134 self->hw_name[0] = '\0';
135 }
136
137 /* FIXME: should we get our own field? */
138 dev->atalk_ptr = self;
139
140 self->state = LAP_OFFLINE;
141
142 /* Initialize transmit queue */
143 skb_queue_head_init(&self->txq);
144 skb_queue_head_init(&self->txq_ultra);
145 skb_queue_head_init(&self->wx_list);
146
147 /* My unique IrLAP device address! */
148         /* We don't want the broadcast address, nor the NULL address
149 * (most often used to signify "invalid"), and we don't want an
150 * address already in use (otherwise connect won't be able
151 * to select the proper link). - Jean II */
152 do {
153 get_random_bytes(&self->saddr, sizeof(self->saddr));
154 } while ((self->saddr == 0x0) || (self->saddr == BROADCAST) ||
155 (hashbin_lock_find(irlap, self->saddr, NULL)) );
156 /* Copy to the driver */
157 memcpy(dev->dev_addr, &self->saddr, 4);
158
159 init_timer(&self->slot_timer);
160 init_timer(&self->query_timer);
161 init_timer(&self->discovery_timer);
162 init_timer(&self->final_timer);
163 init_timer(&self->poll_timer);
164 init_timer(&self->wd_timer);
165 init_timer(&self->backoff_timer);
166 init_timer(&self->media_busy_timer);
167
168 irlap_apply_default_connection_parameters(self);
169
170         self->N3 = 3; /* # connection attempts to try before giving up */
171
172 self->state = LAP_NDM;
173
174 hashbin_insert(irlap, (irda_queue_t *) self, self->saddr, NULL);
175
176 irlmp_register_link(self, self->saddr, &self->notify);
177
178 return self;
179 }
180 EXPORT_SYMBOL(irlap_open);
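/*
 * Illustrative sketch (not part of the original code): a device driver
 * would typically bind to IrLAP from its open path roughly like this,
 * assuming it has already filled in a struct qos_info describing what
 * the hardware supports. The names 'priv', 'dev' and 'drv_qos' are
 * hypothetical.
 *
 *	priv->irlap = irlap_open(dev, &drv_qos, "mydongle");
 *	if (!priv->irlap)
 *		return -ENOMEM;
 *
 * and, symmetrically, on shutdown:
 *
 *	irlap_close(priv->irlap);
 *	priv->irlap = NULL;
 */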
181
182 /*
183 * Function __irlap_close (self)
184 *
185 * Remove IrLAP and all allocated memory. Stop any pending timers.
186 *
187 */
188 static void __irlap_close(struct irlap_cb *self)
189 {
190 IRDA_ASSERT(self != NULL, return;);
191 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
192
193 /* Stop timers */
194 del_timer(&self->slot_timer);
195 del_timer(&self->query_timer);
196 del_timer(&self->discovery_timer);
197 del_timer(&self->final_timer);
198 del_timer(&self->poll_timer);
199 del_timer(&self->wd_timer);
200 del_timer(&self->backoff_timer);
201 del_timer(&self->media_busy_timer);
202
203 irlap_flush_all_queues(self);
204
205 self->magic = 0;
206
207 kfree(self);
208 }
209
210 /*
211 * Function irlap_close (self)
212 *
213 * Remove IrLAP instance
214 *
215 */
216 void irlap_close(struct irlap_cb *self)
217 {
218 struct irlap_cb *lap;
219
220 IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
221
222 IRDA_ASSERT(self != NULL, return;);
223 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
224
225 /* We used to send a LAP_DISC_INDICATION here, but this was
226          * racy. This has been moved into irlmp_unregister_link()
227 * itself. Jean II */
228
229 /* Kill the LAP and all LSAPs on top of it */
230 irlmp_unregister_link(self->saddr);
231 self->notify.instance = NULL;
232
233         /* Be sure that we manage to remove ourselves from the hash */
234 lap = hashbin_remove(irlap, self->saddr, NULL);
235 if (!lap) {
236 IRDA_DEBUG(1, "%s(), Didn't find myself!\n", __FUNCTION__);
237 return;
238 }
239 __irlap_close(lap);
240 }
241 EXPORT_SYMBOL(irlap_close);
242
243 /*
244 * Function irlap_connect_indication (self, skb)
245 *
246 * Another device is attempting to make a connection
247 *
248 */
249 void irlap_connect_indication(struct irlap_cb *self, struct sk_buff *skb)
250 {
251 IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
252
253 IRDA_ASSERT(self != NULL, return;);
254 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
255
256 irlap_init_qos_capabilities(self, NULL); /* No user QoS! */
257
258 irlmp_link_connect_indication(self->notify.instance, self->saddr,
259 self->daddr, &self->qos_tx, skb);
260 }
261
262 /*
263 * Function irlap_connect_response (self, skb)
264 *
265 * Service user has accepted incoming connection
266 *
267 */
268 void irlap_connect_response(struct irlap_cb *self, struct sk_buff *userdata)
269 {
270 IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
271
272 irlap_do_event(self, CONNECT_RESPONSE, userdata, NULL);
273 }
274
275 /*
276 * Function irlap_connect_request (self, daddr, qos_user, sniff)
277 *
278 * Request connection with another device, sniffing is not implemented
279 * yet.
280 *
281 */
282 void irlap_connect_request(struct irlap_cb *self, __u32 daddr,
283 struct qos_info *qos_user, int sniff)
284 {
285 IRDA_DEBUG(3, "%s(), daddr=0x%08x\n", __FUNCTION__, daddr);
286
287 IRDA_ASSERT(self != NULL, return;);
288 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
289
290 self->daddr = daddr;
291
292 /*
293 * If the service user specifies QoS values for this connection,
294 * then use them
295 */
296 irlap_init_qos_capabilities(self, qos_user);
297
298 if ((self->state == LAP_NDM) && !self->media_busy)
299 irlap_do_event(self, CONNECT_REQUEST, NULL, NULL);
300 else
301 self->connect_pending = TRUE;
302 }
303
304 /*
305 * Function irlap_connect_confirm (self, skb)
306 *
307 * Connection request has been accepted
308 *
309 */
310 void irlap_connect_confirm(struct irlap_cb *self, struct sk_buff *skb)
311 {
312 IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
313
314 IRDA_ASSERT(self != NULL, return;);
315 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
316
317 irlmp_link_connect_confirm(self->notify.instance, &self->qos_tx, skb);
318 }
319
320 /*
321 * Function irlap_data_indication (self, skb)
322 *
323 * Received data frames from IR-port, so we just pass them up to
324 * IrLMP for further processing
325 *
326 */
327 void irlap_data_indication(struct irlap_cb *self, struct sk_buff *skb,
328 int unreliable)
329 {
330 /* Hide LAP header from IrLMP layer */
331 skb_pull(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);
332
333 irlmp_link_data_indication(self->notify.instance, skb, unreliable);
334 }
335
336
337 /*
338 * Function irlap_data_request (self, skb)
339 *
340 * Queue data for transmission, must wait until XMIT state
341 *
342 */
343 void irlap_data_request(struct irlap_cb *self, struct sk_buff *skb,
344 int unreliable)
345 {
346 IRDA_ASSERT(self != NULL, return;);
347 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
348
349 IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
350
351 IRDA_ASSERT(skb_headroom(skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER),
352 return;);
353 skb_push(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);
354
355 /*
356 * Must set frame format now so that the rest of the code knows
357          * if it's dealing with an I or a UI frame
358 */
359 if (unreliable)
360 skb->data[1] = UI_FRAME;
361 else
362 skb->data[1] = I_FRAME;
363
364 /* Don't forget to refcount it - see irlmp_connect_request(). */
365 skb_get(skb);
366
367 /* Add at the end of the queue (keep ordering) - Jean II */
368 skb_queue_tail(&self->txq, skb);
369
370 /*
371          * Send an event for this frame only if we are in the right state
372 * FIXME: udata should be sent first! (skb_queue_head?)
373 */
374 if ((self->state == LAP_XMIT_P) || (self->state == LAP_XMIT_S)) {
375 /* If we are not already processing the Tx queue, trigger
376 * transmission immediately - Jean II */
377                 if ((skb_queue_len(&self->txq) <= 1) && (!self->local_busy))
378 irlap_do_event(self, DATA_REQUEST, skb, NULL);
379 /* Otherwise, the packets will be sent normally at the
380 * next pf-poll - Jean II */
381 }
382 }
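/*
 * Illustrative caller sketch (not part of the original code): whoever
 * builds the skb must reserve headroom for the two-byte LAP header
 * (address + control) pushed by irlap_data_request() above; 'payload'
 * and 'payload_len' are hypothetical. In practice IrLMP reserves this
 * space before handing the skb down.
 *
 *	skb = alloc_skb(LAP_ADDR_HEADER + LAP_CTRL_HEADER + payload_len,
 *			GFP_ATOMIC);
 *	if (skb) {
 *		skb_reserve(skb, LAP_ADDR_HEADER + LAP_CTRL_HEADER);
 *		memcpy(skb_put(skb, payload_len), payload, payload_len);
 *		irlap_data_request(self, skb, FALSE);
 *	}
 */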
383
384 /*
385 * Function irlap_unitdata_request (self, skb)
386 *
387 * Send Ultra data. This is data that must be sent outside any connection
388 *
389 */
390 #ifdef CONFIG_IRDA_ULTRA
391 void irlap_unitdata_request(struct irlap_cb *self, struct sk_buff *skb)
392 {
393 IRDA_ASSERT(self != NULL, return;);
394 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
395
396 IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
397
398 IRDA_ASSERT(skb_headroom(skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER),
399 return;);
400 skb_push(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);
401
402 skb->data[0] = CBROADCAST;
403 skb->data[1] = UI_FRAME;
404
405 /* Don't need to refcount, see irlmp_connless_data_request() */
406
407 skb_queue_tail(&self->txq_ultra, skb);
408
409 irlap_do_event(self, SEND_UI_FRAME, NULL, NULL);
410 }
411 #endif /*CONFIG_IRDA_ULTRA */
412
413 /*
414  * Function irlap_unitdata_indication (self, skb)
415 *
416 * Receive Ultra data. This is data that is received outside any connection
417 *
418 */
419 #ifdef CONFIG_IRDA_ULTRA
420 void irlap_unitdata_indication(struct irlap_cb *self, struct sk_buff *skb)
421 {
422 IRDA_DEBUG(1, "%s()\n", __FUNCTION__);
423
424 IRDA_ASSERT(self != NULL, return;);
425 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
426 IRDA_ASSERT(skb != NULL, return;);
427
428 /* Hide LAP header from IrLMP layer */
429 skb_pull(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);
430
431 irlmp_link_unitdata_indication(self->notify.instance, skb);
432 }
433 #endif /* CONFIG_IRDA_ULTRA */
434
435 /*
436 * Function irlap_disconnect_request (void)
437 *
438 * Request to disconnect connection by service user
439 */
440 void irlap_disconnect_request(struct irlap_cb *self)
441 {
442 IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
443
444 IRDA_ASSERT(self != NULL, return;);
445 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
446
447 /* Don't disconnect until all data frames are successfully sent */
448 if (skb_queue_len(&self->txq) > 0) {
449 self->disconnect_pending = TRUE;
450
451 return;
452 }
453
454 /* Check if we are in the right state for disconnecting */
455 switch (self->state) {
456         case LAP_XMIT_P: /* FALLTHROUGH */
457         case LAP_XMIT_S: /* FALLTHROUGH */
458         case LAP_CONN: /* FALLTHROUGH */
459         case LAP_RESET_WAIT: /* FALLTHROUGH */
460 case LAP_RESET_CHECK:
461 irlap_do_event(self, DISCONNECT_REQUEST, NULL, NULL);
462 break;
463 default:
464 IRDA_DEBUG(2, "%s(), disconnect pending!\n", __FUNCTION__);
465 self->disconnect_pending = TRUE;
466 break;
467 }
468 }
469
470 /*
471 * Function irlap_disconnect_indication (void)
472 *
473 * Disconnect request from other device
474 *
475 */
476 void irlap_disconnect_indication(struct irlap_cb *self, LAP_REASON reason)
477 {
478 IRDA_DEBUG(1, "%s(), reason=%s\n", __FUNCTION__, lap_reasons[reason]);
479
480 IRDA_ASSERT(self != NULL, return;);
481 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
482
483 /* Flush queues */
484 irlap_flush_all_queues(self);
485
486 switch (reason) {
487 case LAP_RESET_INDICATION:
488 IRDA_DEBUG(1, "%s(), Sending reset request!\n", __FUNCTION__);
489 irlap_do_event(self, RESET_REQUEST, NULL, NULL);
490 break;
491         case LAP_NO_RESPONSE: /* FALLTHROUGH */
492         case LAP_DISC_INDICATION: /* FALLTHROUGH */
493         case LAP_FOUND_NONE: /* FALLTHROUGH */
494 case LAP_MEDIA_BUSY:
495 irlmp_link_disconnect_indication(self->notify.instance, self,
496 reason, NULL);
497 break;
498 default:
499 IRDA_ERROR("%s: Unknown reason %d\n", __FUNCTION__, reason);
500 }
501 }
502
503 /*
504 * Function irlap_discovery_request (gen_addr_bit)
505 *
506 * Start one single discovery operation.
507 *
508 */
509 void irlap_discovery_request(struct irlap_cb *self, discovery_t *discovery)
510 {
511 struct irlap_info info;
512
513 IRDA_ASSERT(self != NULL, return;);
514 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
515 IRDA_ASSERT(discovery != NULL, return;);
516
517 IRDA_DEBUG(4, "%s(), nslots = %d\n", __FUNCTION__, discovery->nslots);
518
519 IRDA_ASSERT((discovery->nslots == 1) || (discovery->nslots == 6) ||
520 (discovery->nslots == 8) || (discovery->nslots == 16),
521 return;);
522
523 /* Discovery is only possible in NDM mode */
524 if (self->state != LAP_NDM) {
525 IRDA_DEBUG(4, "%s(), discovery only possible in NDM mode\n",
526 __FUNCTION__);
527 irlap_discovery_confirm(self, NULL);
528 /* Note : in theory, if we are not in NDM, we could postpone
529 * the discovery like we do for connection request.
530 * In practice, it's not worth it. If the media was busy,
531 * it's likely next time around it won't be busy. If we are
532 * in REPLY state, we will get passive discovery info & event.
533 * Jean II */
534 return;
535 }
536
537 /* Check if last discovery request finished in time, or if
538 * it was aborted due to the media busy flag. */
539 if (self->discovery_log != NULL) {
540 hashbin_delete(self->discovery_log, (FREE_FUNC) kfree);
541 self->discovery_log = NULL;
542 }
543
544 /* All operations will occur at predictable time, no need to lock */
545 self->discovery_log = hashbin_new(HB_NOLOCK);
546
547 if (self->discovery_log == NULL) {
548 IRDA_WARNING("%s(), Unable to allocate discovery log!\n",
549 __FUNCTION__);
550 return;
551 }
552
553 info.S = discovery->nslots; /* Number of slots */
554 info.s = 0; /* Current slot */
555
556 self->discovery_cmd = discovery;
557 info.discovery = discovery;
558
559 /* sysctl_slot_timeout bounds are checked in irsysctl.c - Jean II */
560 self->slot_timeout = sysctl_slot_timeout * HZ / 1000;
561
562 irlap_do_event(self, DISCOVERY_REQUEST, NULL, &info);
563 }
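/*
 * Example (grounded in the code above): a request with
 * discovery->nslots = 8 starts an 8-slot XID discovery; info.s walks
 * from slot 0 to 7 as the slot timer (sysctl_slot_timeout ms, converted
 * to jiffies above) expires, and the replies collected in
 * self->discovery_log are eventually reported back through
 * irlap_discovery_confirm().
 */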
564
565 /*
566 * Function irlap_discovery_confirm (log)
567 *
568  *    A device has been discovered in front of this station, so we
569 * report directly to LMP.
570 */
571 void irlap_discovery_confirm(struct irlap_cb *self, hashbin_t *discovery_log)
572 {
573 IRDA_ASSERT(self != NULL, return;);
574 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
575
576 IRDA_ASSERT(self->notify.instance != NULL, return;);
577
578 /*
579 * Check for successful discovery, since we are then allowed to clear
580 * the media busy condition (IrLAP 6.13.4 - p.94). This should allow
581 * us to make connection attempts much faster and easier (i.e. no
582 * collisions).
583 * Setting media busy to false will also generate an event allowing
584          * us to process pending events in the NDM state machine.
585          * Note : the spec doesn't define what a successful discovery is.
586 * If we want Ultra to work, it's successful even if there is
587 * nobody discovered - Jean II
588 */
589 if (discovery_log)
590 irda_device_set_media_busy(self->netdev, FALSE);
591
592 /* Inform IrLMP */
593 irlmp_link_discovery_confirm(self->notify.instance, discovery_log);
594 }
595
596 /*
597 * Function irlap_discovery_indication (log)
598 *
599 * Somebody is trying to discover us!
600 *
601 */
602 void irlap_discovery_indication(struct irlap_cb *self, discovery_t *discovery)
603 {
604 IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
605
606 IRDA_ASSERT(self != NULL, return;);
607 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
608 IRDA_ASSERT(discovery != NULL, return;);
609
610 IRDA_ASSERT(self->notify.instance != NULL, return;);
611
612 /* A device is very likely to connect immediately after it performs
613 * a successful discovery. This means that in our case, we are much
614 * more likely to receive a connection request over the medium.
615          * So, we back off to avoid collisions.
616          * IrLAP spec 6.13.4 suggests 100ms...
617          * Note : this little trick actually makes a *BIG* difference. If I set
618 * my Linux box with discovery enabled and one Ultra frame sent every
619 * second, my Palm has no trouble connecting to it every time !
620 * Jean II */
621 irda_device_set_media_busy(self->netdev, SMALL);
622
623 irlmp_link_discovery_indication(self->notify.instance, discovery);
624 }
625
626 /*
627 * Function irlap_status_indication (quality_of_link)
628 */
629 void irlap_status_indication(struct irlap_cb *self, int quality_of_link)
630 {
631 switch (quality_of_link) {
632 case STATUS_NO_ACTIVITY:
633 IRDA_MESSAGE("IrLAP, no activity on link!\n");
634 break;
635 case STATUS_NOISY:
636 IRDA_MESSAGE("IrLAP, noisy link!\n");
637 break;
638 default:
639 break;
640 }
641 irlmp_status_indication(self->notify.instance,
642 quality_of_link, LOCK_NO_CHANGE);
643 }
644
645 /*
646 * Function irlap_reset_indication (void)
647 */
648 void irlap_reset_indication(struct irlap_cb *self)
649 {
650 IRDA_DEBUG(1, "%s()\n", __FUNCTION__);
651
652 IRDA_ASSERT(self != NULL, return;);
653 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
654
655 if (self->state == LAP_RESET_WAIT)
656 irlap_do_event(self, RESET_REQUEST, NULL, NULL);
657 else
658 irlap_do_event(self, RESET_RESPONSE, NULL, NULL);
659 }
660
661 /*
662 * Function irlap_reset_confirm (void)
663 */
664 void irlap_reset_confirm(void)
665 {
666 IRDA_DEBUG(1, "%s()\n", __FUNCTION__);
667 }
668
669 /*
670 * Function irlap_generate_rand_time_slot (S, s)
671 *
672 * Generate a random time slot between s and S-1 where
673 * S = Number of slots (0 -> S-1)
674 * s = Current slot
675 */
676 int irlap_generate_rand_time_slot(int S, int s)
677 {
678 static int rand;
679 int slot;
680
681 IRDA_ASSERT((S - s) > 0, return 0;);
682
683 rand += jiffies;
684 rand ^= (rand << 12);
685 rand ^= (rand >> 20);
686
687 slot = s + rand % (S-s);
688
689         IRDA_ASSERT((slot >= s) && (slot < S), return 0;);
690
691 return slot;
692 }
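/*
 * Example: with S = 8 slots and current slot s = 3, the expression
 * above reduces to slot = 3 + (rand % 5), i.e. a reply slot in the
 * range 3..7, so each discovered device answers in one of the
 * remaining slots of the discovery frame.
 */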
693
694 /*
695 * Function irlap_update_nr_received (nr)
696 *
697 * Remove all acknowledged frames in current window queue. This code is
698 * not intuitive and you should not try to change it. If you think it
699 * contains bugs, please mail a patch to the author instead.
700 */
701 void irlap_update_nr_received(struct irlap_cb *self, int nr)
702 {
703 struct sk_buff *skb = NULL;
704 int count = 0;
705
706 /*
707 * Remove all the ack-ed frames from the window queue.
708 */
709
710 /*
711 * Optimize for the common case. It is most likely that the receiver
712 * will acknowledge all the frames we have sent! So in that case we
713 * delete all frames stored in window.
714 */
715 if (nr == self->vs) {
716 while ((skb = skb_dequeue(&self->wx_list)) != NULL) {
717 dev_kfree_skb(skb);
718 }
719 /* The last acked frame is the next to send minus one */
720 self->va = nr - 1;
721 } else {
722 /* Remove all acknowledged frames in current window */
723 while ((skb_peek(&self->wx_list) != NULL) &&
724 (((self->va+1) % 8) != nr))
725 {
726 skb = skb_dequeue(&self->wx_list);
727 dev_kfree_skb(skb);
728
729 self->va = (self->va + 1) % 8;
730 count++;
731 }
732 }
733
734 /* Advance window */
735 self->window = self->window_size - skb_queue_len(&self->wx_list);
736 }
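/*
 * Worked example of the bookkeeping above: suppose vs = 5 (next to
 * send), va = 2 (last acked) and wx_list holds unacked frames 3 and 4.
 * An RR with nr = 5 takes the fast path, empties wx_list and sets
 * va = 4. An RR with nr = 4 instead frees only frame 3, leaves va = 3,
 * and the window re-opens by the number of frames freed.
 */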
737
738 /*
739 * Function irlap_validate_ns_received (ns)
740 *
741 * Validate the next to send (ns) field from received frame.
742 */
743 int irlap_validate_ns_received(struct irlap_cb *self, int ns)
744 {
745 /* ns as expected? */
746 if (ns == self->vr)
747 return NS_EXPECTED;
748 /*
749 * Stations are allowed to treat invalid NS as unexpected NS
750 * IrLAP, Recv ... with-invalid-Ns. p. 84
751 */
752 return NS_UNEXPECTED;
753
754 /* return NR_INVALID; */
755 }
756 /*
757 * Function irlap_validate_nr_received (nr)
758 *
759 * Validate the next to receive (nr) field from received frame.
760 *
761 */
762 int irlap_validate_nr_received(struct irlap_cb *self, int nr)
763 {
764 /* nr as expected? */
765 if (nr == self->vs) {
766 IRDA_DEBUG(4, "%s(), expected!\n", __FUNCTION__);
767 return NR_EXPECTED;
768 }
769
770 /*
771 * unexpected nr? (but within current window), first we check if the
772 * ns numbers of the frames in the current window wrap.
773 */
774 if (self->va < self->vs) {
775 if ((nr >= self->va) && (nr <= self->vs))
776 return NR_UNEXPECTED;
777 } else {
778 if ((nr >= self->va) || (nr <= self->vs))
779 return NR_UNEXPECTED;
780 }
781
782 /* Invalid nr! */
783 return NR_INVALID;
784 }
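/*
 * Example of the wrap handling above: with va = 6 and vs = 1 the
 * unacked window covers sequence numbers 7 and 0. nr = 1 is
 * NR_EXPECTED (first test), nr = 7 or nr = 0 falls inside the wrapped
 * window and is NR_UNEXPECTED, while e.g. nr = 3 lies outside the
 * window and is reported as NR_INVALID.
 */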
785
786 /*
787 * Function irlap_initiate_connection_state ()
788 *
789 * Initialize the connection state parameters
790 *
791 */
792 void irlap_initiate_connection_state(struct irlap_cb *self)
793 {
794 IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
795
796 IRDA_ASSERT(self != NULL, return;);
797 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
798
799 /* Next to send and next to receive */
800 self->vs = self->vr = 0;
801
802 /* Last frame which got acked (0 - 1) % 8 */
803 self->va = 7;
804
805 self->window = 1;
806
807 self->remote_busy = FALSE;
808 self->retry_count = 0;
809 }
810
811 /*
812 * Function irlap_wait_min_turn_around (self, qos)
813 *
814  *    Wait the negotiated minimum turn around time. This function actually sets
815  *    the number of XBOFs that must be sent before the next transmitted
816 * frame in order to delay for the specified amount of time. This is
817 * done to avoid using timers, and the forbidden udelay!
818 */
819 void irlap_wait_min_turn_around(struct irlap_cb *self, struct qos_info *qos)
820 {
821 __u32 min_turn_time;
822 __u32 speed;
823
824 /* Get QoS values. */
825 speed = qos->baud_rate.value;
826 min_turn_time = qos->min_turn_time.value;
827
828 /* No need to calculate XBOFs for speeds over 115200 bps */
829 if (speed > 115200) {
830 self->mtt_required = min_turn_time;
831 return;
832 }
833
834 /*
835 * Send additional BOF's for the next frame for the requested
836 * min turn time, so now we must calculate how many chars (XBOF's) we
837 * must send for the requested time period (min turn time)
838 */
839 self->xbofs_delay = irlap_min_turn_time_in_bytes(speed, min_turn_time);
840 }
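/*
 * Back-of-the-envelope example of the calculation delegated to
 * irlap_min_turn_time_in_bytes(): at 9600 bps an async character is
 * 10 bits and occupies roughly 1.04 ms of line time, so a negotiated
 * min turn time of 5000 us corresponds to about 5 extra XBOF
 * characters sent ahead of the next frame.
 */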
841
842 /*
843 * Function irlap_flush_all_queues (void)
844 *
845 * Flush all queues
846 *
847 */
848 void irlap_flush_all_queues(struct irlap_cb *self)
849 {
850 struct sk_buff* skb;
851
852 IRDA_ASSERT(self != NULL, return;);
853 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
854
855 /* Free transmission queue */
856 while ((skb = skb_dequeue(&self->txq)) != NULL)
857 dev_kfree_skb(skb);
858
859 while ((skb = skb_dequeue(&self->txq_ultra)) != NULL)
860 dev_kfree_skb(skb);
861
862 /* Free sliding window buffered packets */
863 while ((skb = skb_dequeue(&self->wx_list)) != NULL)
864 dev_kfree_skb(skb);
865 }
866
867 /*
868  * Function irlap_change_speed (self, speed, now)
869 *
870 * Change the speed of the IrDA port
871 *
872 */
873 static void irlap_change_speed(struct irlap_cb *self, __u32 speed, int now)
874 {
875 struct sk_buff *skb;
876
877 IRDA_DEBUG(0, "%s(), setting speed to %d\n", __FUNCTION__, speed);
878
879 IRDA_ASSERT(self != NULL, return;);
880 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
881
882 self->speed = speed;
883
884 /* Change speed now, or just piggyback speed on frames */
885 if (now) {
886 /* Send down empty frame to trigger speed change */
887 skb = dev_alloc_skb(0);
888                 if (skb) irlap_queue_xmit(self, skb);
889 }
890 }
891
892 /*
893 * Function irlap_init_qos_capabilities (self, qos)
894 *
895  *    Initialize QoS for this IrLAP session. What we do is compute the
896  *    intersection of the QoS capabilities of the user, the driver and
897  *    IrLAP itself. Normally, IrLAP will not specify any values, but it can
898 * be used to restrict certain values.
899 */
900 static void irlap_init_qos_capabilities(struct irlap_cb *self,
901 struct qos_info *qos_user)
902 {
903 IRDA_ASSERT(self != NULL, return;);
904 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
905 IRDA_ASSERT(self->netdev != NULL, return;);
906
907 /* Start out with the maximum QoS support possible */
908 irda_init_max_qos_capabilies(&self->qos_rx);
909
910         /* Apply the driver's QoS capabilities */
911 irda_qos_compute_intersection(&self->qos_rx, self->qos_dev);
912
913 /*
914 * Check for user supplied QoS parameters. The service user is only
915 * allowed to supply these values. We check each parameter since the
916 * user may not have set all of them.
917 */
918 if (qos_user) {
919 IRDA_DEBUG(1, "%s(), Found user specified QoS!\n", __FUNCTION__);
920
921 if (qos_user->baud_rate.bits)
922 self->qos_rx.baud_rate.bits &= qos_user->baud_rate.bits;
923
924 if (qos_user->max_turn_time.bits)
925 self->qos_rx.max_turn_time.bits &= qos_user->max_turn_time.bits;
926 if (qos_user->data_size.bits)
927 self->qos_rx.data_size.bits &= qos_user->data_size.bits;
928
929 if (qos_user->link_disc_time.bits)
930 self->qos_rx.link_disc_time.bits &= qos_user->link_disc_time.bits;
931 }
932
933 /* Use 500ms in IrLAP for now */
934 self->qos_rx.max_turn_time.bits &= 0x01;
935
936 /* Set data size */
937 /*self->qos_rx.data_size.bits &= 0x03;*/
938
939 irda_qos_bits_to_value(&self->qos_rx);
940 }
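/*
 * Example of the bit intersection above: if the driver advertises
 * baud_rate.bits covering 9600..115200 and the service user restricts
 * baud_rate.bits to 9600 only, the AND leaves just the 9600 bit set
 * and irda_qos_bits_to_value() turns that back into
 * baud_rate.value = 9600 for this session.
 */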
941
942 /*
943  * Function irlap_apply_default_connection_parameters (self)
944 *
945 * Use the default connection and transmission parameters
946 */
947 void irlap_apply_default_connection_parameters(struct irlap_cb *self)
948 {
949 IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
950
951 IRDA_ASSERT(self != NULL, return;);
952 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
953
954 /* xbofs : Default value in NDM */
955 self->next_bofs = 12;
956 self->bofs_count = 12;
957
958 /* NDM Speed is 9600 */
959 irlap_change_speed(self, 9600, TRUE);
960
961 /* Set mbusy when going to NDM state */
962 irda_device_set_media_busy(self->netdev, TRUE);
963
964 /*
965 * Generate random connection address for this session, which must
966 * be 7 bits wide and different from 0x00 and 0xfe
967 */
968 while ((self->caddr == 0x00) || (self->caddr == 0xfe)) {
969 get_random_bytes(&self->caddr, sizeof(self->caddr));
970 self->caddr &= 0xfe;
971 }
972
973         /* Use default values until connection has been negotiated */
974 self->slot_timeout = sysctl_slot_timeout;
975 self->final_timeout = FINAL_TIMEOUT;
976 self->poll_timeout = POLL_TIMEOUT;
977 self->wd_timeout = WD_TIMEOUT;
978
979 /* Set some default values */
980 self->qos_tx.baud_rate.value = 9600;
981 self->qos_rx.baud_rate.value = 9600;
982 self->qos_tx.max_turn_time.value = 0;
983 self->qos_rx.max_turn_time.value = 0;
984 self->qos_tx.min_turn_time.value = 0;
985 self->qos_rx.min_turn_time.value = 0;
986 self->qos_tx.data_size.value = 64;
987 self->qos_rx.data_size.value = 64;
988 self->qos_tx.window_size.value = 1;
989 self->qos_rx.window_size.value = 1;
990 self->qos_tx.additional_bofs.value = 12;
991 self->qos_rx.additional_bofs.value = 12;
992 self->qos_tx.link_disc_time.value = 0;
993 self->qos_rx.link_disc_time.value = 0;
994
995 irlap_flush_all_queues(self);
996
997 self->disconnect_pending = FALSE;
998 self->connect_pending = FALSE;
999 }
1000
1001 /*
1002  * Function irlap_apply_connection_parameters (self, now)
1003 *
1004 * Initialize IrLAP with the negotiated QoS values
1005 *
1006 * If 'now' is false, the speed and xbofs will be changed after the next
1007 * frame is sent.
1008 * If 'now' is true, the speed and xbofs is changed immediately
1009 */
1010 void irlap_apply_connection_parameters(struct irlap_cb *self, int now)
1011 {
1012 IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
1013
1014 IRDA_ASSERT(self != NULL, return;);
1015 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
1016
1017 /* Set the negotiated xbofs value */
1018 self->next_bofs = self->qos_tx.additional_bofs.value;
1019 if (now)
1020 self->bofs_count = self->next_bofs;
1021
1022 /* Set the negotiated link speed (may need the new xbofs value) */
1023 irlap_change_speed(self, self->qos_tx.baud_rate.value, now);
1024
1025 self->window_size = self->qos_tx.window_size.value;
1026 self->window = self->qos_tx.window_size.value;
1027
1028 #ifdef CONFIG_IRDA_DYNAMIC_WINDOW
1029 /*
1030 * Calculate how many bytes it is possible to transmit before the
1031 * link must be turned around
1032 */
1033 self->line_capacity =
1034 irlap_max_line_capacity(self->qos_tx.baud_rate.value,
1035 self->qos_tx.max_turn_time.value);
1036 self->bytes_left = self->line_capacity;
1037 #endif /* CONFIG_IRDA_DYNAMIC_WINDOW */
1038
1039
1040 /*
1041          * Initialize timeout values; some of the rules are listed on
1042 * page 92 in IrLAP.
1043 */
1044 IRDA_ASSERT(self->qos_tx.max_turn_time.value != 0, return;);
1045 IRDA_ASSERT(self->qos_rx.max_turn_time.value != 0, return;);
1046 /* The poll timeout applies only to the primary station.
1047          * It defines the maximum time the primary stays in XMIT mode
1048          * before timing out and turning the link around (sending an RR).
1049          * Or, this is how long we can keep the pf bit in primary mode.
1050          * Therefore, it must be lower than or equal to our *OWN* max turn around.
1051 * Jean II */
1052 self->poll_timeout = self->qos_tx.max_turn_time.value * HZ / 1000;
1053 /* The Final timeout applies only to the primary station.
1054          * It defines the maximum time the primary waits (mostly in RECV mode)
1055          * for an answer from the secondary station before polling it again.
1056          * Therefore, it must be greater than or equal to our *PARTNER's*
1057 * max turn around time - Jean II */
1058 self->final_timeout = self->qos_rx.max_turn_time.value * HZ / 1000;
1059 /* The Watchdog Bit timeout applies only to the secondary station.
1060          * It defines the maximum time the secondary waits (mostly in RECV mode)
1061          * for a poll from the primary station before getting annoyed.
1062          * Therefore, it must be greater than or equal to our *PARTNER's*
1063 * max turn around time - Jean II */
1064 self->wd_timeout = self->final_timeout * 2;
1065
1066 /*
1067          * N1 and N2 are the maximum retry counts for *both* the final timer
1068 * and the wd timer (with a factor 2) as defined above.
1069 * After N1 retry of a timer, we give a warning to the user.
1070 * After N2 retry, we consider the link dead and disconnect it.
1071 * Jean II
1072 */
1073
1074 /*
1075 * Set N1 to 0 if Link Disconnect/Threshold Time = 3 and set it to
1076 * 3 seconds otherwise. See page 71 in IrLAP for more details.
1077          * Actually, it's not always 3 seconds, as we allow it to be set
1078          * via sysctl... Max maxtt is 500ms, and N1 needs to be a multiple
1079          * of 2, so 1 second is the minimum we can allow. - Jean II
1080 */
1081 if (self->qos_tx.link_disc_time.value == sysctl_warn_noreply_time)
1082 /*
1083 * If we set N1 to 0, it will trigger immediately, which is
1084 * not what we want. What we really want is to disable it,
1085 * Jean II
1086 */
1087                 self->N1 = -2; /* Disable - Needs to be a multiple of 2 */
1088 else
1089 self->N1 = sysctl_warn_noreply_time * 1000 /
1090 self->qos_rx.max_turn_time.value;
1091
1092 IRDA_DEBUG(4, "Setting N1 = %d\n", self->N1);
1093
1094 /* Set N2 to match our own disconnect time */
1095 self->N2 = self->qos_tx.link_disc_time.value * 1000 /
1096 self->qos_rx.max_turn_time.value;
1097 IRDA_DEBUG(4, "Setting N2 = %d\n", self->N2);
1098 }
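/*
 * Worked example of the retry counters above: with a negotiated
 * max_turn_time of 500 ms, a link_disc_time of 12 s and the default
 * sysctl_warn_noreply_time of 3 s, N1 = 3000 / 500 = 6 final-timer
 * expiries before the user is warned, and N2 = 12000 / 500 = 24
 * expiries before the link is considered dead and disconnected.
 */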
1099
1100 #ifdef CONFIG_PROC_FS
1101 struct irlap_iter_state {
1102 int id;
1103 };
1104
1105 static void *irlap_seq_start(struct seq_file *seq, loff_t *pos)
1106 {
1107 struct irlap_iter_state *iter = seq->private;
1108 struct irlap_cb *self;
1109
1110         /* Protect our access to the irlap list */
1111 spin_lock_irq(&irlap->hb_spinlock);
1112 iter->id = 0;
1113
1114 for (self = (struct irlap_cb *) hashbin_get_first(irlap);
1115 self; self = (struct irlap_cb *) hashbin_get_next(irlap)) {
1116 if (iter->id == *pos)
1117 break;
1118 ++iter->id;
1119 }
1120
1121 return self;
1122 }
1123
1124 static void *irlap_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1125 {
1126 struct irlap_iter_state *iter = seq->private;
1127
1128 ++*pos;
1129 ++iter->id;
1130 return (void *) hashbin_get_next(irlap);
1131 }
1132
1133 static void irlap_seq_stop(struct seq_file *seq, void *v)
1134 {
1135 spin_unlock_irq(&irlap->hb_spinlock);
1136 }
1137
1138 static int irlap_seq_show(struct seq_file *seq, void *v)
1139 {
1140 const struct irlap_iter_state *iter = seq->private;
1141 const struct irlap_cb *self = v;
1142
1143 IRDA_ASSERT(self->magic == LAP_MAGIC, return -EINVAL;);
1144
1145 seq_printf(seq, "irlap%d ", iter->id);
1146 seq_printf(seq, "state: %s\n",
1147 irlap_state[self->state]);
1148
1149 seq_printf(seq, " device name: %s, ",
1150 (self->netdev) ? self->netdev->name : "bug");
1151 seq_printf(seq, "hardware name: %s\n", self->hw_name);
1152
1153 seq_printf(seq, " caddr: %#02x, ", self->caddr);
1154 seq_printf(seq, "saddr: %#08x, ", self->saddr);
1155 seq_printf(seq, "daddr: %#08x\n", self->daddr);
1156
1157 seq_printf(seq, " win size: %d, ",
1158 self->window_size);
1159 seq_printf(seq, "win: %d, ", self->window);
1160 #ifdef CONFIG_IRDA_DYNAMIC_WINDOW
1161 seq_printf(seq, "line capacity: %d, ",
1162 self->line_capacity);
1163 seq_printf(seq, "bytes left: %d\n", self->bytes_left);
1164 #endif /* CONFIG_IRDA_DYNAMIC_WINDOW */
1165 seq_printf(seq, " tx queue len: %d ",
1166 skb_queue_len(&self->txq));
1167 seq_printf(seq, "win queue len: %d ",
1168 skb_queue_len(&self->wx_list));
1169 seq_printf(seq, "rbusy: %s", self->remote_busy ?
1170 "TRUE" : "FALSE");
1171 seq_printf(seq, " mbusy: %s\n", self->media_busy ?
1172 "TRUE" : "FALSE");
1173
1174 seq_printf(seq, " retrans: %d ", self->retry_count);
1175 seq_printf(seq, "vs: %d ", self->vs);
1176 seq_printf(seq, "vr: %d ", self->vr);
1177 seq_printf(seq, "va: %d\n", self->va);
1178
1179 seq_printf(seq, " qos\tbps\tmaxtt\tdsize\twinsize\taddbofs\tmintt\tldisc\tcomp\n");
1180
1181 seq_printf(seq, " tx\t%d\t",
1182 self->qos_tx.baud_rate.value);
1183 seq_printf(seq, "%d\t",
1184 self->qos_tx.max_turn_time.value);
1185 seq_printf(seq, "%d\t",
1186 self->qos_tx.data_size.value);
1187 seq_printf(seq, "%d\t",
1188 self->qos_tx.window_size.value);
1189 seq_printf(seq, "%d\t",
1190 self->qos_tx.additional_bofs.value);
1191 seq_printf(seq, "%d\t",
1192 self->qos_tx.min_turn_time.value);
1193 seq_printf(seq, "%d\t",
1194 self->qos_tx.link_disc_time.value);
1195 seq_printf(seq, "\n");
1196
1197 seq_printf(seq, " rx\t%d\t",
1198 self->qos_rx.baud_rate.value);
1199 seq_printf(seq, "%d\t",
1200 self->qos_rx.max_turn_time.value);
1201 seq_printf(seq, "%d\t",
1202 self->qos_rx.data_size.value);
1203 seq_printf(seq, "%d\t",
1204 self->qos_rx.window_size.value);
1205 seq_printf(seq, "%d\t",
1206 self->qos_rx.additional_bofs.value);
1207 seq_printf(seq, "%d\t",
1208 self->qos_rx.min_turn_time.value);
1209 seq_printf(seq, "%d\n",
1210 self->qos_rx.link_disc_time.value);
1211
1212 return 0;
1213 }
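/*
 * The seq_file above is typically exposed as /proc/net/irda/irlap
 * (irlap_seq_fops is wired up in irproc.c); the output looks roughly
 * like this, with purely illustrative values:
 *
 *	irlap0  state: LAP_NDM
 *	  device name: irda0, hardware name: mydongle
 *	  caddr: 0xfe, saddr: 0x5e8ab4d2, daddr: 0x9a3f01c7
 *	  ...
 */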
1214
1215 static struct seq_operations irlap_seq_ops = {
1216 .start = irlap_seq_start,
1217 .next = irlap_seq_next,
1218 .stop = irlap_seq_stop,
1219 .show = irlap_seq_show,
1220 };
1221
1222 static int irlap_seq_open(struct inode *inode, struct file *file)
1223 {
1224 struct seq_file *seq;
1225 int rc = -ENOMEM;
1226 struct irlap_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
1227
1228 if (!s)
1229 goto out;
1230
1231 if (irlap == NULL) {
1232 rc = -EINVAL;
1233 goto out_kfree;
1234 }
1235
1236 rc = seq_open(file, &irlap_seq_ops);
1237 if (rc)
1238 goto out_kfree;
1239
1240 seq = file->private_data;
1241 seq->private = s;
1242 memset(s, 0, sizeof(*s));
1243 out:
1244 return rc;
1245 out_kfree:
1246 kfree(s);
1247 goto out;
1248 }
1249
1250 struct file_operations irlap_seq_fops = {
1251 .owner = THIS_MODULE,
1252 .open = irlap_seq_open,
1253 .read = seq_read,
1254 .llseek = seq_lseek,
1255 .release = seq_release_private,
1256 };
1257
1258 #endif /* CONFIG_PROC_FS */