/*********************************************************************
 *
 * Filename:      irlap.c
 * Version:       1.0
 * Description:   IrLAP implementation for Linux
 * Status:        Stable
 * Author:        Dag Brattli <dagb@cs.uit.no>
 * Created at:    Mon Aug  4 20:40:53 1997
 * Modified at:   Tue Dec 14 09:26:44 1999
 * Modified by:   Dag Brattli <dagb@cs.uit.no>
 *
 *     Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
 *     Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com>
 *
 *     This program is free software; you can redistribute it and/or
 *     modify it under the terms of the GNU General Public License as
 *     published by the Free Software Foundation; either version 2 of
 *     the License, or (at your option) any later version.
 *
 *     This program is distributed in the hope that it will be useful,
 *     but WITHOUT ANY WARRANTY; without even the implied warranty of
 *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *     GNU General Public License for more details.
 *
 *     You should have received a copy of the GNU General Public License
 *     along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 ********************************************************************/

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/module.h>
#include <linux/seq_file.h>

#include <net/irda/irda.h>
#include <net/irda/irda_device.h>
#include <net/irda/irqueue.h>
#include <net/irda/irlmp.h>
#include <net/irda/irlmp_frame.h>
#include <net/irda/irlap_frame.h>
#include <net/irda/irlap.h>
#include <net/irda/timer.h>
#include <net/irda/qos.h>
static hashbin_t *irlap = NULL;
int sysctl_slot_timeout = SLOT_TIMEOUT * 1000 / HZ;

/* This is the delay of missed pf period before generating an event
 * to the application. The spec mandates 3 seconds, but in some cases
 * it's way too long. - Jean II */
int sysctl_warn_noreply_time = 3;
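
/*
 * Illustrative note (added for clarity, not in the original source): both
 * sysctls above are exported in user-friendly units.  sysctl_slot_timeout
 * converts the SLOT_TIMEOUT constant (apparently in jiffies) to
 * milliseconds, e.g. with HZ = 250 a SLOT_TIMEOUT of 25 jiffies is
 * exported as 25 * 1000 / 250 = 100 ms.  sysctl_warn_noreply_time is
 * simply in seconds.
 */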

extern void irlap_queue_xmit(struct irlap_cb *self, struct sk_buff *skb);
static void __irlap_close(struct irlap_cb *self);
static void irlap_init_qos_capabilities(struct irlap_cb *self,
                                        struct qos_info *qos_user);

#ifdef CONFIG_IRDA_DEBUG
static const char *const lap_reasons[] = {
        "ERROR, NOT USED",
        "LAP_DISC_INDICATION",
        "LAP_NO_RESPONSE",
        "LAP_RESET_INDICATION",
        "LAP_FOUND_NONE",
        "LAP_MEDIA_BUSY",
        "LAP_PRIMARY_CONFLICT",
        "ERROR, NOT USED",
};
#endif  /* CONFIG_IRDA_DEBUG */

int __init irlap_init(void)
{
        /* Check if the compiler did its job properly.
         * May happen on some ARM configuration, check with Russell King. */
        IRDA_ASSERT(sizeof(struct xid_frame) == 14, ;);
        IRDA_ASSERT(sizeof(struct test_frame) == 10, ;);
        IRDA_ASSERT(sizeof(struct ua_frame) == 10, ;);
        IRDA_ASSERT(sizeof(struct snrm_frame) == 11, ;);

        /* Allocate master array */
        irlap = hashbin_new(HB_LOCK);
        if (irlap == NULL) {
                net_err_ratelimited("%s: can't allocate irlap hashbin!\n",
                                    __func__);
                return -ENOMEM;
        }

        return 0;
}

void irlap_cleanup(void)
{
        IRDA_ASSERT(irlap != NULL, return;);

        hashbin_delete(irlap, (FREE_FUNC) __irlap_close);
}

/*
 * Function irlap_open (driver)
 *
 *    Initialize IrLAP layer
 *
 */
struct irlap_cb *irlap_open(struct net_device *dev, struct qos_info *qos,
                            const char *hw_name)
{
        struct irlap_cb *self;

        IRDA_DEBUG(4, "%s()\n", __func__);

        /* Initialize the irlap structure. */
        self = kzalloc(sizeof(struct irlap_cb), GFP_KERNEL);
        if (self == NULL)
                return NULL;

        self->magic = LAP_MAGIC;

        /* Make a binding between the layers */
        self->netdev = dev;
        self->qos_dev = qos;
        /* Copy hardware name */
        if (hw_name != NULL) {
                strlcpy(self->hw_name, hw_name, sizeof(self->hw_name));
        } else {
                self->hw_name[0] = '\0';
        }

        /* FIXME: should we get our own field? */
        dev->atalk_ptr = self;

        self->state = LAP_OFFLINE;

        /* Initialize transmit queue */
        skb_queue_head_init(&self->txq);
        skb_queue_head_init(&self->txq_ultra);
        skb_queue_head_init(&self->wx_list);

        /* My unique IrLAP device address! */
        /* We don't want the broadcast address, nor the NULL address
         * (most often used to signify "invalid"), and we don't want an
         * address already in use (otherwise connect won't be able
         * to select the proper link). - Jean II */
        do {
                get_random_bytes(&self->saddr, sizeof(self->saddr));
        } while ((self->saddr == 0x0) || (self->saddr == BROADCAST) ||
                 (hashbin_lock_find(irlap, self->saddr, NULL)));
        /* Copy to the driver */
        memcpy(dev->dev_addr, &self->saddr, 4);

        init_timer(&self->slot_timer);
        init_timer(&self->query_timer);
        init_timer(&self->discovery_timer);
        init_timer(&self->final_timer);
        init_timer(&self->poll_timer);
        init_timer(&self->wd_timer);
        init_timer(&self->backoff_timer);
        init_timer(&self->media_busy_timer);

        irlap_apply_default_connection_parameters(self);

        self->N3 = 3; /* # connection attempts to try before giving up */

        self->state = LAP_NDM;

        hashbin_insert(irlap, (irda_queue_t *) self, self->saddr, NULL);

        irlmp_register_link(self, self->saddr, &self->notify);

        return self;
}
EXPORT_SYMBOL(irlap_open);

/*
 * Function __irlap_close (self)
 *
 *    Remove IrLAP and all allocated memory. Stop any pending timers.
 *
 */
static void __irlap_close(struct irlap_cb *self)
{
        IRDA_ASSERT(self != NULL, return;);
        IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

        /* Stop timers */
        del_timer(&self->slot_timer);
        del_timer(&self->query_timer);
        del_timer(&self->discovery_timer);
        del_timer(&self->final_timer);
        del_timer(&self->poll_timer);
        del_timer(&self->wd_timer);
        del_timer(&self->backoff_timer);
        del_timer(&self->media_busy_timer);

        irlap_flush_all_queues(self);

        self->magic = 0;

        kfree(self);
}

/*
 * Function irlap_close (self)
 *
 *    Remove IrLAP instance
 *
 */
void irlap_close(struct irlap_cb *self)
{
        struct irlap_cb *lap;

        IRDA_DEBUG(4, "%s()\n", __func__);

        IRDA_ASSERT(self != NULL, return;);
        IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

        /* We used to send a LAP_DISC_INDICATION here, but this was
         * racy. It has been moved into irlmp_unregister_link()
         * itself. Jean II */

        /* Kill the LAP and all LSAPs on top of it */
        irlmp_unregister_link(self->saddr);
        self->notify.instance = NULL;

        /* Make sure we manage to remove ourselves from the hash */
        lap = hashbin_remove(irlap, self->saddr, NULL);
        if (!lap) {
                IRDA_DEBUG(1, "%s(), Didn't find myself!\n", __func__);
                return;
        }
        __irlap_close(lap);
}
EXPORT_SYMBOL(irlap_close);

/*
 * Function irlap_connect_indication (self, skb)
 *
 *    Another device is attempting to make a connection
 *
 */
void irlap_connect_indication(struct irlap_cb *self, struct sk_buff *skb)
{
        IRDA_DEBUG(4, "%s()\n", __func__);

        IRDA_ASSERT(self != NULL, return;);
        IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

        irlap_init_qos_capabilities(self, NULL); /* No user QoS! */

        irlmp_link_connect_indication(self->notify.instance, self->saddr,
                                      self->daddr, &self->qos_tx, skb);
}

/*
 * Function irlap_connect_response (self, skb)
 *
 *    Service user has accepted incoming connection
 *
 */
void irlap_connect_response(struct irlap_cb *self, struct sk_buff *userdata)
{
        IRDA_DEBUG(4, "%s()\n", __func__);

        irlap_do_event(self, CONNECT_RESPONSE, userdata, NULL);
}

/*
 * Function irlap_connect_request (self, daddr, qos_user, sniff)
 *
 *    Request a connection with another device; sniffing is not
 *    implemented yet.
 *
 */
void irlap_connect_request(struct irlap_cb *self, __u32 daddr,
                           struct qos_info *qos_user, int sniff)
{
        IRDA_DEBUG(3, "%s(), daddr=0x%08x\n", __func__, daddr);

        IRDA_ASSERT(self != NULL, return;);
        IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

        self->daddr = daddr;

        /*
         * If the service user specifies QoS values for this connection,
         * then use them
         */
        irlap_init_qos_capabilities(self, qos_user);

        if ((self->state == LAP_NDM) && !self->media_busy)
                irlap_do_event(self, CONNECT_REQUEST, NULL, NULL);
        else
                self->connect_pending = TRUE;
}

/*
 * Function irlap_connect_confirm (self, skb)
 *
 *    Connection request has been accepted
 *
 */
void irlap_connect_confirm(struct irlap_cb *self, struct sk_buff *skb)
{
        IRDA_DEBUG(4, "%s()\n", __func__);

        IRDA_ASSERT(self != NULL, return;);
        IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

        irlmp_link_connect_confirm(self->notify.instance, &self->qos_tx, skb);
}

/*
 * Function irlap_data_indication (self, skb)
 *
 *    Received data frames from IR-port, so we just pass them up to
 *    IrLMP for further processing
 *
 */
void irlap_data_indication(struct irlap_cb *self, struct sk_buff *skb,
                           int unreliable)
{
        /* Hide LAP header from IrLMP layer */
        skb_pull(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);

        irlmp_link_data_indication(self->notify.instance, skb, unreliable);
}

/*
 * Function irlap_data_request (self, skb)
 *
 *    Queue data for transmission; must wait until XMIT state
 *
 */
void irlap_data_request(struct irlap_cb *self, struct sk_buff *skb,
                        int unreliable)
{
        IRDA_ASSERT(self != NULL, return;);
        IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

        IRDA_DEBUG(3, "%s()\n", __func__);

        IRDA_ASSERT(skb_headroom(skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER),
                    return;);
        skb_push(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);

        /*
         * Must set the frame format now so that the rest of the code knows
         * if it is dealing with an I or a UI frame
         */
        if (unreliable)
                skb->data[1] = UI_FRAME;
        else
                skb->data[1] = I_FRAME;

        /* Don't forget to refcount it - see irlmp_connect_request(). */
        skb_get(skb);

        /* Add at the end of the queue (keep ordering) - Jean II */
        skb_queue_tail(&self->txq, skb);

        /*
         * Send an event for this frame only if we are in the right state.
         * FIXME: udata should be sent first! (skb_queue_head?)
         */
        if ((self->state == LAP_XMIT_P) || (self->state == LAP_XMIT_S)) {
                /* If we are not already processing the Tx queue, trigger
                 * transmission immediately - Jean II */
                if ((skb_queue_len(&self->txq) <= 1) && (!self->local_busy))
                        irlap_do_event(self, DATA_REQUEST, skb, NULL);
                /* Otherwise, the packets will be sent normally at the
                 * next pf-poll - Jean II */
        }
}
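
/*
 * Illustrative note (added for clarity, not in the original source): the
 * two header bytes pushed above form the simple LAP header assumed
 * throughout this file: data[0] carries the address field (see
 * irlap_unitdata_request() below, where it is set to CBROADCAST), and
 * data[1] carries the control field (I_FRAME or UI_FRAME here).
 */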

/*
 * Function irlap_unitdata_request (self, skb)
 *
 *    Send Ultra data. This is data that must be sent outside any connection
 *
 */
#ifdef CONFIG_IRDA_ULTRA
void irlap_unitdata_request(struct irlap_cb *self, struct sk_buff *skb)
{
        IRDA_ASSERT(self != NULL, return;);
        IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

        IRDA_DEBUG(3, "%s()\n", __func__);

        IRDA_ASSERT(skb_headroom(skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER),
                    return;);
        skb_push(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);

        skb->data[0] = CBROADCAST;
        skb->data[1] = UI_FRAME;

        /* Don't need to refcount, see irlmp_connless_data_request() */

        skb_queue_tail(&self->txq_ultra, skb);

        irlap_do_event(self, SEND_UI_FRAME, NULL, NULL);
}
#endif /* CONFIG_IRDA_ULTRA */

/*
 * Function irlap_unitdata_indication (self, skb)
 *
 *    Receive Ultra data. This is data that is received outside any connection
 *
 */
#ifdef CONFIG_IRDA_ULTRA
void irlap_unitdata_indication(struct irlap_cb *self, struct sk_buff *skb)
{
        IRDA_DEBUG(1, "%s()\n", __func__);

        IRDA_ASSERT(self != NULL, return;);
        IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
        IRDA_ASSERT(skb != NULL, return;);

        /* Hide LAP header from IrLMP layer */
        skb_pull(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);

        irlmp_link_unitdata_indication(self->notify.instance, skb);
}
#endif /* CONFIG_IRDA_ULTRA */

/*
 * Function irlap_disconnect_request (void)
 *
 *    Request to disconnect connection by service user
 */
void irlap_disconnect_request(struct irlap_cb *self)
{
        IRDA_DEBUG(3, "%s()\n", __func__);

        IRDA_ASSERT(self != NULL, return;);
        IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

        /* Don't disconnect until all data frames are successfully sent */
        if (!skb_queue_empty(&self->txq)) {
                self->disconnect_pending = TRUE;
                return;
        }

        /* Check if we are in the right state for disconnecting */
        switch (self->state) {
        case LAP_XMIT_P:        /* FALLTHROUGH */
        case LAP_XMIT_S:        /* FALLTHROUGH */
        case LAP_CONN:          /* FALLTHROUGH */
        case LAP_RESET_WAIT:    /* FALLTHROUGH */
        case LAP_RESET_CHECK:
                irlap_do_event(self, DISCONNECT_REQUEST, NULL, NULL);
                break;
        default:
                IRDA_DEBUG(2, "%s(), disconnect pending!\n", __func__);
                self->disconnect_pending = TRUE;
                break;
        }
}

/*
 * Function irlap_disconnect_indication (void)
 *
 *    Disconnect request from other device
 *
 */
void irlap_disconnect_indication(struct irlap_cb *self, LAP_REASON reason)
{
        IRDA_DEBUG(1, "%s(), reason=%s\n", __func__, lap_reasons[reason]);

        IRDA_ASSERT(self != NULL, return;);
        IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

        /* Flush queues */
        irlap_flush_all_queues(self);

        switch (reason) {
        case LAP_RESET_INDICATION:
                IRDA_DEBUG(1, "%s(), Sending reset request!\n", __func__);
                irlap_do_event(self, RESET_REQUEST, NULL, NULL);
                break;
        case LAP_NO_RESPONSE:      /* FALLTHROUGH */
        case LAP_DISC_INDICATION:  /* FALLTHROUGH */
        case LAP_FOUND_NONE:       /* FALLTHROUGH */
        case LAP_MEDIA_BUSY:
                irlmp_link_disconnect_indication(self->notify.instance, self,
                                                 reason, NULL);
                break;
        default:
                net_err_ratelimited("%s: Unknown reason %d\n",
                                    __func__, reason);
        }
}

/*
 * Function irlap_discovery_request (gen_addr_bit)
 *
 *    Start one single discovery operation.
 *
 */
void irlap_discovery_request(struct irlap_cb *self, discovery_t *discovery)
{
        struct irlap_info info;

        IRDA_ASSERT(self != NULL, return;);
        IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
        IRDA_ASSERT(discovery != NULL, return;);

        IRDA_DEBUG(4, "%s(), nslots = %d\n", __func__, discovery->nslots);

        IRDA_ASSERT((discovery->nslots == 1) || (discovery->nslots == 6) ||
                    (discovery->nslots == 8) || (discovery->nslots == 16),
                    return;);

        /* Discovery is only possible in NDM mode */
        if (self->state != LAP_NDM) {
                IRDA_DEBUG(4, "%s(), discovery only possible in NDM mode\n",
                           __func__);
                irlap_discovery_confirm(self, NULL);
                /* Note : in theory, if we are not in NDM, we could postpone
                 * the discovery like we do for connection request.
                 * In practice, it's not worth it. If the media was busy,
                 * it's likely next time around it won't be busy. If we are
                 * in REPLY state, we will get passive discovery info & event.
                 * Jean II */
                return;
        }

        /* Check if the last discovery request finished in time, or if
         * it was aborted due to the media busy flag. */
        if (self->discovery_log != NULL) {
                hashbin_delete(self->discovery_log, (FREE_FUNC) kfree);
                self->discovery_log = NULL;
        }

        /* All operations will occur at predictable time, no need to lock */
        self->discovery_log = hashbin_new(HB_NOLOCK);

        if (self->discovery_log == NULL) {
                net_warn_ratelimited("%s(), Unable to allocate discovery log!\n",
                                     __func__);
                return;
        }

        info.S = discovery->nslots;     /* Number of slots */
        info.s = 0;                     /* Current slot */

        self->discovery_cmd = discovery;
        info.discovery = discovery;

        /* sysctl_slot_timeout bounds are checked in irsysctl.c - Jean II */
        self->slot_timeout = sysctl_slot_timeout * HZ / 1000;

        irlap_do_event(self, DISCOVERY_REQUEST, NULL, &info);
}

/*
 * Function irlap_discovery_confirm (log)
 *
 *    A device has been discovered in front of this station, so we
 *    report directly to LMP.
 */
void irlap_discovery_confirm(struct irlap_cb *self, hashbin_t *discovery_log)
{
        IRDA_ASSERT(self != NULL, return;);
        IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

        IRDA_ASSERT(self->notify.instance != NULL, return;);

        /*
         * Check for successful discovery, since we are then allowed to clear
         * the media busy condition (IrLAP 6.13.4 - p.94). This should allow
         * us to make connection attempts much faster and easier (i.e. no
         * collisions).
         * Setting media busy to false will also generate an event allowing
         * us to process pending events in the NDM state machine.
         * Note : the spec doesn't define what a successful discovery is.
         * If we want Ultra to work, it's successful even if there is
         * nobody discovered - Jean II
         */
        if (discovery_log)
                irda_device_set_media_busy(self->netdev, FALSE);

        /* Inform IrLMP */
        irlmp_link_discovery_confirm(self->notify.instance, discovery_log);
}

/*
 * Function irlap_discovery_indication (log)
 *
 *    Somebody is trying to discover us!
 *
 */
void irlap_discovery_indication(struct irlap_cb *self, discovery_t *discovery)
{
        IRDA_DEBUG(4, "%s()\n", __func__);

        IRDA_ASSERT(self != NULL, return;);
        IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
        IRDA_ASSERT(discovery != NULL, return;);

        IRDA_ASSERT(self->notify.instance != NULL, return;);

        /* A device is very likely to connect immediately after it performs
         * a successful discovery. This means that in our case, we are much
         * more likely to receive a connection request over the medium.
         * So, we back off to avoid collisions.
         * IrLAP spec 6.13.4 suggests 100ms...
         * Note : this little trick actually makes a *BIG* difference. If I
         * set up my Linux box with discovery enabled and one Ultra frame
         * sent every second, my Palm has no trouble connecting to it every
         * time! Jean II */
        irda_device_set_media_busy(self->netdev, SMALL);

        irlmp_link_discovery_indication(self->notify.instance, discovery);
}

/*
 * Function irlap_status_indication (quality_of_link)
 */
void irlap_status_indication(struct irlap_cb *self, int quality_of_link)
{
        switch (quality_of_link) {
        case STATUS_NO_ACTIVITY:
                net_info_ratelimited("IrLAP, no activity on link!\n");
                break;
        case STATUS_NOISY:
                net_info_ratelimited("IrLAP, noisy link!\n");
                break;
        default:
                break;
        }
        irlmp_status_indication(self->notify.instance,
                                quality_of_link, LOCK_NO_CHANGE);
}

/*
 * Function irlap_reset_indication (void)
 */
void irlap_reset_indication(struct irlap_cb *self)
{
        IRDA_DEBUG(1, "%s()\n", __func__);

        IRDA_ASSERT(self != NULL, return;);
        IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

        if (self->state == LAP_RESET_WAIT)
                irlap_do_event(self, RESET_REQUEST, NULL, NULL);
        else
                irlap_do_event(self, RESET_RESPONSE, NULL, NULL);
}

/*
 * Function irlap_reset_confirm (void)
 */
void irlap_reset_confirm(void)
{
        IRDA_DEBUG(1, "%s()\n", __func__);
}

/*
 * Function irlap_generate_rand_time_slot (S, s)
 *
 *    Generate a random time slot between s and S-1 where
 *    S = Number of slots (0 -> S-1)
 *    s = Current slot
 */
int irlap_generate_rand_time_slot(int S, int s)
{
        static int rand;
        int slot;

        IRDA_ASSERT((S - s) > 0, return 0;);

        rand += jiffies;
        rand ^= (rand << 12);
        rand ^= (rand >> 20);

        slot = s + rand % (S-s);

        /* The result must lie within [s, S-1] */
        IRDA_ASSERT((slot >= s) && (slot < S), return 0;);

        return slot;
}
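
/*
 * Worked example (illustrative, not part of the original code): with
 * S = 8 discovery slots and current slot s = 2, rand % (S - s) yields a
 * value in 0..5 (for a non-negative rand), so the returned slot always
 * falls in the range 2..7.
 */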

/*
 * Function irlap_update_nr_received (nr)
 *
 *    Remove all acknowledged frames in current window queue. This code is
 *    not intuitive and you should not try to change it. If you think it
 *    contains bugs, please mail a patch to the author instead.
 */
void irlap_update_nr_received(struct irlap_cb *self, int nr)
{
        struct sk_buff *skb = NULL;
        int count = 0;

        /*
         * Remove all the ack-ed frames from the window queue.
         */

        /*
         * Optimize for the common case. It is most likely that the receiver
         * will acknowledge all the frames we have sent! So in that case we
         * delete all frames stored in window.
         */
        if (nr == self->vs) {
                while ((skb = skb_dequeue(&self->wx_list)) != NULL) {
                        dev_kfree_skb(skb);
                }
                /* The last acked frame is the next to send minus one */
                self->va = nr - 1;
        } else {
                /* Remove all acknowledged frames in current window */
                while ((skb_peek(&self->wx_list) != NULL) &&
                       (((self->va+1) % 8) != nr))
                {
                        skb = skb_dequeue(&self->wx_list);
                        dev_kfree_skb(skb);

                        self->va = (self->va + 1) % 8;
                        count++;
                }
        }

        /* Advance window */
        self->window = self->window_size - skb_queue_len(&self->wx_list);
}
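
/*
 * Worked example (illustrative, not part of the original code): with
 * va = 7 (nothing acked yet) and an incoming nr = 2, the loop above
 * dequeues two frames and steps va through 0 and then 1, i.e. frames
 * 0 and 1 are treated as acknowledged while frame 2 is still outstanding.
 */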

/*
 * Function irlap_validate_ns_received (ns)
 *
 *    Validate the next to send (ns) field from received frame.
 */
int irlap_validate_ns_received(struct irlap_cb *self, int ns)
{
        /* ns as expected? */
        if (ns == self->vr)
                return NS_EXPECTED;
        /*
         * Stations are allowed to treat invalid NS as unexpected NS
         * IrLAP, Recv ... with-invalid-Ns. p. 84
         */
        return NS_UNEXPECTED;

        /* return NR_INVALID; */
}
/*
 * Function irlap_validate_nr_received (nr)
 *
 *    Validate the next to receive (nr) field from received frame.
 *
 */
int irlap_validate_nr_received(struct irlap_cb *self, int nr)
{
        /* nr as expected? */
        if (nr == self->vs) {
                IRDA_DEBUG(4, "%s(), expected!\n", __func__);
                return NR_EXPECTED;
        }

        /*
         * Unexpected nr? (but within current window), first we check if the
         * ns numbers of the frames in the current window wrap.
         */
        if (self->va < self->vs) {
                if ((nr >= self->va) && (nr <= self->vs))
                        return NR_UNEXPECTED;
        } else {
                if ((nr >= self->va) || (nr <= self->vs))
                        return NR_UNEXPECTED;
        }

        /* Invalid nr! */
        return NR_INVALID;
}
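
/*
 * Worked example (illustrative, not part of the original code): if the
 * window has wrapped, e.g. va = 6 and vs = 1, then both nr = 7 and
 * nr = 0 satisfy the second test above and are reported as NR_UNEXPECTED
 * (valid, but not acknowledging everything), while something like nr = 4
 * falls outside the window and is NR_INVALID.
 */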

/*
 * Function irlap_initiate_connection_state ()
 *
 *    Initialize the connection state parameters
 *
 */
void irlap_initiate_connection_state(struct irlap_cb *self)
{
        IRDA_DEBUG(4, "%s()\n", __func__);

        IRDA_ASSERT(self != NULL, return;);
        IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

        /* Next to send and next to receive */
        self->vs = self->vr = 0;

        /* Last frame which got acked (0 - 1) % 8 */
        self->va = 7;

        self->window = 1;

        self->remote_busy = FALSE;
        self->retry_count = 0;
}
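
/*
 * Illustrative note (added for clarity, not in the original source):
 * sequence numbers are counted modulo 8, so starting va at 7 is simply
 * (vs - 1) mod 8 with vs = 0, i.e. "no frame has been acknowledged yet";
 * the first real acknowledgement advances va to 0.
 */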

/*
 * Function irlap_wait_min_turn_around (self, qos)
 *
 *    Wait the negotiated minimum turn around time; this function actually
 *    sets the number of XBOF's that must be sent before the next
 *    transmitted frame in order to delay for the specified amount of time.
 *    This is done to avoid using timers, and the forbidden udelay!
 */
void irlap_wait_min_turn_around(struct irlap_cb *self, struct qos_info *qos)
{
        __u32 min_turn_time;
        __u32 speed;

        /* Get QoS values. */
        speed = qos->baud_rate.value;
        min_turn_time = qos->min_turn_time.value;

        /* No need to calculate XBOFs for speeds over 115200 bps */
        if (speed > 115200) {
                self->mtt_required = min_turn_time;
                return;
        }

        /*
         * Send additional BOF's for the next frame for the requested
         * min turn time, so now we must calculate how many chars (XBOF's) we
         * must send for the requested time period (min turn time)
         */
        self->xbofs_delay = irlap_min_turn_time_in_bytes(speed, min_turn_time);
}
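
/*
 * Worked example (illustrative, not part of the original code): at
 * 115200 bps an async character takes roughly 10 bits / 115200 bps,
 * i.e. about 87 us on the wire, so padding out a 1 ms minimum turn
 * around time needs on the order of a dozen extra XBOF characters.
 * irlap_min_turn_time_in_bytes() does the exact calculation.
 */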

/*
 * Function irlap_flush_all_queues (void)
 *
 *    Flush all queues
 *
 */
void irlap_flush_all_queues(struct irlap_cb *self)
{
        struct sk_buff *skb;

        IRDA_ASSERT(self != NULL, return;);
        IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

        /* Free transmission queue */
        while ((skb = skb_dequeue(&self->txq)) != NULL)
                dev_kfree_skb(skb);

        while ((skb = skb_dequeue(&self->txq_ultra)) != NULL)
                dev_kfree_skb(skb);

        /* Free sliding window buffered packets */
        while ((skb = skb_dequeue(&self->wx_list)) != NULL)
                dev_kfree_skb(skb);
}

/*
 * Function irlap_change_speed (self, speed, now)
 *
 *    Change the speed of the IrDA port
 *
 */
static void irlap_change_speed(struct irlap_cb *self, __u32 speed, int now)
{
        struct sk_buff *skb;

        IRDA_DEBUG(0, "%s(), setting speed to %d\n", __func__, speed);

        IRDA_ASSERT(self != NULL, return;);
        IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

        self->speed = speed;

        /* Change speed now, or just piggyback speed on frames */
        if (now) {
                /* Send down empty frame to trigger speed change */
                skb = alloc_skb(0, GFP_ATOMIC);
                if (skb)
                        irlap_queue_xmit(self, skb);
        }
}

/*
 * Function irlap_init_qos_capabilities (self, qos)
 *
 *    Initialize QoS for this IrLAP session. What we do is to compute the
 *    intersection of the QoS capabilities of the user, the driver and
 *    IrLAP itself. Normally, IrLAP will not specify any values, but it can
 *    be used to restrict certain values.
 */
static void irlap_init_qos_capabilities(struct irlap_cb *self,
                                        struct qos_info *qos_user)
{
        IRDA_ASSERT(self != NULL, return;);
        IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
        IRDA_ASSERT(self->netdev != NULL, return;);

        /* Start out with the maximum QoS support possible */
        irda_init_max_qos_capabilies(&self->qos_rx);

        /* Apply the driver's QoS capabilities */
        irda_qos_compute_intersection(&self->qos_rx, self->qos_dev);

        /*
         * Check for user supplied QoS parameters. The service user is only
         * allowed to supply these values. We check each parameter since the
         * user may not have set all of them.
         */
        if (qos_user) {
                IRDA_DEBUG(1, "%s(), Found user specified QoS!\n", __func__);

                if (qos_user->baud_rate.bits)
                        self->qos_rx.baud_rate.bits &= qos_user->baud_rate.bits;

                if (qos_user->max_turn_time.bits)
                        self->qos_rx.max_turn_time.bits &= qos_user->max_turn_time.bits;
                if (qos_user->data_size.bits)
                        self->qos_rx.data_size.bits &= qos_user->data_size.bits;

                if (qos_user->link_disc_time.bits)
                        self->qos_rx.link_disc_time.bits &= qos_user->link_disc_time.bits;
        }

        /* Use 500ms in IrLAP for now */
        self->qos_rx.max_turn_time.bits &= 0x01;

        /* Set data size */
        /*self->qos_rx.data_size.bits &= 0x03;*/

        irda_qos_bits_to_value(&self->qos_rx);
}
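
/*
 * Illustrative note (added for clarity, not in the original source): each
 * QoS parameter is a bit field where every bit stands for one allowed
 * value, so intersecting capabilities is a plain AND.  For example, if
 * the driver advertises several baud rates but the service user masks
 * baud_rate.bits down to a single bit, only that rate survives the AND,
 * and irda_qos_bits_to_value() then translates the remaining bits into
 * concrete negotiated values.
 */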

/*
 * Function irlap_apply_default_connection_parameters (void, now)
 *
 *    Use the default connection and transmission parameters
 */
void irlap_apply_default_connection_parameters(struct irlap_cb *self)
{
        IRDA_DEBUG(4, "%s()\n", __func__);

        IRDA_ASSERT(self != NULL, return;);
        IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

        /* xbofs : Default value in NDM */
        self->next_bofs  = 12;
        self->bofs_count = 12;

        /* NDM Speed is 9600 */
        irlap_change_speed(self, 9600, TRUE);

        /* Set mbusy when going to NDM state */
        irda_device_set_media_busy(self->netdev, TRUE);

        /*
         * Generate random connection address for this session, which must
         * be 7 bits wide and different from 0x00 and 0xfe
         */
        while ((self->caddr == 0x00) || (self->caddr == 0xfe)) {
                get_random_bytes(&self->caddr, sizeof(self->caddr));
                self->caddr &= 0xfe;
        }

        /* Use default values until connection has been negotiated */
        self->slot_timeout = sysctl_slot_timeout;
        self->final_timeout = FINAL_TIMEOUT;
        self->poll_timeout = POLL_TIMEOUT;
        self->wd_timeout = WD_TIMEOUT;

        /* Set some default values */
        self->qos_tx.baud_rate.value = 9600;
        self->qos_rx.baud_rate.value = 9600;
        self->qos_tx.max_turn_time.value = 0;
        self->qos_rx.max_turn_time.value = 0;
        self->qos_tx.min_turn_time.value = 0;
        self->qos_rx.min_turn_time.value = 0;
        self->qos_tx.data_size.value = 64;
        self->qos_rx.data_size.value = 64;
        self->qos_tx.window_size.value = 1;
        self->qos_rx.window_size.value = 1;
        self->qos_tx.additional_bofs.value = 12;
        self->qos_rx.additional_bofs.value = 12;
        self->qos_tx.link_disc_time.value = 0;
        self->qos_rx.link_disc_time.value = 0;

        irlap_flush_all_queues(self);

        self->disconnect_pending = FALSE;
        self->connect_pending = FALSE;
}

/*
 * Function irlap_apply_connection_parameters (qos, now)
 *
 *    Initialize IrLAP with the negotiated QoS values
 *
 *    If 'now' is false, the speed and xbofs will be changed after the next
 *    frame is sent.
 *    If 'now' is true, the speed and xbofs are changed immediately
 */
void irlap_apply_connection_parameters(struct irlap_cb *self, int now)
{
        IRDA_DEBUG(4, "%s()\n", __func__);

        IRDA_ASSERT(self != NULL, return;);
        IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

        /* Set the negotiated xbofs value */
        self->next_bofs = self->qos_tx.additional_bofs.value;
        if (now)
                self->bofs_count = self->next_bofs;

        /* Set the negotiated link speed (may need the new xbofs value) */
        irlap_change_speed(self, self->qos_tx.baud_rate.value, now);

        self->window_size = self->qos_tx.window_size.value;
        self->window      = self->qos_tx.window_size.value;

#ifdef CONFIG_IRDA_DYNAMIC_WINDOW
        /*
         * Calculate how many bytes it is possible to transmit before the
         * link must be turned around
         */
        self->line_capacity =
                irlap_max_line_capacity(self->qos_tx.baud_rate.value,
                                        self->qos_tx.max_turn_time.value);
        self->bytes_left = self->line_capacity;
#endif /* CONFIG_IRDA_DYNAMIC_WINDOW */

        /*
         * Initialize timeout values; some of the rules are listed on
         * page 92 in IrLAP.
         */
        IRDA_ASSERT(self->qos_tx.max_turn_time.value != 0, return;);
        IRDA_ASSERT(self->qos_rx.max_turn_time.value != 0, return;);
        /* The poll timeout applies only to the primary station.
         * It defines the maximum time the primary stays in XMIT mode
         * before timing out and turning the link around (sending a RR).
         * In other words, this is how long we can keep the pf bit in
         * primary mode.
         * Therefore, it must be lower than or equal to our *OWN* max turn
         * around time - Jean II */
        self->poll_timeout = self->qos_tx.max_turn_time.value * HZ / 1000;
        /* The Final timeout applies only to the primary station.
         * It defines the maximum time the primary waits (mostly in RECV
         * mode) for an answer from the secondary station before polling
         * it again.
         * Therefore, it must be greater than or equal to our *PARTNER*
         * max turn around time - Jean II */
        self->final_timeout = self->qos_rx.max_turn_time.value * HZ / 1000;
        /* The Watchdog Bit timeout applies only to the secondary station.
         * It defines the maximum time the secondary waits (mostly in RECV
         * mode) for a poll from the primary station before getting annoyed.
         * Therefore, it must be greater than or equal to our *PARTNER*
         * max turn around time - Jean II */
        self->wd_timeout = self->final_timeout * 2;

        /*
         * N1 and N2 are the maximum retry counts for *both* the final timer
         * and the wd timer (with a factor 2) as defined above.
         * After N1 retries of a timer, we give a warning to the user.
         * After N2 retries, we consider the link dead and disconnect it.
         * Jean II
         */

        /*
         * Set N1 to 0 if Link Disconnect/Threshold Time = 3 and set it to
         * 3 seconds otherwise. See page 71 in IrLAP for more details.
         * Actually, it's not always 3 seconds, as we allow it to be set
         * via sysctl... Max maxtt is 500ms, and N1 needs to be a multiple
         * of 2, so 1 second is the minimum we can allow. - Jean II
         */
        if (self->qos_tx.link_disc_time.value == sysctl_warn_noreply_time)
                /*
                 * If we set N1 to 0, it will trigger immediately, which is
                 * not what we want. What we really want is to disable it,
                 * Jean II
                 */
                self->N1 = -2; /* Disable - Needs to be a multiple of 2 */
        else
                self->N1 = sysctl_warn_noreply_time * 1000 /
                        self->qos_rx.max_turn_time.value;

        IRDA_DEBUG(4, "Setting N1 = %d\n", self->N1);

        /* Set N2 to match our own disconnect time */
        self->N2 = self->qos_tx.link_disc_time.value * 1000 /
                self->qos_rx.max_turn_time.value;
        IRDA_DEBUG(4, "Setting N2 = %d\n", self->N2);
}
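
/*
 * Worked example (illustrative, not part of the original code): with the
 * partner's max turn time negotiated to 500 ms, a 3 s warning threshold
 * gives N1 = 3 * 1000 / 500 = 6 final-timer expiries before the "no
 * reply" warning, and a 12 s link disconnect time gives
 * N2 = 12 * 1000 / 500 = 24 expiries before the link is declared dead.
 */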

#ifdef CONFIG_PROC_FS
struct irlap_iter_state {
        int id;
};

static void *irlap_seq_start(struct seq_file *seq, loff_t *pos)
{
        struct irlap_iter_state *iter = seq->private;
        struct irlap_cb *self;

        /* Protect our access to the irlap list */
        spin_lock_irq(&irlap->hb_spinlock);
        iter->id = 0;

        for (self = (struct irlap_cb *) hashbin_get_first(irlap);
             self; self = (struct irlap_cb *) hashbin_get_next(irlap)) {
                if (iter->id == *pos)
                        break;
                ++iter->id;
        }

        return self;
}

static void *irlap_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct irlap_iter_state *iter = seq->private;

        ++*pos;
        ++iter->id;
        return (void *) hashbin_get_next(irlap);
}

static void irlap_seq_stop(struct seq_file *seq, void *v)
{
        spin_unlock_irq(&irlap->hb_spinlock);
}

static int irlap_seq_show(struct seq_file *seq, void *v)
{
        const struct irlap_iter_state *iter = seq->private;
        const struct irlap_cb *self = v;

        IRDA_ASSERT(self->magic == LAP_MAGIC, return -EINVAL;);

        seq_printf(seq, "irlap%d ", iter->id);
        seq_printf(seq, "state: %s\n",
                   irlap_state[self->state]);

        seq_printf(seq, "  device name: %s, ",
                   (self->netdev) ? self->netdev->name : "bug");
        seq_printf(seq, "hardware name: %s\n", self->hw_name);

        seq_printf(seq, "  caddr: %#02x, ", self->caddr);
        seq_printf(seq, "saddr: %#08x, ", self->saddr);
        seq_printf(seq, "daddr: %#08x\n", self->daddr);

        seq_printf(seq, "  win size: %d, ",
                   self->window_size);
        seq_printf(seq, "win: %d, ", self->window);
#ifdef CONFIG_IRDA_DYNAMIC_WINDOW
        seq_printf(seq, "line capacity: %d, ",
                   self->line_capacity);
        seq_printf(seq, "bytes left: %d\n", self->bytes_left);
#endif /* CONFIG_IRDA_DYNAMIC_WINDOW */
        seq_printf(seq, "  tx queue len: %d ",
                   skb_queue_len(&self->txq));
        seq_printf(seq, "win queue len: %d ",
                   skb_queue_len(&self->wx_list));
        seq_printf(seq, "rbusy: %s", self->remote_busy ?
                   "TRUE" : "FALSE");
        seq_printf(seq, " mbusy: %s\n", self->media_busy ?
                   "TRUE" : "FALSE");

        seq_printf(seq, "  retrans: %d ", self->retry_count);
        seq_printf(seq, "vs: %d ", self->vs);
        seq_printf(seq, "vr: %d ", self->vr);
        seq_printf(seq, "va: %d\n", self->va);

        seq_printf(seq, "  qos\tbps\tmaxtt\tdsize\twinsize\taddbofs\tmintt\tldisc\tcomp\n");

        seq_printf(seq, "  tx\t%d\t",
                   self->qos_tx.baud_rate.value);
        seq_printf(seq, "%d\t",
                   self->qos_tx.max_turn_time.value);
        seq_printf(seq, "%d\t",
                   self->qos_tx.data_size.value);
        seq_printf(seq, "%d\t",
                   self->qos_tx.window_size.value);
        seq_printf(seq, "%d\t",
                   self->qos_tx.additional_bofs.value);
        seq_printf(seq, "%d\t",
                   self->qos_tx.min_turn_time.value);
        seq_printf(seq, "%d\t",
                   self->qos_tx.link_disc_time.value);
        seq_printf(seq, "\n");

        seq_printf(seq, "  rx\t%d\t",
                   self->qos_rx.baud_rate.value);
        seq_printf(seq, "%d\t",
                   self->qos_rx.max_turn_time.value);
        seq_printf(seq, "%d\t",
                   self->qos_rx.data_size.value);
        seq_printf(seq, "%d\t",
                   self->qos_rx.window_size.value);
        seq_printf(seq, "%d\t",
                   self->qos_rx.additional_bofs.value);
        seq_printf(seq, "%d\t",
                   self->qos_rx.min_turn_time.value);
        seq_printf(seq, "%d\n",
                   self->qos_rx.link_disc_time.value);

        return 0;
}
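
/*
 * Illustrative sketch (not output captured from a real system): one entry
 * rendered by irlap_seq_show() above comes out roughly as
 *
 *   irlap0 state: LAP_NDM
 *     device name: irda0, hardware name: ...
 *     caddr: 0x.., saddr: 0x.., daddr: 0x..
 *     ...
 *     qos   bps     maxtt   dsize   winsize addbofs mintt   ldisc   comp
 *     tx    9600    500     64      1       12      0       0
 *     rx    9600    500     64      1       12      0       0
 */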

static const struct seq_operations irlap_seq_ops = {
        .start  = irlap_seq_start,
        .next   = irlap_seq_next,
        .stop   = irlap_seq_stop,
        .show   = irlap_seq_show,
};

static int irlap_seq_open(struct inode *inode, struct file *file)
{
        if (irlap == NULL)
                return -EINVAL;

        return seq_open_private(file, &irlap_seq_ops,
                                sizeof(struct irlap_iter_state));
}

const struct file_operations irlap_seq_fops = {
        .owner          = THIS_MODULE,
        .open           = irlap_seq_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release_private,
};

#endif /* CONFIG_PROC_FS */