1 /* -----------------------------------------------------------------------------
2 * Copyright (c) 2011 Ozmo Inc
3 * Released under the GNU General Public License Version 2 (GPLv2).
4 * -----------------------------------------------------------------------------
 */
7 #include <linux/module.h>
8 #include <linux/timer.h>
9 #include <linux/sched.h>
10 #include <linux/netdevice.h>
11 #include <linux/etherdevice.h>
12 #include <linux/errno.h>
13 #include <linux/ieee80211.h>
14 #include <linux/slab.h>
16 #include "ozprotocol.h"
23 #include <asm/unaligned.h>
24 #include <linux/uaccess.h>
25 #include <net/psnap.h>
27 #define OZ_CF_CONN_SUCCESS 1
28 #define OZ_CF_CONN_FAILURE 2
/*
 * Fields of the per-network-device binding (presumably struct oz_binding —
 * TODO confirm: the struct header line is not visible in this fragment).
 * ptype registers the OZ ethertype handler; name is the bound netdev name.
 */
34 struct packet_type ptype
;
35 char name
[OZ_MAX_BINDING_LEN
];
36 struct list_head link
;
/*
 * File-scope state: the PD list and binding list with their locks, the
 * shared RX skb queue, the session-id counter, the enabled-application
 * bitmask (bit 0 set by default), and the slab caches for element info
 * and TX frames. NOTE(review): fragmentary extraction — some declarations
 * from the upstream driver appear to be missing.
 */
43 DEFINE_SPINLOCK(g_polling_lock
);
45 * Static external variables.
47 static LIST_HEAD(g_pd_list
);
48 static LIST_HEAD(g_binding
);
49 static DEFINE_SPINLOCK(g_binding_lock
);
50 static struct sk_buff_head g_rx_queue
;
51 static u8 g_session_id
;
52 static u16 g_apps
= 0x1;
53 static int g_processing_rx
;
55 struct kmem_cache
*oz_elt_info_cache
;
56 struct kmem_cache
*oz_tx_frame_cache
;
/* Context: softirq-serialized */
/*
 * oz_get_new_session_id() - advance g_session_id, skipping the value
 * given in @exclude (re-incrementing when the new id collides with it).
 * NOTE(review): fragmentary — the wrap-to-nonzero assignments, braces and
 * return statement from the upstream driver are missing here.
 */
61 static u8
oz_get_new_session_id(u8 exclude
)
63 if (++g_session_id
== 0)
65 if (g_session_id
== exclude
) {
66 if (++g_session_id
== 0)
/* Context: softirq-serialized */
/*
 * oz_send_conn_rsp() - build an OZ_ELT_CONNECT_RSP frame for @pd carrying
 * @status, the PD's mode/session id and the negotiated app mask, and fill
 * in the device-level header (presumably followed by a transmit call —
 * TODO confirm; the tx statement is not visible in this fragment).
 * NOTE(review): fragmentary — allocation-failure handling, braces and the
 * final transmit from the upstream driver are missing here.
 */
75 static void oz_send_conn_rsp(struct oz_pd
*pd
, u8 status
)
78 struct net_device
*dev
= pd
->net_dev
;
79 struct oz_hdr
*oz_hdr
;
81 struct oz_elt_connect_rsp
*body
;
83 int sz
= sizeof(struct oz_hdr
) + sizeof(struct oz_elt
) +
84 sizeof(struct oz_elt_connect_rsp
);
85 skb
= alloc_skb(sz
+ OZ_ALLOCATED_SPACE(dev
), GFP_ATOMIC
);
88 skb_reserve(skb
, LL_RESERVED_SPACE(dev
));
89 skb_reset_network_header(skb
);
90 oz_hdr
= (struct oz_hdr
*)skb_put(skb
, sz
);
91 elt
= (struct oz_elt
*)(oz_hdr
+1);
92 body
= (struct oz_elt_connect_rsp
*)(elt
+1);
94 skb
->protocol
= htons(OZ_ETHERTYPE
);
95 /* Fill in device header */
96 if (dev_hard_header(skb
, dev
, OZ_ETHERTYPE
, pd
->mac_addr
,
97 dev
->dev_addr
, skb
->len
) < 0) {
101 oz_hdr
->control
= (OZ_PROTOCOL_VERSION
<<OZ_VERSION_SHIFT
);
102 oz_hdr
->last_pkt_num
= 0;
103 put_unaligned(0, &oz_hdr
->pkt_num
);
104 elt
->type
= OZ_ELT_CONNECT_RSP
;
105 elt
->length
= sizeof(struct oz_elt_connect_rsp
);
106 memset(body
, 0, sizeof(struct oz_elt_connect_rsp
));
107 body
->status
= status
;
109 body
->mode
= pd
->mode
;
110 body
->session_id
= pd
->session_id
;
111 put_unaligned(cpu_to_le16(pd
->total_apps
), &body
->apps
);
113 oz_dbg(ON
, "TX: OZ_ELT_CONNECT_RSP %d", status
);
/* Context: softirq-serialized */
/*
 * pd_set_keepalive() - decode the keepalive byte @kalive (value bits
 * scaled by the type bits: special/seconds/minutes/hours) into
 * pd->keep_alive in milliseconds.
 * NOTE(review): fragmentary — several case labels, break statements and
 * braces from the upstream driver are missing here.
 */
120 static void pd_set_keepalive(struct oz_pd
*pd
, u8 kalive
)
122 unsigned long keep_alive
= kalive
& OZ_KALIVE_VALUE_MASK
;
124 switch (kalive
& OZ_KALIVE_TYPE_MASK
) {
125 case OZ_KALIVE_SPECIAL
:
126 pd
->keep_alive
= keep_alive
* 1000*60*60*24*20;
129 pd
->keep_alive
= keep_alive
*1000;
132 pd
->keep_alive
= keep_alive
*1000*60;
134 case OZ_KALIVE_HOURS
:
135 pd
->keep_alive
= keep_alive
*1000*60*60;
140 oz_dbg(ON
, "Keepalive = %lu mSec\n", pd
->keep_alive
);
/* Context: softirq-serialized */
/*
 * pd_set_presleep() - set pd->presleep from @presleep (units of 100 ms),
 * falling back to OZ_PRESLEEP_TOUT, and — presumably when @start_timer is
 * set (TODO confirm; the condition is not visible) — re-arm the timeout
 * timer, dropping g_polling_lock around oz_timer_add().
 * NOTE(review): fragmentary — the if/else structure and braces from the
 * upstream driver are missing here.
 */
146 static void pd_set_presleep(struct oz_pd
*pd
, u8 presleep
, u8 start_timer
)
149 pd
->presleep
= presleep
*100;
151 pd
->presleep
= OZ_PRESLEEP_TOUT
;
153 spin_unlock(&g_polling_lock
);
154 oz_timer_add(pd
, OZ_TIMER_TOUT
, pd
->presleep
);
155 spin_lock(&g_polling_lock
);
157 oz_dbg(ON
, "Presleep time = %lu mSec\n", pd
->presleep
);
/* Context: softirq-serialized */
/*
 * oz_connect_req() - handle an OZ_ELT_CONNECT_REQ element: find or
 * allocate the PD for @pd_addr (guarding against a concurrent insert),
 * adopt @net_dev, copy mode/isoc/latency/presleep/keepalive parameters
 * from the request body, negotiate the application mask and session id
 * (detecting session mismatch/teardown), start/stop/resume services on
 * success, and send a connect response with the resulting status.
 * Returns the PD in use (presumably NULL on failure — TODO confirm; the
 * return statements are not visible in this fragment).
 * NOTE(review): fragmentary — many statements, braces, case labels and
 * the cleanup paths from the upstream driver are missing here.
 */
163 static struct oz_pd
*oz_connect_req(struct oz_pd
*cur_pd
, struct oz_elt
*elt
,
164 const u8
*pd_addr
, struct net_device
*net_dev
)
167 struct oz_elt_connect_req
*body
=
168 (struct oz_elt_connect_req
*)(elt
+1);
169 u8 rsp_status
= OZ_STATUS_SUCCESS
;
171 u16 new_apps
= g_apps
;
172 struct net_device
*old_net_dev
= NULL
;
173 struct oz_pd
*free_pd
= NULL
;
177 spin_lock_bh(&g_polling_lock
);
179 struct oz_pd
*pd2
= NULL
;
182 pd
= oz_pd_alloc(pd_addr
);
185 getnstimeofday(&pd
->last_rx_timestamp
);
186 spin_lock_bh(&g_polling_lock
);
187 list_for_each(e
, &g_pd_list
) {
188 pd2
= list_entry(e
, struct oz_pd
, link
);
189 if (ether_addr_equal(pd2
->mac_addr
, pd_addr
)) {
196 list_add_tail(&pd
->link
, &g_pd_list
);
199 spin_unlock_bh(&g_polling_lock
);
202 if (pd
->net_dev
!= net_dev
) {
203 old_net_dev
= pd
->net_dev
;
205 pd
->net_dev
= net_dev
;
207 oz_dbg(ON
, "Host vendor: %d\n", body
->host_vendor
);
208 pd
->max_tx_size
= OZ_MAX_TX_SIZE
;
209 pd
->mode
= body
->mode
;
210 pd
->pd_info
= body
->pd_info
;
211 if (pd
->mode
& OZ_F_ISOC_NO_ELTS
) {
212 pd
->ms_per_isoc
= body
->ms_per_isoc
;
213 if (!pd
->ms_per_isoc
)
216 switch (body
->ms_isoc_latency
& OZ_LATENCY_MASK
) {
217 case OZ_ONE_MS_LATENCY
:
218 pd
->isoc_latency
= (body
->ms_isoc_latency
&
219 ~OZ_LATENCY_MASK
) / pd
->ms_per_isoc
;
221 case OZ_TEN_MS_LATENCY
:
222 pd
->isoc_latency
= ((body
->ms_isoc_latency
&
223 ~OZ_LATENCY_MASK
) * 10) / pd
->ms_per_isoc
;
226 pd
->isoc_latency
= OZ_MAX_TX_QUEUE_ISOC
;
229 if (body
->max_len_div16
)
230 pd
->max_tx_size
= ((u16
)body
->max_len_div16
)<<4;
231 oz_dbg(ON
, "Max frame:%u Ms per isoc:%u\n",
232 pd
->max_tx_size
, pd
->ms_per_isoc
);
233 pd
->max_stream_buffering
= 3*1024;
234 pd
->pulse_period
= OZ_QUANTUM
;
235 pd_set_presleep(pd
, body
->presleep
, 0);
236 pd_set_keepalive(pd
, body
->keep_alive
);
238 new_apps
&= le16_to_cpu(get_unaligned(&body
->apps
));
239 if ((new_apps
& 0x1) && (body
->session_id
)) {
240 if (pd
->session_id
) {
241 if (pd
->session_id
!= body
->session_id
) {
242 rsp_status
= OZ_STATUS_SESSION_MISMATCH
;
246 new_apps
&= ~0x1; /* Resume not permitted */
248 oz_get_new_session_id(body
->session_id
);
251 if (pd
->session_id
&& !body
->session_id
) {
252 rsp_status
= OZ_STATUS_SESSION_TEARDOWN
;
255 new_apps
&= ~0x1; /* Resume not permitted */
257 oz_get_new_session_id(body
->session_id
);
261 if (rsp_status
== OZ_STATUS_SUCCESS
) {
262 u16 start_apps
= new_apps
& ~pd
->total_apps
& ~0x1;
263 u16 stop_apps
= pd
->total_apps
& ~new_apps
& ~0x1;
264 u16 resume_apps
= new_apps
& pd
->paused_apps
& ~0x1;
266 spin_unlock_bh(&g_polling_lock
);
267 oz_pd_set_state(pd
, OZ_PD_S_CONNECTED
);
268 oz_dbg(ON
, "new_apps=0x%x total_apps=0x%x paused_apps=0x%x\n",
269 new_apps
, pd
->total_apps
, pd
->paused_apps
);
271 if (oz_services_start(pd
, start_apps
, 0))
272 rsp_status
= OZ_STATUS_TOO_MANY_PDS
;
275 if (oz_services_start(pd
, resume_apps
, 1))
276 rsp_status
= OZ_STATUS_TOO_MANY_PDS
;
278 oz_services_stop(pd
, stop_apps
, 0);
279 oz_pd_request_heartbeat(pd
);
281 spin_unlock_bh(&g_polling_lock
);
283 oz_send_conn_rsp(pd
, rsp_status
);
284 if (rsp_status
!= OZ_STATUS_SUCCESS
) {
291 dev_put(old_net_dev
);
293 oz_pd_destroy(free_pd
);
/* Context: softirq-serialized */
/*
 * oz_add_farewell() - allocate a farewell record holding @len bytes of
 * @report for endpoint @ep_num/@index, scan pd->farewell_list for an
 * existing entry with the same endpoint/index (presumably to replace it —
 * TODO confirm; the replacement statements are not visible), and append
 * the new record under g_polling_lock.
 * NOTE(review): fragmentary — kmalloc-failure handling, field
 * assignments and braces from the upstream driver are missing here.
 */
300 static void oz_add_farewell(struct oz_pd
*pd
, u8 ep_num
, u8 index
,
301 const u8
*report
, u8 len
)
303 struct oz_farewell
*f
;
304 struct oz_farewell
*f2
;
307 f
= kmalloc(sizeof(struct oz_farewell
) + len
, GFP_ATOMIC
);
313 memcpy(f
->report
, report
, len
);
314 oz_dbg(ON
, "RX: Adding farewell report\n");
315 spin_lock(&g_polling_lock
);
316 list_for_each_entry(f2
, &pd
->farewell_list
, link
) {
317 if ((f2
->ep_num
== ep_num
) && (f2
->index
== index
)) {
323 list_add_tail(&f
->link
, &pd
->farewell_list
);
324 spin_unlock(&g_polling_lock
);
/* Context: softirq-serialized */
/*
 * oz_rx_frame() - softirq-serialized handler for one received Ozmo frame:
 * checks the protocol version, looks up the PD by source MAC, refreshes
 * the presleep timeout on a new-second or short-presleep RX, detects
 * duplicate packet numbers, handles triggered-mode ACK retirement and
 * queued-frame transmission, then walks the element chain dispatching
 * CONNECT_REQ / DISCONNECT / UPDATE_PARAM_REQ / FAREWELL_REQ / APP_DATA
 * elements.
 * NOTE(review): fragmentary — local declarations (mac_hdr, src_addr,
 * pkt_num, dup, length, elt), the switch statement, break statements,
 * braces and the exit/free path from the upstream driver are missing.
 */
332 static void oz_rx_frame(struct sk_buff
*skb
)
338 struct oz_pd
*pd
= NULL
;
339 struct oz_hdr
*oz_hdr
= (struct oz_hdr
*)skb_network_header(skb
);
340 struct timespec current_time
;
344 oz_dbg(RX_FRAMES
, "RX frame PN=0x%x LPN=0x%x control=0x%x\n",
345 oz_hdr
->pkt_num
, oz_hdr
->last_pkt_num
, oz_hdr
->control
);
346 mac_hdr
= skb_mac_header(skb
);
347 src_addr
= &mac_hdr
[ETH_ALEN
];
350 /* Check the version field */
351 if (oz_get_prot_ver(oz_hdr
->control
) != OZ_PROTOCOL_VERSION
) {
352 oz_dbg(ON
, "Incorrect protocol version: %d\n",
353 oz_get_prot_ver(oz_hdr
->control
));
357 pkt_num
= le32_to_cpu(get_unaligned(&oz_hdr
->pkt_num
));
359 pd
= oz_pd_find(src_addr
);
361 if (!(pd
->state
& OZ_PD_S_CONNECTED
))
362 oz_pd_set_state(pd
, OZ_PD_S_CONNECTED
);
363 getnstimeofday(&current_time
);
364 if ((current_time
.tv_sec
!= pd
->last_rx_timestamp
.tv_sec
) ||
365 (pd
->presleep
< MSEC_PER_SEC
)) {
366 oz_timer_add(pd
, OZ_TIMER_TOUT
, pd
->presleep
);
367 pd
->last_rx_timestamp
= current_time
;
369 if (pkt_num
!= pd
->last_rx_pkt_num
) {
370 pd
->last_rx_pkt_num
= pkt_num
;
373 oz_dbg(ON
, "Duplicate frame\n");
377 if (pd
&& !dup
&& ((pd
->mode
& OZ_MODE_MASK
) == OZ_MODE_TRIGGERED
)) {
378 oz_dbg(RX_FRAMES
, "Received TRIGGER Frame\n");
379 pd
->last_sent_frame
= &pd
->tx_queue
;
380 if (oz_hdr
->control
& OZ_F_ACK
) {
381 /* Retire completed frames */
382 oz_retire_tx_frames(pd
, oz_hdr
->last_pkt_num
);
384 if ((oz_hdr
->control
& OZ_F_ACK_REQUESTED
) &&
385 (pd
->state
== OZ_PD_S_CONNECTED
)) {
386 int backlog
= pd
->nb_queued_frames
;
388 pd
->trigger_pkt_num
= pkt_num
;
389 /* Send queued frames */
390 oz_send_queued_frames(pd
, backlog
);
394 length
-= sizeof(struct oz_hdr
);
395 elt
= (struct oz_elt
*)((u8
*)oz_hdr
+ sizeof(struct oz_hdr
));
397 while (length
>= sizeof(struct oz_elt
)) {
398 length
-= sizeof(struct oz_elt
) + elt
->length
;
402 case OZ_ELT_CONNECT_REQ
:
403 oz_dbg(ON
, "RX: OZ_ELT_CONNECT_REQ\n");
404 pd
= oz_connect_req(pd
, elt
, src_addr
, skb
->dev
);
406 case OZ_ELT_DISCONNECT
:
407 oz_dbg(ON
, "RX: OZ_ELT_DISCONNECT\n");
411 case OZ_ELT_UPDATE_PARAM_REQ
: {
412 struct oz_elt_update_param
*body
=
413 (struct oz_elt_update_param
*)(elt
+ 1);
414 oz_dbg(ON
, "RX: OZ_ELT_UPDATE_PARAM_REQ\n");
415 if (pd
&& (pd
->state
& OZ_PD_S_CONNECTED
)) {
416 spin_lock(&g_polling_lock
);
417 pd_set_keepalive(pd
, body
->keepalive
);
418 pd_set_presleep(pd
, body
->presleep
, 1);
419 spin_unlock(&g_polling_lock
);
423 case OZ_ELT_FAREWELL_REQ
: {
424 struct oz_elt_farewell
*body
=
425 (struct oz_elt_farewell
*)(elt
+ 1);
426 oz_dbg(ON
, "RX: OZ_ELT_FAREWELL_REQ\n");
427 oz_add_farewell(pd
, body
->ep_num
,
428 body
->index
, body
->report
,
429 elt
->length
+ 1 - sizeof(*body
));
432 case OZ_ELT_APP_DATA
:
433 if (pd
&& (pd
->state
& OZ_PD_S_CONNECTED
)) {
434 struct oz_app_hdr
*app_hdr
=
435 (struct oz_app_hdr
*)(elt
+1);
438 oz_handle_app_elt(pd
, app_hdr
->app_id
, elt
);
442 oz_dbg(ON
, "RX: Unknown elt %02x\n", elt
->type
);
444 elt
= oz_next_elt(elt
);
/*
 * oz_protocol_term() - driver shutdown: unbind every registered binding
 * (dropping g_binding_lock around dev_remove_pack/dev_put), stop every PD
 * on g_pd_list, then destroy the TX-frame and element-info slab caches.
 * NOTE(review): fragmentary — list_del/kfree of bindings, the PD stop
 * call, braces and two comment terminators from the upstream driver are
 * missing here.
 */
455 void oz_protocol_term(void)
457 struct oz_binding
*b
, *t
;
459 /* Walk the list of bindings and remove each one.
461 spin_lock_bh(&g_binding_lock
);
462 list_for_each_entry_safe(b
, t
, &g_binding
, link
) {
464 spin_unlock_bh(&g_binding_lock
);
465 dev_remove_pack(&b
->ptype
);
467 dev_put(b
->ptype
.dev
);
469 spin_lock_bh(&g_binding_lock
);
471 spin_unlock_bh(&g_binding_lock
);
472 /* Walk the list of PDs and stop each one. This causes the PD to be
473 * removed from the list so we can just pull each one from the head
476 spin_lock_bh(&g_polling_lock
);
477 while (!list_empty(&g_pd_list
)) {
479 list_first_entry(&g_pd_list
, struct oz_pd
, link
);
481 spin_unlock_bh(&g_polling_lock
);
484 spin_lock_bh(&g_polling_lock
);
486 spin_unlock_bh(&g_polling_lock
);
487 oz_dbg(ON
, "Protocol stopped\n");
489 kmem_cache_destroy(oz_tx_frame_cache
);
490 kmem_cache_destroy(oz_elt_info_cache
);
/*
 * oz_pd_heartbeat_handler() - tasklet body: snapshot the PD's active app
 * mask under g_polling_lock (only when connected) and run the per-app
 * heartbeat via oz_pd_heartbeat().
 * NOTE(review): fragmentary — the apps declaration/initializer and the
 * trailing oz_pd_put from the upstream driver are missing here.
 */
496 void oz_pd_heartbeat_handler(unsigned long data
)
498 struct oz_pd
*pd
= (struct oz_pd
*)data
;
501 spin_lock_bh(&g_polling_lock
);
502 if (pd
->state
& OZ_PD_S_CONNECTED
)
503 apps
= pd
->total_apps
;
504 spin_unlock_bh(&g_polling_lock
);
506 oz_pd_heartbeat(pd
, apps
);
/*
 * oz_pd_timeout_handler() - tasklet body: read the PD's timeout type
 * under g_polling_lock (presumably then dispatched on it — TODO confirm;
 * the switch body is not visible in this fragment).
 * NOTE(review): fragmentary — the type declaration, the switch on type
 * and the trailing oz_pd_put from the upstream driver are missing here.
 */
513 void oz_pd_timeout_handler(unsigned long data
)
516 struct oz_pd
*pd
= (struct oz_pd
*)data
;
518 spin_lock_bh(&g_polling_lock
);
519 type
= pd
->timeout_type
;
520 spin_unlock_bh(&g_polling_lock
);
/*
 * oz_pd_heartbeat_event() - hrtimer callback: advance the heartbeat timer
 * by pd->pulse_period (milliseconds split into sec/nsec for ktime_set),
 * schedule the heartbeat tasklet, and restart the timer.
 * NOTE(review): fragmentary — the pd declaration and an oz_pd_get
 * reference from the upstream driver are missing here.
 */
535 enum hrtimer_restart
oz_pd_heartbeat_event(struct hrtimer
*timer
)
539 pd
= container_of(timer
, struct oz_pd
, heartbeat
);
540 hrtimer_forward_now(timer
, ktime_set(pd
->pulse_period
/
541 MSEC_PER_SEC
, (pd
->pulse_period
% MSEC_PER_SEC
) * NSEC_PER_MSEC
));
543 tasklet_schedule(&pd
->heartbeat_tasklet
);
544 return HRTIMER_RESTART
;
/*
 * oz_pd_timeout_event() - hrtimer callback: schedule the PD's timeout
 * tasklet and do not restart the timer (one-shot).
 * NOTE(review): fragmentary — the pd declaration and an oz_pd_get
 * reference from the upstream driver are missing here.
 */
550 enum hrtimer_restart
oz_pd_timeout_event(struct hrtimer
*timer
)
554 pd
= container_of(timer
, struct oz_pd
, timeout
)
;
556 tasklet_schedule(&pd
->timeout_tasklet
);
557 return HRTIMER_NORESTART
;
/* Context: softirq or process */
/*
 * oz_timer_add() - arm a timer for @pd under g_polling_lock: for a
 * timeout-type timer, either re-program an already-active hrtimer's
 * expiry or start it fresh (due_time is milliseconds, split into
 * sec/nsec); for OZ_TIMER_HEARTBEAT, start the heartbeat hrtimer only
 * if not already running.
 * NOTE(review): fragmentary — the enclosing switch, case labels, an
 * else and several braces from the upstream driver are missing here.
 */
563 void oz_timer_add(struct oz_pd
*pd
, int type
, unsigned long due_time
)
565 spin_lock_bh(&g_polling_lock
);
569 if (hrtimer_active(&pd
->timeout
)) {
570 hrtimer_set_expires(&pd
->timeout
, ktime_set(due_time
/
571 MSEC_PER_SEC
, (due_time
% MSEC_PER_SEC
) *
573 hrtimer_start_expires(&pd
->timeout
, HRTIMER_MODE_REL
);
575 hrtimer_start(&pd
->timeout
, ktime_set(due_time
/
576 MSEC_PER_SEC
, (due_time
% MSEC_PER_SEC
) *
577 NSEC_PER_MSEC
), HRTIMER_MODE_REL
);
579 pd
->timeout_type
= type
;
581 case OZ_TIMER_HEARTBEAT
:
582 if (!hrtimer_active(&pd
->heartbeat
))
583 hrtimer_start(&pd
->heartbeat
, ktime_set(due_time
/
584 MSEC_PER_SEC
, (due_time
% MSEC_PER_SEC
) *
585 NSEC_PER_MSEC
), HRTIMER_MODE_REL
);
588 spin_unlock_bh(&g_polling_lock
);
/* Context: softirq or process */
/*
 * oz_pd_request_heartbeat() - arm the heartbeat timer for @pd, using
 * pd->pulse_period when positive and OZ_QUANTUM otherwise.
 */
594 void oz_pd_request_heartbeat(struct oz_pd
*pd
)
596 oz_timer_add(pd
, OZ_TIMER_HEARTBEAT
, pd
->pulse_period
> 0 ?
597 pd
->pulse_period
: OZ_QUANTUM
);
/* Context: softirq or process */
/*
 * oz_pd_find() - look up a PD on g_pd_list by MAC address under
 * g_polling_lock (presumably taking a reference and returning it, NULL
 * when absent — TODO confirm; the get/return statements are not visible).
 * NOTE(review): fragmentary — the pd declaration, oz_pd_get and the
 * return statements from the upstream driver are missing here.
 */
603 struct oz_pd
*oz_pd_find(const u8
*mac_addr
)
607 spin_lock_bh(&g_polling_lock
);
608 list_for_each_entry(pd
, &g_pd_list
, link
) {
609 if (ether_addr_equal(pd
->mac_addr
, mac_addr
)) {
611 spin_unlock_bh(&g_polling_lock
);
615 spin_unlock_bh(&g_polling_lock
);
/*
 * oz_app_enable() - set or clear bit @app_id in the global enabled-app
 * mask g_apps under g_polling_lock; out-of-range ids are ignored.
 * NOTE(review): fragmentary — the if (enable)/else structure and braces
 * from the upstream driver are missing here.
 */
622 void oz_app_enable(int app_id
, int enable
)
624 if (app_id
< OZ_NB_APPS
) {
625 spin_lock_bh(&g_polling_lock
);
627 g_apps
|= (1<<app_id
);
629 g_apps
&= ~(1<<app_id
);
630 spin_unlock_bh(&g_polling_lock
);
/*
 * oz_pkt_recv() - packet_type receive hook: take ownership of the skb
 * via skb_share_check, and serialize RX processing through g_rx_queue —
 * if another context is already processing (g_processing_rx), queue the
 * skb and return; otherwise drain the queue, presumably passing each skb
 * to oz_rx_frame() (TODO confirm; the call and the loop/return structure
 * are not visible in this fragment).
 * NOTE(review): fragmentary — NULL checks, the processing flag updates,
 * the frame-handling call, braces and returns from the upstream driver
 * are missing, and two comments lack their terminators.
 */
637 static int oz_pkt_recv(struct sk_buff
*skb
, struct net_device
*dev
,
638 struct packet_type
*pt
, struct net_device
*orig_dev
)
640 skb
= skb_share_check(skb
, GFP_ATOMIC
);
643 spin_lock_bh(&g_rx_queue
.lock
);
644 if (g_processing_rx
) {
645 /* We already hold the lock so use __ variant.
647 __skb_queue_head(&g_rx_queue
, skb
);
648 spin_unlock_bh(&g_rx_queue
.lock
);
653 spin_unlock_bh(&g_rx_queue
.lock
);
655 spin_lock_bh(&g_rx_queue
.lock
);
656 if (skb_queue_empty(&g_rx_queue
)) {
658 spin_unlock_bh(&g_rx_queue
.lock
);
661 /* We already hold the lock so use __ variant.
663 skb
= __skb_dequeue(&g_rx_queue
);
/*
 * oz_binding_add() - create a binding for the Ozmo ethertype: allocate
 * and fill a packet_type (handler oz_pkt_recv), optionally resolve and
 * pin the named netdev via dev_get_by_name, register the handler with
 * dev_add_pack, and append the binding to g_binding under its lock.
 * A NULL/empty @net_dev binds without a specific device.
 * NOTE(review): fragmentary — allocation-failure and lookup-failure
 * cleanup paths and braces from the upstream driver are missing here.
 */
672 void oz_binding_add(const char *net_dev
)
674 struct oz_binding
*binding
;
676 binding
= kzalloc(sizeof(struct oz_binding
), GFP_KERNEL
);
680 binding
->ptype
.type
= htons(OZ_ETHERTYPE
);
681 binding
->ptype
.func
= oz_pkt_recv
;
682 if (net_dev
&& *net_dev
) {
683 memcpy(binding
->name
, net_dev
, OZ_MAX_BINDING_LEN
);
684 oz_dbg(ON
, "Adding binding: %s\n", net_dev
);
685 binding
->ptype
.dev
= dev_get_by_name(&init_net
, net_dev
);
686 if (binding
->ptype
.dev
== NULL
) {
687 oz_dbg(ON
, "Netdev %s not found\n", net_dev
);
692 dev_add_pack(&binding
->ptype
);
693 spin_lock_bh(&g_binding_lock
);
694 list_add_tail(&binding
->link
, &g_binding
);
695 spin_unlock_bh(&g_binding_lock
);
/*
 * pd_stop_all_for_device() - move every PD bound to @net_dev from
 * g_pd_list onto a private list under g_polling_lock, then drain that
 * list, presumably stopping and releasing each PD (TODO confirm; the
 * stop/put calls are not visible in this fragment).
 * NOTE(review): fragmentary — the local list head h, the pd/n
 * declarations, oz_pd_get/stop/put and braces from the upstream driver
 * are missing here.
 */
701 static void pd_stop_all_for_device(struct net_device
*net_dev
)
707 spin_lock_bh(&g_polling_lock
);
708 list_for_each_entry_safe(pd
, n
, &g_pd_list
, link
) {
709 if (pd
->net_dev
== net_dev
) {
710 list_move(&pd
->link
, &h
);
714 spin_unlock_bh(&g_polling_lock
);
715 while (!list_empty(&h
)) {
716 pd
= list_first_entry(&h
, struct oz_pd
, link
);
/*
 * oz_binding_remove() - find the binding whose name matches @net_dev on
 * g_binding under g_binding_lock, unregister its packet handler, drop
 * the pinned netdev reference, stop all PDs using that device, and
 * unlink the binding (presumably then freed — TODO confirm; the kfree is
 * not visible in this fragment).
 * NOTE(review): fragmentary — the found flag, loop break, braces and
 * the final free from the upstream driver are missing here.
 */
725 void oz_binding_remove(const char *net_dev
)
727 struct oz_binding
*binding
;
730 oz_dbg(ON
, "Removing binding: %s\n", net_dev
);
731 spin_lock_bh(&g_binding_lock
);
732 list_for_each_entry(binding
, &g_binding
, link
) {
733 if (strncmp(binding
->name
, net_dev
, OZ_MAX_BINDING_LEN
) == 0) {
734 oz_dbg(ON
, "Binding '%s' found\n", net_dev
);
739 spin_unlock_bh(&g_binding_lock
);
741 dev_remove_pack(&binding
->ptype
);
742 if (binding
->ptype
.dev
) {
743 dev_put(binding
->ptype
.dev
);
744 pd_stop_all_for_device(binding
->ptype
.dev
);
746 list_del(&binding
->link
);
/*
 * oz_get_next_device_name() - copy the next comma-separated device name
 * from @s into @dname, bounded by @max_size (presumably returning the
 * position after the consumed name — TODO confirm; the copy loop body
 * and return are not visible in this fragment).
 * NOTE(review): fragmentary — most of the function body from the
 * upstream driver is missing here.
 */
754 static char *oz_get_next_device_name(char *s
, char *dname
, int max_size
)
758 while (*s
&& (*s
!= ',') && max_size
> 1) {
/*
 * oz_protocol_init() - driver start-up: create the element-info and
 * TX-frame slab caches (destroying the first if the second fails),
 * initialise the RX queue, and create bindings from the comma-separated
 * device list @devs ("*" binds all devices via oz_binding_add(NULL)).
 * NOTE(review): fragmentary — error returns, the else branch with the
 * per-device loop, the d buffer declaration and braces from the
 * upstream driver are missing here.
 */
769 int oz_protocol_init(char *devs
)
771 oz_elt_info_cache
= KMEM_CACHE(oz_elt_info
, 0);
772 if (!oz_elt_info_cache
)
775 oz_tx_frame_cache
= KMEM_CACHE(oz_tx_frame
, 0);
776 if (!oz_tx_frame_cache
) {
777 kmem_cache_destroy(oz_elt_info_cache
);
781 skb_queue_head_init(&g_rx_queue
);
782 if (devs
[0] == '*') {
783 oz_binding_add(NULL
);
788 devs
= oz_get_next_device_name(devs
, d
, sizeof(d
));
/*
 * oz_get_pd_list() - copy up to @max_count PD MAC addresses from
 * g_pd_list into @addr under g_polling_lock (presumably returning the
 * number copied — TODO confirm; the count declaration and return are
 * not visible in this fragment).
 * NOTE(review): fragmentary — local declarations, the break and return
 * from the upstream driver are missing here.
 */
799 int oz_get_pd_list(struct oz_mac_addr
*addr
, int max_count
)
804 spin_lock_bh(&g_polling_lock
);
805 list_for_each_entry(pd
, &g_pd_list
, link
) {
806 if (count
>= max_count
)
808 ether_addr_copy((u8
*)&addr
[count
++], pd
->mac_addr
);
810 spin_unlock_bh(&g_polling_lock
);