]>
Commit | Line | Data |
---|---|---|
b482cd20 SB |
1 | /* |
2 | * Copyright (C) ST-Ericsson AB 2010 | |
3 | * Author: Sjur Brendeland/sjur.brandeland@stericsson.com | |
4 | * License terms: GNU General Public License (GPL) version 2 | |
5 | */ | |
b31fa5ba JP |
6 | |
7 | #define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__ | |
8 | ||
b482cd20 SB |
9 | #include <linux/stddef.h> |
10 | #include <linux/spinlock.h> | |
11 | #include <linux/slab.h> | |
0b1e9738 | 12 | #include <linux/rculist.h> |
b482cd20 SB |
13 | #include <net/caif/cfpkt.h> |
14 | #include <net/caif/cfmuxl.h> | |
15 | #include <net/caif/cfsrvl.h> | |
16 | #include <net/caif/cffrml.h> | |
17 | ||
18 | #define container_obj(layr) container_of(layr, struct cfmuxl, layer) | |
19 | ||
20 | #define CAIF_CTRL_CHANNEL 0 | |
21 | #define UP_CACHE_SIZE 8 | |
22 | #define DN_CACHE_SIZE 8 | |
23 | ||
/*
 * Multiplexing layer: dispatches CAIF packets between the per-channel
 * service layers above and the per-interface framing layers below.
 */
struct cfmuxl {
	struct cflayer layer;	/* must stay first: cfmuxl_set_dnlayer() casts
				 * the cflayer pointer straight to cfmuxl */
	struct list_head srvl_list;	/* upward (service) layers, RCU list */
	struct list_head frml_list;	/* downward (framing) layers, RCU list */
	struct cflayer *up_cache[UP_CACHE_SIZE];	/* id -> up layer lookup cache */
	struct cflayer *dn_cache[DN_CACHE_SIZE];	/* phyid -> dn layer lookup cache */
	/*
	 * Set when inserting or removing downwards layers.
	 */
	spinlock_t transmit_lock;

	/*
	 * Set when inserting or removing upwards layers.
	 */
	spinlock_t receive_lock;

};
41 | ||
42 | static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt); | |
43 | static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt); | |
44 | static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, | |
45 | int phyid); | |
46 | static struct cflayer *get_up(struct cfmuxl *muxl, u16 id); | |
47 | ||
48 | struct cflayer *cfmuxl_create(void) | |
49 | { | |
50 | struct cfmuxl *this = kmalloc(sizeof(struct cfmuxl), GFP_ATOMIC); | |
51 | if (!this) | |
52 | return NULL; | |
53 | memset(this, 0, sizeof(*this)); | |
54 | this->layer.receive = cfmuxl_receive; | |
55 | this->layer.transmit = cfmuxl_transmit; | |
56 | this->layer.ctrlcmd = cfmuxl_ctrlcmd; | |
57 | INIT_LIST_HEAD(&this->srvl_list); | |
58 | INIT_LIST_HEAD(&this->frml_list); | |
59 | spin_lock_init(&this->transmit_lock); | |
60 | spin_lock_init(&this->receive_lock); | |
61 | snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "mux"); | |
62 | return &this->layer; | |
63 | } | |
64 | ||
65 | int cfmuxl_set_uplayer(struct cflayer *layr, struct cflayer *up, u8 linkid) | |
66 | { | |
67 | struct cfmuxl *muxl = container_obj(layr); | |
0b1e9738 | 68 | |
69 | spin_lock_bh(&muxl->receive_lock); | |
70 | list_add_rcu(&up->node, &muxl->srvl_list); | |
71 | spin_unlock_bh(&muxl->receive_lock); | |
b482cd20 SB |
72 | return 0; |
73 | } | |
74 | ||
b482cd20 SB |
75 | int cfmuxl_set_dnlayer(struct cflayer *layr, struct cflayer *dn, u8 phyid) |
76 | { | |
77 | struct cfmuxl *muxl = (struct cfmuxl *) layr; | |
0b1e9738 | 78 | |
79 | spin_lock_bh(&muxl->transmit_lock); | |
80 | list_add_rcu(&dn->node, &muxl->frml_list); | |
81 | spin_unlock_bh(&muxl->transmit_lock); | |
b482cd20 SB |
82 | return 0; |
83 | } | |
84 | ||
85 | static struct cflayer *get_from_id(struct list_head *list, u16 id) | |
86 | { | |
0b1e9738 | 87 | struct cflayer *lyr; |
88 | list_for_each_entry_rcu(lyr, list, node) { | |
89 | if (lyr->id == id) | |
90 | return lyr; | |
b482cd20 | 91 | } |
0b1e9738 | 92 | |
b482cd20 SB |
93 | return NULL; |
94 | } | |
95 | ||
96 | struct cflayer *cfmuxl_remove_dnlayer(struct cflayer *layr, u8 phyid) | |
97 | { | |
98 | struct cfmuxl *muxl = container_obj(layr); | |
99 | struct cflayer *dn; | |
0b1e9738 | 100 | int idx = phyid % DN_CACHE_SIZE; |
101 | ||
102 | spin_lock_bh(&muxl->transmit_lock); | |
103 | rcu_assign_pointer(muxl->dn_cache[idx], NULL); | |
b482cd20 | 104 | dn = get_from_id(&muxl->frml_list, phyid); |
0b1e9738 | 105 | if (dn == NULL) |
106 | goto out; | |
107 | ||
108 | list_del_rcu(&dn->node); | |
b482cd20 | 109 | caif_assert(dn != NULL); |
0b1e9738 | 110 | out: |
111 | spin_unlock_bh(&muxl->transmit_lock); | |
b482cd20 SB |
112 | return dn; |
113 | } | |
114 | ||
b482cd20 SB |
115 | static struct cflayer *get_up(struct cfmuxl *muxl, u16 id) |
116 | { | |
117 | struct cflayer *up; | |
118 | int idx = id % UP_CACHE_SIZE; | |
0b1e9738 | 119 | up = rcu_dereference(muxl->up_cache[idx]); |
b482cd20 | 120 | if (up == NULL || up->id != id) { |
0b1e9738 | 121 | spin_lock_bh(&muxl->receive_lock); |
b482cd20 | 122 | up = get_from_id(&muxl->srvl_list, id); |
0b1e9738 | 123 | rcu_assign_pointer(muxl->up_cache[idx], up); |
124 | spin_unlock_bh(&muxl->receive_lock); | |
b482cd20 SB |
125 | } |
126 | return up; | |
127 | } | |
128 | ||
b482cd20 SB |
129 | static struct cflayer *get_dn(struct cfmuxl *muxl, struct dev_info *dev_info) |
130 | { | |
131 | struct cflayer *dn; | |
132 | int idx = dev_info->id % DN_CACHE_SIZE; | |
0b1e9738 | 133 | dn = rcu_dereference(muxl->dn_cache[idx]); |
b482cd20 | 134 | if (dn == NULL || dn->id != dev_info->id) { |
0b1e9738 | 135 | spin_lock_bh(&muxl->transmit_lock); |
b482cd20 | 136 | dn = get_from_id(&muxl->frml_list, dev_info->id); |
0b1e9738 | 137 | rcu_assign_pointer(muxl->dn_cache[idx], dn); |
138 | spin_unlock_bh(&muxl->transmit_lock); | |
b482cd20 SB |
139 | } |
140 | return dn; | |
141 | } | |
142 | ||
143 | struct cflayer *cfmuxl_remove_uplayer(struct cflayer *layr, u8 id) | |
144 | { | |
145 | struct cflayer *up; | |
146 | struct cfmuxl *muxl = container_obj(layr); | |
0b1e9738 | 147 | int idx = id % UP_CACHE_SIZE; |
148 | ||
149 | spin_lock_bh(&muxl->receive_lock); | |
150 | up = get_from_id(&muxl->srvl_list, id); | |
5b208656 | 151 | if (up == NULL) |
a9a8f107 | 152 | goto out; |
0b1e9738 | 153 | |
154 | rcu_assign_pointer(muxl->up_cache[idx], NULL); | |
155 | list_del_rcu(&up->node); | |
a9a8f107 | 156 | out: |
0b1e9738 | 157 | spin_unlock_bh(&muxl->receive_lock); |
b482cd20 SB |
158 | return up; |
159 | } | |
160 | ||
161 | static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt) | |
162 | { | |
163 | int ret; | |
164 | struct cfmuxl *muxl = container_obj(layr); | |
165 | u8 id; | |
166 | struct cflayer *up; | |
167 | if (cfpkt_extr_head(pkt, &id, 1) < 0) { | |
b31fa5ba | 168 | pr_err("erroneous Caif Packet\n"); |
b482cd20 SB |
169 | cfpkt_destroy(pkt); |
170 | return -EPROTO; | |
171 | } | |
0b1e9738 | 172 | rcu_read_lock(); |
b482cd20 | 173 | up = get_up(muxl, id); |
0b1e9738 | 174 | |
b482cd20 | 175 | if (up == NULL) { |
0b1e9738 | 176 | pr_debug("Received data on unknown link ID = %d (0x%x)" |
177 | " up == NULL", id, id); | |
b482cd20 SB |
178 | cfpkt_destroy(pkt); |
179 | /* | |
180 | * Don't return ERROR, since modem misbehaves and sends out | |
181 | * flow on before linksetup response. | |
182 | */ | |
0b1e9738 | 183 | |
184 | rcu_read_unlock(); | |
b482cd20 SB |
185 | return /* CFGLU_EPROT; */ 0; |
186 | } | |
0b1e9738 | 187 | |
188 | /* We can't hold rcu_lock during receive, so take a ref count instead */ | |
5b208656 | 189 | cfsrvl_get(up); |
0b1e9738 | 190 | rcu_read_unlock(); |
191 | ||
b482cd20 | 192 | ret = up->receive(up, pkt); |
0b1e9738 | 193 | |
5b208656 | 194 | cfsrvl_put(up); |
b482cd20 SB |
195 | return ret; |
196 | } | |
197 | ||
/*
 * Transmit path: prepend the one-byte channel id and pass the packet to
 * the downward framing layer selected by the packet's device info.
 * Returns -ENOTCONN when no downward layer is registered for the
 * physical interface, otherwise the downward layer's transmit result.
 */
static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt)
{
	struct cfmuxl *muxl = container_obj(layr);
	int err;
	u8 linkid;
	struct cflayer *dn;
	struct caif_payload_info *info = cfpkt_info(pkt);
	BUG_ON(!info);

	rcu_read_lock();

	dn = get_dn(muxl, info->dev_info);
	if (dn == NULL) {
		pr_debug("Send data on unknown phy ID = %d (0x%x)\n",
			info->dev_info->id, info->dev_info->id);
		rcu_read_unlock();
		cfpkt_destroy(pkt);
		return -ENOTCONN;
	}

	/* Account for the channel-id byte prepended below. */
	info->hdr_len += 1;
	linkid = info->channel_id;
	cfpkt_add_head(pkt, &linkid, 1);

	/* We can't hold rcu_lock during transmit, so take a ref count instead
	 * (original comment said "receive" — copy-paste from cfmuxl_receive) */
	cffrml_hold(dn);

	rcu_read_unlock();

	err = dn->transmit(dn, pkt);

	cffrml_put(dn);
	return err;
}
232 | ||
233 | static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, | |
234 | int phyid) | |
235 | { | |
236 | struct cfmuxl *muxl = container_obj(layr); | |
b482cd20 | 237 | struct cflayer *layer; |
0b1e9738 | 238 | |
239 | rcu_read_lock(); | |
240 | list_for_each_entry_rcu(layer, &muxl->srvl_list, node) { | |
241 | if (cfsrvl_phyid_match(layer, phyid) && layer->ctrlcmd) | |
242 | /* NOTE: ctrlcmd is not allowed to block */ | |
b482cd20 SB |
243 | layer->ctrlcmd(layer, ctrl, phyid); |
244 | } | |
0b1e9738 | 245 | rcu_read_unlock(); |
b482cd20 | 246 | } |