/* Mirrored from git.proxmox.com (mirror_ubuntu-zesty-kernel.git):
 * drivers/block/aoe/aoedev.c
 * Tree as of commit "aoe: improve handling of misbehaving network paths".
 */
1 /* Copyright (c) 2012 Coraid, Inc. See COPYING for GPL terms. */
2 /*
3 * aoedev.c
4 * AoE device utility functions; maintains device list.
5 */
6
7 #include <linux/hdreg.h>
8 #include <linux/blkdev.h>
9 #include <linux/netdevice.h>
10 #include <linux/delay.h>
11 #include <linux/slab.h>
12 #include <linux/bitmap.h>
13 #include <linux/kdev_t.h>
14 #include <linux/moduleparam.h>
15 #include "aoe.h"
16
17 static void dummy_timer(ulong);
18 static void aoedev_freedev(struct aoedev *);
19 static void freetgt(struct aoedev *d, struct aoetgt *t);
20 static void skbpoolfree(struct aoedev *d);
21
22 static int aoe_dyndevs = 1;
23 module_param(aoe_dyndevs, int, 0644);
24 MODULE_PARM_DESC(aoe_dyndevs, "Use dynamic minor numbers for devices.");
25
26 static struct aoedev *devlist;
27 static DEFINE_SPINLOCK(devlist_lock);
28
29 /* Because some systems will have one, many, or no
30 * - partitions,
31 * - slots per shelf,
32 * - or shelves,
33 * we need some flexibility in the way the minor numbers
34 * are allocated. So they are dynamic.
35 */
36 #define N_DEVS ((1U<<MINORBITS)/AOE_PARTITIONS)
37
38 static DEFINE_SPINLOCK(used_minors_lock);
39 static DECLARE_BITMAP(used_minors, N_DEVS);
40
41 static int
42 minor_get_dyn(ulong *sysminor)
43 {
44 ulong flags;
45 ulong n;
46 int error = 0;
47
48 spin_lock_irqsave(&used_minors_lock, flags);
49 n = find_first_zero_bit(used_minors, N_DEVS);
50 if (n < N_DEVS)
51 set_bit(n, used_minors);
52 else
53 error = -1;
54 spin_unlock_irqrestore(&used_minors_lock, flags);
55
56 *sysminor = n * AOE_PARTITIONS;
57 return error;
58 }
59
60 static int
61 minor_get_static(ulong *sysminor, ulong aoemaj, int aoemin)
62 {
63 ulong flags;
64 ulong n;
65 int error = 0;
66 enum {
67 /* for backwards compatibility when !aoe_dyndevs,
68 * a static number of supported slots per shelf */
69 NPERSHELF = 16,
70 };
71
72 if (aoemin >= NPERSHELF) {
73 pr_err("aoe: %s %d slots per shelf\n",
74 "static minor device numbers support only",
75 NPERSHELF);
76 error = -1;
77 goto out;
78 }
79
80 n = aoemaj * NPERSHELF + aoemin;
81 if (n >= N_DEVS) {
82 pr_err("aoe: %s with e%ld.%d\n",
83 "cannot use static minor device numbers",
84 aoemaj, aoemin);
85 error = -1;
86 goto out;
87 }
88
89 spin_lock_irqsave(&used_minors_lock, flags);
90 if (test_bit(n, used_minors)) {
91 pr_err("aoe: %s %lu\n",
92 "existing device already has static minor number",
93 n);
94 error = -1;
95 } else
96 set_bit(n, used_minors);
97 spin_unlock_irqrestore(&used_minors_lock, flags);
98 *sysminor = n * AOE_PARTITIONS;
99 out:
100 return error;
101 }
102
103 static int
104 minor_get(ulong *sysminor, ulong aoemaj, int aoemin)
105 {
106 if (aoe_dyndevs)
107 return minor_get_dyn(sysminor);
108 else
109 return minor_get_static(sysminor, aoemaj, aoemin);
110 }
111
112 static void
113 minor_free(ulong minor)
114 {
115 ulong flags;
116
117 minor /= AOE_PARTITIONS;
118 BUG_ON(minor >= N_DEVS);
119
120 spin_lock_irqsave(&used_minors_lock, flags);
121 BUG_ON(!test_bit(minor, used_minors));
122 clear_bit(minor, used_minors);
123 spin_unlock_irqrestore(&used_minors_lock, flags);
124 }
125
126 /*
127 * Users who grab a pointer to the device with aoedev_by_aoeaddr
128 * automatically get a reference count and must be responsible
129 * for performing a aoedev_put. With the addition of async
130 * kthread processing I'm no longer confident that we can
131 * guarantee consistency in the face of device flushes.
132 *
133 * For the time being, we only bother to add extra references for
134 * frames sitting on the iocq. When the kthreads finish processing
135 * these frames, they will aoedev_put the device.
136 */
137
138 void
139 aoedev_put(struct aoedev *d)
140 {
141 ulong flags;
142
143 spin_lock_irqsave(&devlist_lock, flags);
144 d->ref--;
145 spin_unlock_irqrestore(&devlist_lock, flags);
146 }
147
148 static void
149 dummy_timer(ulong vp)
150 {
151 struct aoedev *d;
152
153 d = (struct aoedev *)vp;
154 if (d->flags & DEVFL_TKILL)
155 return;
156 d->timer.expires = jiffies + HZ;
157 add_timer(&d->timer);
158 }
159
/* Fail the in-process request on device d, if any.
 *
 * Fails the buffer currently being filled (d->ip.buf), marks every
 * bio still queued on the in-process request as not up to date, and
 * completes the request with an error once its outstanding-bio
 * counter (kept as an integer in rq->special) drops to zero.
 */
static void
aoe_failip(struct aoedev *d)
{
	struct request *rq;
	struct bio *bio;
	unsigned long n;

	aoe_failbuf(d, d->ip.buf);

	rq = d->ip.rq;
	if (rq == NULL)
		return;	/* no request currently in process */
	while ((bio = d->ip.nxbio)) {
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
		d->ip.nxbio = bio->bi_next;
		/* rq->special counts bios not yet completed for this rq */
		n = (unsigned long) rq->special;
		rq->special = (void *) --n;
	}
	if ((unsigned long) rq->special == 0)
		aoe_end_request(d, rq, 0);
}
181
182 static void
183 downdev_frame(struct list_head *pos)
184 {
185 struct frame *f;
186
187 f = list_entry(pos, struct frame, head);
188 list_del(pos);
189 if (f->buf) {
190 f->buf->nframesout--;
191 aoe_failbuf(f->t->d, f->buf);
192 }
193 aoe_freetframe(f);
194 }
195
/* Take device d down: clear DEVFL_UP, fail all outstanding and
 * queued I/O, reset the per-target congestion windows, and zero the
 * gendisk capacity.  Both callers visible in this file
 * (aoedev_flush, aoedev_exit) hold d->lock across this call.
 */
void
aoedev_downdev(struct aoedev *d)
{
	struct aoetgt *t, **tt, **te;
	struct list_head *head, *pos, *nx;
	struct request *rq;
	int i;

	d->flags &= ~DEVFL_UP;

	/* clean out active and to-be-retransmitted buffers */
	for (i = 0; i < NFACTIVE; i++) {
		head = &d->factive[i];
		list_for_each_safe(pos, nx, head)
			downdev_frame(pos);
	}
	head = &d->rexmitq;
	list_for_each_safe(pos, nx, head)
		downdev_frame(pos);

	/* reset window dressings */
	tt = d->targets;
	te = tt + NTARGETS;
	for (; tt < te && (t = *tt); tt++) {
		aoecmd_wreset(t);
		t->nout = 0;
	}

	/* clean out the in-process request (if any) */
	aoe_failip(d);

	/* fast fail all pending I/O; DEVFL_UP is already cleared above */
	if (d->blkq) {
		while ((rq = blk_peek_request(d->blkq))) {
			blk_start_request(rq);
			aoe_end_request(d, rq, 1);
		}
	}

	if (d->gd)
		set_capacity(d->gd, 0);
}
238
239 static void
240 aoedev_freedev(struct aoedev *d)
241 {
242 struct aoetgt **t, **e;
243
244 cancel_work_sync(&d->work);
245 if (d->gd) {
246 aoedisk_rm_sysfs(d);
247 del_gendisk(d->gd);
248 put_disk(d->gd);
249 blk_cleanup_queue(d->blkq);
250 }
251 t = d->targets;
252 e = t + NTARGETS;
253 for (; t < e && *t; t++)
254 freetgt(d, *t);
255 if (d->bufpool)
256 mempool_destroy(d->bufpool);
257 skbpoolfree(d);
258 minor_free(d->sysminor);
259 kfree(d);
260 }
261
262 /* return whether the user asked for this particular
263 * device to be flushed
264 */
265 static int
266 user_req(char *s, size_t slen, struct aoedev *d)
267 {
268 char *p;
269 size_t lim;
270
271 if (!d->gd)
272 return 0;
273 p = strrchr(d->gd->disk_name, '/');
274 if (!p)
275 p = d->gd->disk_name;
276 else
277 p += 1;
278 lim = sizeof(d->gd->disk_name);
279 lim -= p - d->gd->disk_name;
280 if (slen < lim)
281 lim = slen;
282
283 return !strncmp(s, p, lim);
284 }
285
/* Handle a user "flush" request: unlink flushable devices from the
 * device list under devlist_lock, then free them after the locks are
 * dropped (aoedev_freedev must be able to sleep).
 *
 * str/cnt: user-space buffer.  "all" removes even devices that are
 * up; any other string of >= 3 bytes names one specific device; a
 * shorter buffer flushes only devices that are down, closed, and
 * unreferenced.
 * Returns 0 on success, -EFAULT if the user buffer cannot be read.
 */
int
aoedev_flush(const char __user *str, size_t cnt)
{
	ulong flags;
	struct aoedev *d, **dd;
	struct aoedev *rmd = NULL;	/* private list of devices to free */
	char buf[16];
	int all = 0;
	int specified = 0;	/* flush a specific device */

	if (cnt >= 3) {
		if (cnt > sizeof buf)
			cnt = sizeof buf;
		if (copy_from_user(buf, str, cnt))
			return -EFAULT;
		all = !strncmp(buf, "all", 3);
		if (!all)
			specified = 1;
	}

	spin_lock_irqsave(&devlist_lock, flags);
	dd = &devlist;
	while ((d = *dd)) {
		spin_lock(&d->lock);
		if (specified) {
			if (!user_req(buf, cnt, d))
				goto skip;
		} else if ((!all && (d->flags & DEVFL_UP))
			|| (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE))
			|| d->nopen
			|| d->ref)
			goto skip;

		/* unlink now; defer the actual free until locks are dropped */
		*dd = d->next;
		aoedev_downdev(d);
		d->flags |= DEVFL_TKILL;
		spin_unlock(&d->lock);
		d->next = rmd;
		rmd = d;
		continue;
skip:
		spin_unlock(&d->lock);
		dd = &d->next;
	}
	spin_unlock_irqrestore(&devlist_lock, flags);
	while ((d = rmd)) {
		rmd = d->next;
		del_timer_sync(&d->timer);
		aoedev_freedev(d);	/* must be able to sleep */
	}
	return 0;
}
338
/* A network driver holding an skb data reference for longer than the
 * timeout in skbfree below has been confirmed to occur once with
 * Tms=3*1000, due to the driver changing link and not processing its
 * transmit ring.  The problem is hard enough to solve by returning an
 * error that we still punt on "solving" it here.
 */
344 static void
345 skbfree(struct sk_buff *skb)
346 {
347 enum { Sms = 250, Tms = 30 * 1000};
348 int i = Tms / Sms;
349
350 if (skb == NULL)
351 return;
352 while (atomic_read(&skb_shinfo(skb)->dataref) != 1 && i-- > 0)
353 msleep(Sms);
354 if (i < 0) {
355 printk(KERN_ERR
356 "aoe: %s holds ref: %s\n",
357 skb->dev ? skb->dev->name : "netif",
358 "cannot free skb -- memory leaked.");
359 return;
360 }
361 skb->truesize -= skb->data_len;
362 skb_shinfo(skb)->nr_frags = skb->data_len = 0;
363 skb_trim(skb, 0);
364 dev_kfree_skb(skb);
365 }
366
367 static void
368 skbpoolfree(struct aoedev *d)
369 {
370 struct sk_buff *skb, *tmp;
371
372 skb_queue_walk_safe(&d->skbpool, skb, tmp)
373 skbfree(skb);
374
375 __skb_queue_head_init(&d->skbpool);
376 }
377
378 /* find it or allocate it */
379 struct aoedev *
380 aoedev_by_aoeaddr(ulong maj, int min, int do_alloc)
381 {
382 struct aoedev *d;
383 int i;
384 ulong flags;
385 ulong sysminor = 0;
386
387 spin_lock_irqsave(&devlist_lock, flags);
388
389 for (d=devlist; d; d=d->next)
390 if (d->aoemajor == maj && d->aoeminor == min) {
391 d->ref++;
392 break;
393 }
394 if (d || !do_alloc || minor_get(&sysminor, maj, min) < 0)
395 goto out;
396 d = kcalloc(1, sizeof *d, GFP_ATOMIC);
397 if (!d)
398 goto out;
399 INIT_WORK(&d->work, aoecmd_sleepwork);
400 spin_lock_init(&d->lock);
401 skb_queue_head_init(&d->skbpool);
402 init_timer(&d->timer);
403 d->timer.data = (ulong) d;
404 d->timer.function = dummy_timer;
405 d->timer.expires = jiffies + HZ;
406 add_timer(&d->timer);
407 d->bufpool = NULL; /* defer to aoeblk_gdalloc */
408 d->tgt = d->targets;
409 d->ref = 1;
410 for (i = 0; i < NFACTIVE; i++)
411 INIT_LIST_HEAD(&d->factive[i]);
412 INIT_LIST_HEAD(&d->rexmitq);
413 d->sysminor = sysminor;
414 d->aoemajor = maj;
415 d->aoeminor = min;
416 d->rttavg = RTTAVG_INIT;
417 d->rttdev = RTTDEV_INIT;
418 d->next = devlist;
419 devlist = d;
420 out:
421 spin_unlock_irqrestore(&devlist_lock, flags);
422 return d;
423 }
424
425 static void
426 freetgt(struct aoedev *d, struct aoetgt *t)
427 {
428 struct frame *f;
429 struct list_head *pos, *nx, *head;
430 struct aoeif *ifp;
431
432 for (ifp = t->ifs; ifp < &t->ifs[NAOEIFS]; ++ifp) {
433 if (!ifp->nd)
434 break;
435 dev_put(ifp->nd);
436 }
437
438 head = &t->ffree;
439 list_for_each_safe(pos, nx, head) {
440 list_del(pos);
441 f = list_entry(pos, struct frame, head);
442 skbfree(f->skb);
443 kfree(f);
444 }
445 kfree(t);
446 }
447
448 void
449 aoedev_exit(void)
450 {
451 struct aoedev *d;
452 ulong flags;
453
454 aoe_flush_iocq();
455 while ((d = devlist)) {
456 devlist = d->next;
457
458 spin_lock_irqsave(&d->lock, flags);
459 aoedev_downdev(d);
460 d->flags |= DEVFL_TKILL;
461 spin_unlock_irqrestore(&d->lock, flags);
462
463 del_timer_sync(&d->timer);
464 aoedev_freedev(d);
465 }
466 }
467
/* Module-load-time initialization for the device list.  Nothing to
 * set up currently; always succeeds.
 */
int __init
aoedev_init(void)
{
	return 0;
}