drivers/block/aoe/aoecmd.c (mirror_ubuntu-jammy-kernel.git on git.proxmox.com, at commit "aoe: whitespace cleanup")
1 /* Copyright (c) 2012 Coraid, Inc. See COPYING for GPL terms. */
2 /*
3 * aoecmd.c
4 * Filesystem request handling methods
5 */
6
7 #include <linux/ata.h>
8 #include <linux/slab.h>
9 #include <linux/hdreg.h>
10 #include <linux/blkdev.h>
11 #include <linux/skbuff.h>
12 #include <linux/netdevice.h>
13 #include <linux/genhd.h>
14 #include <linux/moduleparam.h>
15 #include <linux/workqueue.h>
16 #include <linux/kthread.h>
17 #include <net/net_namespace.h>
18 #include <asm/unaligned.h>
19 #include <linux/uio.h>
20 #include "aoe.h"
21
22 #define MAXIOC (8192) /* default meant to avoid most soft lockups */
23
24 static void ktcomplete(struct frame *, struct sk_buff *);
25
26 static struct buf *nextbuf(struct aoedev *);
27
28 static int aoe_deadsecs = 60 * 3;
29 module_param(aoe_deadsecs, int, 0644);
30 MODULE_PARM_DESC(aoe_deadsecs, "After aoe_deadsecs seconds, give up and fail dev.");
31
32 static int aoe_maxout = 16;
33 module_param(aoe_maxout, int, 0644);
34 MODULE_PARM_DESC(aoe_maxout,
35 "Only aoe_maxout outstanding packets for every MAC on eX.Y.");
36
37 static wait_queue_head_t ktiowq;
38 static struct ktstate kts;
39
40 /* io completion queue */
41 static struct {
42 struct list_head head;
43 spinlock_t lock;
44 } iocq;
45
46 static struct sk_buff *
47 new_skb(ulong len)
48 {
49 struct sk_buff *skb;
50
51 skb = alloc_skb(len, GFP_ATOMIC);
52 if (skb) {
53 skb_reset_mac_header(skb);
54 skb_reset_network_header(skb);
55 skb->protocol = __constant_htons(ETH_P_AOE);
56 skb_checksum_none_assert(skb);
57 }
58 return skb;
59 }
60
61 static struct frame *
62 getframe(struct aoedev *d, u32 tag)
63 {
64 struct frame *f;
65 struct list_head *head, *pos, *nx;
66 u32 n;
67
68 n = tag % NFACTIVE;
69 head = &d->factive[n];
70 list_for_each_safe(pos, nx, head) {
71 f = list_entry(pos, struct frame, head);
72 if (f->tag == tag) {
73 list_del(pos);
74 return f;
75 }
76 }
77 return NULL;
78 }
79
80 /*
81 * Leave the top bit clear so we have tagspace for userland.
82 * The bottom 16 bits are the xmit tick for rexmit/rttavg processing.
83 * This driver reserves tag -1 to mean "unused frame."
84 */
85 static int
86 newtag(struct aoedev *d)
87 {
88 register ulong n;
89
90 n = jiffies & 0xffff;
91 return n |= (++d->lasttag & 0x7fff) << 16;
92 }
93
94 static u32
95 aoehdr_atainit(struct aoedev *d, struct aoetgt *t, struct aoe_hdr *h)
96 {
97 u32 host_tag = newtag(d);
98
99 memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);
100 memcpy(h->dst, t->addr, sizeof h->dst);
101 h->type = __constant_cpu_to_be16(ETH_P_AOE);
102 h->verfl = AOE_HVER;
103 h->major = cpu_to_be16(d->aoemajor);
104 h->minor = d->aoeminor;
105 h->cmd = AOECMD_ATA;
106 h->tag = cpu_to_be32(host_tag);
107
108 return host_tag;
109 }
110
111 static inline void
112 put_lba(struct aoe_atahdr *ah, sector_t lba)
113 {
114 ah->lba0 = lba;
115 ah->lba1 = lba >>= 8;
116 ah->lba2 = lba >>= 8;
117 ah->lba3 = lba >>= 8;
118 ah->lba4 = lba >>= 8;
119 ah->lba5 = lba >>= 8;
120 }
121
122 static struct aoeif *
123 ifrotate(struct aoetgt *t)
124 {
125 struct aoeif *ifp;
126
127 ifp = t->ifp;
128 ifp++;
129 if (ifp >= &t->ifs[NAOEIFS] || ifp->nd == NULL)
130 ifp = t->ifs;
131 if (ifp->nd == NULL)
132 return NULL;
133 return t->ifp = ifp;
134 }
135
136 static void
137 skb_pool_put(struct aoedev *d, struct sk_buff *skb)
138 {
139 __skb_queue_tail(&d->skbpool, skb);
140 }
141
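/* An skb in the per-device pool can be reused only once the network
 * layer has dropped its reference (dataref == 1); otherwise fall back
 * to allocating a fresh one while the pool is below NSKBPOOLMAX.
 */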
142 static struct sk_buff *
143 skb_pool_get(struct aoedev *d)
144 {
145 struct sk_buff *skb = skb_peek(&d->skbpool);
146
147 if (skb && atomic_read(&skb_shinfo(skb)->dataref) == 1) {
148 __skb_unlink(skb, &d->skbpool);
149 return skb;
150 }
151 if (skb_queue_len(&d->skbpool) < NSKBPOOLMAX &&
152 (skb = new_skb(ETH_ZLEN)))
153 return skb;
154
155 return NULL;
156 }
157
158 void
159 aoe_freetframe(struct frame *f)
160 {
161 struct aoetgt *t;
162
163 t = f->t;
164 f->buf = NULL;
165 f->bv = NULL;
166 f->r_skb = NULL;
167 list_add(&f->head, &t->ffree);
168 }
169
170 static struct frame *
171 newtframe(struct aoedev *d, struct aoetgt *t)
172 {
173 struct frame *f;
174 struct sk_buff *skb;
175 struct list_head *pos;
176
177 if (list_empty(&t->ffree)) {
178 if (t->falloc >= NSKBPOOLMAX*2)
179 return NULL;
180 f = kcalloc(1, sizeof(*f), GFP_ATOMIC);
181 if (f == NULL)
182 return NULL;
183 t->falloc++;
184 f->t = t;
185 } else {
186 pos = t->ffree.next;
187 list_del(pos);
188 f = list_entry(pos, struct frame, head);
189 }
190
191 skb = f->skb;
192 if (skb == NULL) {
193 f->skb = skb = new_skb(ETH_ZLEN);
194 if (!skb) {
195 bail: aoe_freetframe(f);
196 return NULL;
197 }
198 }
199
200 if (atomic_read(&skb_shinfo(skb)->dataref) != 1) {
201 skb = skb_pool_get(d);
202 if (skb == NULL)
203 goto bail;
204 skb_pool_put(d, f->skb);
205 f->skb = skb;
206 }
207
208 skb->truesize -= skb->data_len;
209 skb_shinfo(skb)->nr_frags = skb->data_len = 0;
210 skb_trim(skb, 0);
211 return f;
212 }
213
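/* Pick the next target in round-robin order that has window space, is
 * not the target marked as needing help (d->htgt), and still has an
 * interface, and take a frame for it.  If nothing is outstanding on any
 * target, set DEVFL_KICKME so the request queue is rerun later.
 */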
214 static struct frame *
215 newframe(struct aoedev *d)
216 {
217 struct frame *f;
218 struct aoetgt *t, **tt;
219 int totout = 0;
220
221 if (d->targets[0] == NULL) { /* shouldn't happen, but I'm paranoid */
222 printk(KERN_ERR "aoe: NULL TARGETS!\n");
223 return NULL;
224 }
225 tt = d->tgt; /* last used target */
226 for (;;) {
227 tt++;
228 if (tt >= &d->targets[NTARGETS] || !*tt)
229 tt = d->targets;
230 t = *tt;
231 totout += t->nout;
232 if (t->nout < t->maxout
233 && t != d->htgt
234 && t->ifp->nd) {
235 f = newtframe(d, t);
236 if (f) {
237 ifrotate(t);
238 d->tgt = tt;
239 return f;
240 }
241 }
242 if (tt == d->tgt) /* we've looped and found nada */
243 break;
244 }
245 if (totout == 0) {
246 d->kicked++;
247 d->flags |= DEVFL_KICKME;
248 }
249 return NULL;
250 }
251
252 static void
253 skb_fillup(struct sk_buff *skb, struct bio_vec *bv, ulong off, ulong cnt)
254 {
255 int frag = 0;
256 ulong fcnt;
257 loop:
258 fcnt = bv->bv_len - (off - bv->bv_offset);
259 if (fcnt > cnt)
260 fcnt = cnt;
261 skb_fill_page_desc(skb, frag++, bv->bv_page, off, fcnt);
262 cnt -= fcnt;
263 if (cnt <= 0)
264 return;
265 bv++;
266 off = bv->bv_offset;
267 goto loop;
268 }
269
270 static void
271 fhash(struct frame *f)
272 {
273 struct aoedev *d = f->t->d;
274 u32 n;
275
276 n = f->tag % NFACTIVE;
277 list_add_tail(&f->head, &d->factive[n]);
278 }
279
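/* Send one ATA read or write frame for the current buf: carve off up to
 * maxbcnt (or DEFAULTBCNT) bytes of the bio_vec chain, build the AoE and
 * ATA headers, attach the data pages for writes, and transmit a clone of
 * the frame's skb.  Returns 1 if a frame was sent, 0 otherwise.
 */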
280 static int
281 aoecmd_ata_rw(struct aoedev *d)
282 {
283 struct frame *f;
284 struct aoe_hdr *h;
285 struct aoe_atahdr *ah;
286 struct buf *buf;
287 struct aoetgt *t;
288 struct sk_buff *skb;
289 struct sk_buff_head queue;
290 ulong bcnt, fbcnt;
291 char writebit, extbit;
292
293 writebit = 0x10;
294 extbit = 0x4;
295
296 buf = nextbuf(d);
297 if (buf == NULL)
298 return 0;
299 f = newframe(d);
300 if (f == NULL)
301 return 0;
302 t = *d->tgt;
303 bcnt = d->maxbcnt;
304 if (bcnt == 0)
305 bcnt = DEFAULTBCNT;
306 if (bcnt > buf->resid)
307 bcnt = buf->resid;
308 fbcnt = bcnt;
309 f->bv = buf->bv;
310 f->bv_off = f->bv->bv_offset + (f->bv->bv_len - buf->bv_resid);
311 do {
312 if (fbcnt < buf->bv_resid) {
313 buf->bv_resid -= fbcnt;
314 buf->resid -= fbcnt;
315 break;
316 }
317 fbcnt -= buf->bv_resid;
318 buf->resid -= buf->bv_resid;
319 if (buf->resid == 0) {
320 d->ip.buf = NULL;
321 break;
322 }
323 buf->bv++;
324 buf->bv_resid = buf->bv->bv_len;
325 WARN_ON(buf->bv_resid == 0);
326 } while (fbcnt);
327
328 /* initialize the headers & frame */
329 skb = f->skb;
330 h = (struct aoe_hdr *) skb_mac_header(skb);
331 ah = (struct aoe_atahdr *) (h+1);
332 skb_put(skb, sizeof *h + sizeof *ah);
333 memset(h, 0, skb->len);
334 f->tag = aoehdr_atainit(d, t, h);
335 fhash(f);
336 t->nout++;
337 f->waited = 0;
338 f->buf = buf;
339 f->bcnt = bcnt;
340 f->lba = buf->sector;
341
342 /* set up ata header */
343 ah->scnt = bcnt >> 9;
344 put_lba(ah, buf->sector);
345 if (d->flags & DEVFL_EXT) {
346 ah->aflags |= AOEAFL_EXT;
347 } else {
348 extbit = 0;
349 ah->lba3 &= 0x0f;
350 ah->lba3 |= 0xe0; /* LBA bit + obsolete 0xa0 */
351 }
352 if (bio_data_dir(buf->bio) == WRITE) {
353 skb_fillup(skb, f->bv, f->bv_off, bcnt);
354 ah->aflags |= AOEAFL_WRITE;
355 skb->len += bcnt;
356 skb->data_len = bcnt;
357 skb->truesize += bcnt;
358 t->wpkts++;
359 } else {
360 t->rpkts++;
361 writebit = 0;
362 }
363
364 ah->cmdstat = ATA_CMD_PIO_READ | writebit | extbit;
365
366 /* mark all tracking fields and load out */
367 buf->nframesout += 1;
368 buf->sector += bcnt >> 9;
369
370 skb->dev = t->ifp->nd;
371 skb = skb_clone(skb, GFP_ATOMIC);
372 if (skb) {
373 __skb_queue_head_init(&queue);
374 __skb_queue_tail(&queue, skb);
375 aoenet_xmit(&queue);
376 }
377 return 1;
378 }
379
380 /* some callers cannot sleep, and they can call this function,
381 * transmitting the packets later, when interrupts are on
382 */
383 static void
384 aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *queue)
385 {
386 struct aoe_hdr *h;
387 struct aoe_cfghdr *ch;
388 struct sk_buff *skb;
389 struct net_device *ifp;
390
391 rcu_read_lock();
392 for_each_netdev_rcu(&init_net, ifp) {
393 dev_hold(ifp);
394 if (!is_aoe_netif(ifp))
395 goto cont;
396
397 skb = new_skb(sizeof *h + sizeof *ch);
398 if (skb == NULL) {
399 printk(KERN_INFO "aoe: skb alloc failure\n");
400 goto cont;
401 }
402 skb_put(skb, sizeof *h + sizeof *ch);
403 skb->dev = ifp;
404 __skb_queue_tail(queue, skb);
405 h = (struct aoe_hdr *) skb_mac_header(skb);
406 memset(h, 0, sizeof *h + sizeof *ch);
407
408 memset(h->dst, 0xff, sizeof h->dst);
409 memcpy(h->src, ifp->dev_addr, sizeof h->src);
410 h->type = __constant_cpu_to_be16(ETH_P_AOE);
411 h->verfl = AOE_HVER;
412 h->major = cpu_to_be16(aoemajor);
413 h->minor = aoeminor;
414 h->cmd = AOECMD_CFG;
415
416 cont:
417 dev_put(ifp);
418 }
419 rcu_read_unlock();
420 }
421
422 static void
423 resend(struct aoedev *d, struct frame *f)
424 {
425 struct sk_buff *skb;
426 struct sk_buff_head queue;
427 struct aoe_hdr *h;
428 struct aoe_atahdr *ah;
429 struct aoetgt *t;
430 char buf[128];
431 u32 n;
432
433 t = f->t;
434 n = newtag(d);
435 skb = f->skb;
436 if (ifrotate(t) == NULL) {
437 /* probably can't happen, but set it up to fail anyway */
438 pr_info("aoe: resend: no interfaces to rotate to.\n");
439 ktcomplete(f, NULL);
440 return;
441 }
442 h = (struct aoe_hdr *) skb_mac_header(skb);
443 ah = (struct aoe_atahdr *) (h+1);
444
445 snprintf(buf, sizeof buf,
446 "%15s e%ld.%d oldtag=%08x@%08lx newtag=%08x s=%pm d=%pm nout=%d\n",
447 "retransmit", d->aoemajor, d->aoeminor, f->tag, jiffies, n,
448 h->src, h->dst, t->nout);
449 aoechr_error(buf);
450
451 f->tag = n;
452 fhash(f);
453 h->tag = cpu_to_be32(n);
454 memcpy(h->dst, t->addr, sizeof h->dst);
455 memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);
456
457 skb->dev = t->ifp->nd;
458 skb = skb_clone(skb, GFP_ATOMIC);
459 if (skb == NULL)
460 return;
461 __skb_queue_head_init(&queue);
462 __skb_queue_tail(&queue, skb);
463 aoenet_xmit(&queue);
464 }
465
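/* The low 16 bits of a tag hold the jiffies tick recorded by newtag(),
 * so the age of a frame is a 16-bit subtraction, wrapping at 1<<16.
 */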
466 static int
467 tsince(u32 tag)
468 {
469 int n;
470
471 n = jiffies & 0xffff;
472 n -= tag & 0xffff;
473 if (n < 0)
474 n += 1<<16;
475 return n;
476 }
477
478 static struct aoeif *
479 getif(struct aoetgt *t, struct net_device *nd)
480 {
481 struct aoeif *p, *e;
482
483 p = t->ifs;
484 e = p + NAOEIFS;
485 for (; p < e; p++)
486 if (p->nd == nd)
487 return p;
488 return NULL;
489 }
490
491 static void
492 ejectif(struct aoetgt *t, struct aoeif *ifp)
493 {
494 struct aoeif *e;
495 struct net_device *nd;
496 ulong n;
497
498 nd = ifp->nd;
499 e = t->ifs + NAOEIFS - 1;
500 n = (e - ifp) * sizeof *ifp;
501 memmove(ifp, ifp+1, n);
502 e->nd = NULL;
503 dev_put(nd);
504 }
505
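/* Move every frame still outstanding on the target being helped
 * (d->htgt) onto new frames obtained from the other targets and resend
 * them, then clear the old target's interface list so it is skipped.
 */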
506 static int
507 sthtith(struct aoedev *d)
508 {
509 struct frame *f, *nf;
510 struct list_head *nx, *pos, *head;
511 struct sk_buff *skb;
512 struct aoetgt *ht = d->htgt;
513 int i;
514
515 for (i = 0; i < NFACTIVE; i++) {
516 head = &d->factive[i];
517 list_for_each_safe(pos, nx, head) {
518 f = list_entry(pos, struct frame, head);
519 if (f->t != ht)
520 continue;
521
522 nf = newframe(d);
523 if (!nf)
524 return 0;
525
526 /* remove frame from active list */
527 list_del(pos);
528
529 /* reassign all pertinent bits to new outbound frame */
530 skb = nf->skb;
531 nf->skb = f->skb;
532 nf->buf = f->buf;
533 nf->bcnt = f->bcnt;
534 nf->lba = f->lba;
535 nf->bv = f->bv;
536 nf->bv_off = f->bv_off;
537 nf->waited = 0;
538 f->skb = skb;
539 aoe_freetframe(f);
540 ht->nout--;
541 nf->t->nout++;
542 resend(d, nf);
543 }
544 }
545 /* We've cleaned up the outstanding so take away his
546 * interfaces so he won't be used. We should remove him from
547 * the target array here, but cleaning up a target is
548 * involved. PUNT!
549 */
550 memset(ht->ifs, 0, sizeof ht->ifs);
551 d->htgt = NULL;
552 return 1;
553 }
554
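/* Per-device timer: frames older than ~150% of the round-trip average
 * are retransmitted, a frame that has accumulated more than
 * aoe_deadsecs of waiting fails the whole device, per-target windows
 * are adjusted, and the timer rearms itself every TIMERTICK.
 */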
555 static void
556 rexmit_timer(ulong vp)
557 {
558 struct aoedev *d;
559 struct aoetgt *t, **tt, **te;
560 struct aoeif *ifp;
561 struct frame *f;
562 struct list_head *head, *pos, *nx;
563 LIST_HEAD(flist);
564 register long timeout;
565 ulong flags, n;
566 int i;
567
568 d = (struct aoedev *) vp;
569
570 /* timeout is always ~150% of the moving average */
571 timeout = d->rttavg;
572 timeout += timeout >> 1;
573
574 spin_lock_irqsave(&d->lock, flags);
575
576 if (d->flags & DEVFL_TKILL) {
577 spin_unlock_irqrestore(&d->lock, flags);
578 return;
579 }
580
581 /* collect all frames to rexmit into flist */
582 for (i = 0; i < NFACTIVE; i++) {
583 head = &d->factive[i];
584 list_for_each_safe(pos, nx, head) {
585 f = list_entry(pos, struct frame, head);
586 if (tsince(f->tag) < timeout)
587 break; /* end of expired frames */
588 /* move to flist for later processing */
589 list_move_tail(pos, &flist);
590 }
591 }
592 /* window check */
593 tt = d->targets;
594 te = tt + d->ntargets;
595 for (; tt < te && (t = *tt); tt++) {
596 if (t->nout == t->maxout
597 && t->maxout < t->nframes
598 && (jiffies - t->lastwadj)/HZ > 10) {
599 t->maxout++;
600 t->lastwadj = jiffies;
601 }
602 }
603
604 if (!list_empty(&flist)) { /* retransmissions necessary */
605 n = d->rttavg <<= 1;
606 if (n > MAXTIMER)
607 d->rttavg = MAXTIMER;
608 }
609
610 /* process expired frames */
611 while (!list_empty(&flist)) {
612 pos = flist.next;
613 f = list_entry(pos, struct frame, head);
614 n = f->waited += timeout;
615 n /= HZ;
616 if (n > aoe_deadsecs) {
617 /* Waited too long. Device failure.
618 * Hang all frames on first hash bucket for downdev
619 * to clean up.
620 */
621 list_splice(&flist, &d->factive[0]);
622 aoedev_downdev(d);
623 break;
624 }
625 list_del(pos);
626
627 t = f->t;
628 if (n > aoe_deadsecs/2)
629 d->htgt = t; /* see if another target can help */
630
631 if (t->nout == t->maxout) {
632 if (t->maxout > 1)
633 t->maxout--;
634 t->lastwadj = jiffies;
635 }
636
637 ifp = getif(t, f->skb->dev);
638 if (ifp && ++ifp->lost > (t->nframes << 1)
639 && (ifp != t->ifs || t->ifs[1].nd)) {
640 ejectif(t, ifp);
641 ifp = NULL;
642 }
643 resend(d, f);
644 }
645
646 if ((d->flags & DEVFL_KICKME || d->htgt) && d->blkq) {
647 d->flags &= ~DEVFL_KICKME;
648 d->blkq->request_fn(d->blkq);
649 }
650
651 d->timer.expires = jiffies + TIMERTICK;
652 add_timer(&d->timer);
653
654 spin_unlock_irqrestore(&d->lock, flags);
655 }
656
657 static unsigned long
658 rqbiocnt(struct request *r)
659 {
660 struct bio *bio;
661 unsigned long n = 0;
662
663 __rq_for_each_bio(bio, r)
664 n++;
665 return n;
666 }
667
668 /* This can be removed if we are certain that no users of the block
669 * layer will ever use zero-count pages in bios. Otherwise we have to
670 * protect against the put_page sometimes done by the network layer.
671 *
672 * See http://oss.sgi.com/archives/xfs/2007-01/msg00594.html for
673 * discussion.
674 *
675 * We cannot use get_page in the workaround, because it insists on a
676 * positive page count as a precondition. So we use _count directly.
677 */
678 static void
679 bio_pageinc(struct bio *bio)
680 {
681 struct bio_vec *bv;
682 struct page *page;
683 int i;
684
685 bio_for_each_segment(bv, bio, i) {
686 page = bv->bv_page;
687 /* Non-zero page count for non-head members of
688 * compound pages is no longer allowed by the kernel,
689 * but this has never been seen here.
690 */
691 if (unlikely(PageCompound(page)))
692 if (compound_trans_head(page) != page) {
693 pr_crit("page tail used for block I/O\n");
694 BUG();
695 }
696 atomic_inc(&page->_count);
697 }
698 }
699
700 static void
701 bio_pagedec(struct bio *bio)
702 {
703 struct bio_vec *bv;
704 int i;
705
706 bio_for_each_segment(bv, bio, i)
707 atomic_dec(&bv->bv_page->_count);
708 }
709
710 static void
711 bufinit(struct buf *buf, struct request *rq, struct bio *bio)
712 {
713 struct bio_vec *bv;
714
715 memset(buf, 0, sizeof(*buf));
716 buf->rq = rq;
717 buf->bio = bio;
718 buf->resid = bio->bi_size;
719 buf->sector = bio->bi_sector;
720 bio_pageinc(bio);
721 buf->bv = bv = &bio->bi_io_vec[bio->bi_idx];
722 buf->bv_resid = bv->bv_len;
723 WARN_ON(buf->bv_resid == 0);
724 }
725
726 static struct buf *
727 nextbuf(struct aoedev *d)
728 {
729 struct request *rq;
730 struct request_queue *q;
731 struct buf *buf;
732 struct bio *bio;
733
734 q = d->blkq;
735 if (q == NULL)
736 return NULL; /* initializing */
737 if (d->ip.buf)
738 return d->ip.buf;
739 rq = d->ip.rq;
740 if (rq == NULL) {
741 rq = blk_peek_request(q);
742 if (rq == NULL)
743 return NULL;
744 blk_start_request(rq);
745 d->ip.rq = rq;
746 d->ip.nxbio = rq->bio;
747 rq->special = (void *) rqbiocnt(rq);
748 }
749 buf = mempool_alloc(d->bufpool, GFP_ATOMIC);
750 if (buf == NULL) {
751 pr_err("aoe: nextbuf: unable to mempool_alloc!\n");
752 return NULL;
753 }
754 bio = d->ip.nxbio;
755 bufinit(buf, rq, bio);
756 bio = bio->bi_next;
757 d->ip.nxbio = bio;
758 if (bio == NULL)
759 d->ip.rq = NULL;
760 return d->ip.buf = buf;
761 }
762
763 /* enters with d->lock held */
764 void
765 aoecmd_work(struct aoedev *d)
766 {
767 if (d->htgt && !sthtith(d))
768 return;
769 while (aoecmd_ata_rw(d))
770 ;
771 }
772
773 /* this function performs work that has been deferred until sleeping is OK
774 */
775 void
776 aoecmd_sleepwork(struct work_struct *work)
777 {
778 struct aoedev *d = container_of(work, struct aoedev, work);
779 struct block_device *bd;
780 u64 ssize;
781
782 if (d->flags & DEVFL_GDALLOC)
783 aoeblk_gdalloc(d);
784
785 if (d->flags & DEVFL_NEWSIZE) {
786 ssize = get_capacity(d->gd);
787 bd = bdget_disk(d->gd, 0);
788 if (bd) {
789 mutex_lock(&bd->bd_inode->i_mutex);
790 i_size_write(bd->bd_inode, (loff_t)ssize<<9);
791 mutex_unlock(&bd->bd_inode->i_mutex);
792 bdput(bd);
793 }
794 spin_lock_irq(&d->lock);
795 d->flags |= DEVFL_UP;
796 d->flags &= ~DEVFL_NEWSIZE;
797 spin_unlock_irq(&d->lock);
798 }
799 }
800
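/* Parse an ATA IDENTIFY response: pick LBA48 or LBA28 capacity and
 * geometry, then schedule gendisk allocation or a capacity update via
 * the deferred work in aoecmd_sleepwork().
 */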
801 static void
802 ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
803 {
804 u64 ssize;
805 u16 n;
806
807 /* word 83: command set supported */
808 n = get_unaligned_le16(&id[83 << 1]);
809
810 /* word 86: command set/feature enabled */
811 n |= get_unaligned_le16(&id[86 << 1]);
812
813 if (n & (1<<10)) { /* bit 10: LBA 48 */
814 d->flags |= DEVFL_EXT;
815
816 /* word 100: number lba48 sectors */
817 ssize = get_unaligned_le64(&id[100 << 1]);
818
819 /* set as in ide-disk.c:init_idedisk_capacity */
820 d->geo.cylinders = ssize;
821 d->geo.cylinders /= (255 * 63);
822 d->geo.heads = 255;
823 d->geo.sectors = 63;
824 } else {
825 d->flags &= ~DEVFL_EXT;
826
827 /* number lba28 sectors */
828 ssize = get_unaligned_le32(&id[60 << 1]);
829
830 /* NOTE: obsolete in ATA 6 */
831 d->geo.cylinders = get_unaligned_le16(&id[54 << 1]);
832 d->geo.heads = get_unaligned_le16(&id[55 << 1]);
833 d->geo.sectors = get_unaligned_le16(&id[56 << 1]);
834 }
835
836 if (d->ssize != ssize)
837 printk(KERN_INFO
838 "aoe: %pm e%ld.%d v%04x has %llu sectors\n",
839 t->addr,
840 d->aoemajor, d->aoeminor,
841 d->fw_ver, (long long)ssize);
842 d->ssize = ssize;
843 d->geo.start = 0;
844 if (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE))
845 return;
846 if (d->gd != NULL) {
847 set_capacity(d->gd, ssize);
848 d->flags |= DEVFL_NEWSIZE;
849 } else
850 d->flags |= DEVFL_GDALLOC;
851 schedule_work(&d->work);
852 }
853
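/* Fold one round-trip sample into the moving average with gain 1/4
 * (rttavg += (sample - rttavg) / 4) after clamping it to a sane range.
 * A negative rtt flags a response that matched no outstanding frame;
 * its magnitude also nudges mintimer before being averaged in.
 */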
854 static void
855 calc_rttavg(struct aoedev *d, int rtt)
856 {
857 register long n;
858
859 n = rtt;
860 if (n < 0) {
861 n = -rtt;
862 if (n < MINTIMER)
863 n = MINTIMER;
864 else if (n > MAXTIMER)
865 n = MAXTIMER;
866 d->mintimer += (n - d->mintimer) >> 1;
867 } else if (n < d->mintimer)
868 n = d->mintimer;
869 else if (n > MAXTIMER)
870 n = MAXTIMER;
871
872 /* g == .25; cf. Congestion Avoidance and Control, Jacobson & Karels; 1988 */
873 n -= d->rttavg;
874 d->rttavg += n >> 2;
875 }
876
877 static struct aoetgt *
878 gettgt(struct aoedev *d, char *addr)
879 {
880 struct aoetgt **t, **e;
881
882 t = d->targets;
883 e = t + NTARGETS;
884 for (; t < e && *t; t++)
885 if (memcmp((*t)->addr, addr, sizeof((*t)->addr)) == 0)
886 return *t;
887 return NULL;
888 }
889
890 static void
891 bvcpy(struct bio_vec *bv, ulong off, struct sk_buff *skb, long cnt)
892 {
893 ulong fcnt;
894 char *p;
895 int soff = 0;
896 loop:
897 fcnt = bv->bv_len - (off - bv->bv_offset);
898 if (fcnt > cnt)
899 fcnt = cnt;
900 p = page_address(bv->bv_page) + off;
901 skb_copy_bits(skb, soff, p, fcnt);
902 soff += fcnt;
903 cnt -= fcnt;
904 if (cnt <= 0)
905 return;
906 bv++;
907 off = bv->bv_offset;
908 goto loop;
909 }
910
911 void
912 aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
913 {
914 struct bio *bio;
915 int bok;
916 struct request_queue *q;
917
918 q = d->blkq;
919 if (rq == d->ip.rq)
920 d->ip.rq = NULL;
921 do {
922 bio = rq->bio;
923 bok = !fastfail && test_bit(BIO_UPTODATE, &bio->bi_flags);
924 } while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_size));
925
926 /* cf. http://lkml.org/lkml/2006/10/31/28 */
927 if (!fastfail)
928 __blk_run_queue(q);
929 }
930
931 static void
932 aoe_end_buf(struct aoedev *d, struct buf *buf)
933 {
934 struct request *rq;
935 unsigned long n;
936
937 if (buf == d->ip.buf)
938 d->ip.buf = NULL;
939 rq = buf->rq;
940 bio_pagedec(buf->bio);
941 mempool_free(buf, d->bufpool);
942 n = (unsigned long) rq->special;
943 rq->special = (void *) --n;
944 if (n == 0)
945 aoe_end_request(d, rq, 0);
946 }
947
948 static void
949 ktiocomplete(struct frame *f)
950 {
951 struct aoe_hdr *hin, *hout;
952 struct aoe_atahdr *ahin, *ahout;
953 struct buf *buf;
954 struct sk_buff *skb;
955 struct aoetgt *t;
956 struct aoeif *ifp;
957 struct aoedev *d;
958 long n;
959
960 if (f == NULL)
961 return;
962
963 t = f->t;
964 d = t->d;
965
966 hout = (struct aoe_hdr *) skb_mac_header(f->skb);
967 ahout = (struct aoe_atahdr *) (hout+1);
968 buf = f->buf;
969 skb = f->r_skb;
970 if (skb == NULL)
971 goto noskb; /* just fail the buf. */
972
973 hin = (struct aoe_hdr *) skb->data;
974 skb_pull(skb, sizeof(*hin));
975 ahin = (struct aoe_atahdr *) skb->data;
976 skb_pull(skb, sizeof(*ahin));
977 if (ahin->cmdstat & 0xa9) { /* these bits cleared on success */
978 pr_err("aoe: ata error cmd=%2.2Xh stat=%2.2Xh from e%ld.%d\n",
979 ahout->cmdstat, ahin->cmdstat,
980 d->aoemajor, d->aoeminor);
981 noskb: if (buf)
982 clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
983 goto badrsp;
984 }
985
986 n = ahout->scnt << 9;
987 switch (ahout->cmdstat) {
988 case ATA_CMD_PIO_READ:
989 case ATA_CMD_PIO_READ_EXT:
990 if (skb->len < n) {
991 pr_err("aoe: runt data size in read. skb->len=%d need=%ld\n",
992 skb->len, n);
993 clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
994 break;
995 }
996 bvcpy(f->bv, f->bv_off, skb, n);
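		/* fall through: reads share the per-interface bookkeeping below */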
997 case ATA_CMD_PIO_WRITE:
998 case ATA_CMD_PIO_WRITE_EXT:
999 spin_lock_irq(&d->lock);
1000 ifp = getif(t, skb->dev);
1001 if (ifp)
1002 ifp->lost = 0;
1003 if (d->htgt == t) /* I'll help myself, thank you. */
1004 d->htgt = NULL;
1005 spin_unlock_irq(&d->lock);
1006 break;
1007 case ATA_CMD_ID_ATA:
1008 if (skb->len < 512) {
1009 pr_info("aoe: runt data size in ataid. skb->len=%d\n",
1010 skb->len);
1011 break;
1012 }
1013 if (skb_linearize(skb))
1014 break;
1015 spin_lock_irq(&d->lock);
1016 ataid_complete(d, t, skb->data);
1017 spin_unlock_irq(&d->lock);
1018 break;
1019 default:
1020 pr_info("aoe: unrecognized ata command %2.2Xh for %d.%d\n",
1021 ahout->cmdstat,
1022 be16_to_cpu(get_unaligned(&hin->major)),
1023 hin->minor);
1024 }
1025 badrsp:
1026 spin_lock_irq(&d->lock);
1027
1028 aoe_freetframe(f);
1029
1030 if (buf && --buf->nframesout == 0 && buf->resid == 0)
1031 aoe_end_buf(d, buf);
1032
1033 aoecmd_work(d);
1034
1035 spin_unlock_irq(&d->lock);
1036 aoedev_put(d);
1037 dev_kfree_skb(skb);
1038 }
1039
1040 /* Enters with iocq.lock held.
1041 * Returns true iff responses needing processing remain.
1042 */
1043 static int
1044 ktio(void)
1045 {
1046 struct frame *f;
1047 struct list_head *pos;
1048 int i;
1049
1050 for (i = 0; ; ++i) {
1051 if (i == MAXIOC)
1052 return 1;
1053 if (list_empty(&iocq.head))
1054 return 0;
1055 pos = iocq.head.next;
1056 list_del(pos);
1057 spin_unlock_irq(&iocq.lock);
1058 f = list_entry(pos, struct frame, head);
1059 ktiocomplete(f);
1060 spin_lock_irq(&iocq.lock);
1061 }
1062 }
1063
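/* Worker loop shared via struct ktstate: call k->fn() under k->lock
 * and, when it reports no more work, sleep on k->waitq until
 * ktcomplete() wakes it or the thread is stopped.
 */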
1064 static int
1065 kthread(void *vp)
1066 {
1067 struct ktstate *k;
1068 DECLARE_WAITQUEUE(wait, current);
1069 int more;
1070
1071 k = vp;
1072 current->flags |= PF_NOFREEZE;
1073 set_user_nice(current, -10);
1074 complete(&k->rendez); /* tell spawner we're running */
1075 do {
1076 spin_lock_irq(k->lock);
1077 more = k->fn();
1078 if (!more) {
1079 add_wait_queue(k->waitq, &wait);
1080 __set_current_state(TASK_INTERRUPTIBLE);
1081 }
1082 spin_unlock_irq(k->lock);
1083 if (!more) {
1084 schedule();
1085 remove_wait_queue(k->waitq, &wait);
1086 } else
1087 cond_resched();
1088 } while (!kthread_should_stop());
1089 complete(&k->rendez); /* tell spawner we're stopping */
1090 return 0;
1091 }
1092
1093 void
1094 aoe_ktstop(struct ktstate *k)
1095 {
1096 kthread_stop(k->task);
1097 wait_for_completion(&k->rendez);
1098 }
1099
1100 int
1101 aoe_ktstart(struct ktstate *k)
1102 {
1103 struct task_struct *task;
1104
1105 init_completion(&k->rendez);
1106 task = kthread_run(kthread, k, k->name);
1107 if (task == NULL || IS_ERR(task))
1108 return -ENOMEM;
1109 k->task = task;
1110 wait_for_completion(&k->rendez); /* allow kthread to start */
1111 init_completion(&k->rendez); /* for waiting for exit later */
1112 return 0;
1113 }
1114
1115 /* pass it off to kthreads for processing */
1116 static void
1117 ktcomplete(struct frame *f, struct sk_buff *skb)
1118 {
1119 ulong flags;
1120
1121 f->r_skb = skb;
1122 spin_lock_irqsave(&iocq.lock, flags);
1123 list_add_tail(&f->head, &iocq.head);
1124 spin_unlock_irqrestore(&iocq.lock, flags);
1125 wake_up(&ktiowq);
1126 }
1127
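/* Match an ATA response to its outstanding frame by tag, update the
 * RTT estimate and the target's window, and queue the frame for the
 * ktio thread to complete.  Returns the skb for the caller to free
 * when no device or frame matches, NULL when the response is consumed.
 */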
1128 struct sk_buff *
1129 aoecmd_ata_rsp(struct sk_buff *skb)
1130 {
1131 struct aoedev *d;
1132 struct aoe_hdr *h;
1133 struct frame *f;
1134 struct aoetgt *t;
1135 u32 n;
1136 ulong flags;
1137 char ebuf[128];
1138 u16 aoemajor;
1139
1140 h = (struct aoe_hdr *) skb->data;
1141 aoemajor = be16_to_cpu(get_unaligned(&h->major));
1142 d = aoedev_by_aoeaddr(aoemajor, h->minor, 0);
1143 if (d == NULL) {
1144 snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response "
1145 "for unknown device %d.%d\n",
1146 aoemajor, h->minor);
1147 aoechr_error(ebuf);
1148 return skb;
1149 }
1150
1151 spin_lock_irqsave(&d->lock, flags);
1152
1153 n = be32_to_cpu(get_unaligned(&h->tag));
1154 f = getframe(d, n);
1155 if (f == NULL) {
1156 calc_rttavg(d, -tsince(n));
1157 spin_unlock_irqrestore(&d->lock, flags);
1158 aoedev_put(d);
1159 snprintf(ebuf, sizeof ebuf,
1160 "%15s e%d.%d tag=%08x@%08lx\n",
1161 "unexpected rsp",
1162 get_unaligned_be16(&h->major),
1163 h->minor,
1164 get_unaligned_be32(&h->tag),
1165 jiffies);
1166 aoechr_error(ebuf);
1167 return skb;
1168 }
1169 t = f->t;
1170 calc_rttavg(d, tsince(f->tag));
1171 t->nout--;
1172 aoecmd_work(d);
1173
1174 spin_unlock_irqrestore(&d->lock, flags);
1175
1176 ktcomplete(f, skb);
1177
1178 /*
1179 * Note here that we do not perform an aoedev_put, as we are
1180 * leaving this reference for the ktio to release.
1181 */
1182 return NULL;
1183 }
1184
1185 void
1186 aoecmd_cfg(ushort aoemajor, unsigned char aoeminor)
1187 {
1188 struct sk_buff_head queue;
1189
1190 __skb_queue_head_init(&queue);
1191 aoecmd_cfg_pkts(aoemajor, aoeminor, &queue);
1192 aoenet_xmit(&queue);
1193 }
1194
1195 struct sk_buff *
1196 aoecmd_ata_id(struct aoedev *d)
1197 {
1198 struct aoe_hdr *h;
1199 struct aoe_atahdr *ah;
1200 struct frame *f;
1201 struct sk_buff *skb;
1202 struct aoetgt *t;
1203
1204 f = newframe(d);
1205 if (f == NULL)
1206 return NULL;
1207
1208 t = *d->tgt;
1209
1210 /* initialize the headers & frame */
1211 skb = f->skb;
1212 h = (struct aoe_hdr *) skb_mac_header(skb);
1213 ah = (struct aoe_atahdr *) (h+1);
1214 skb_put(skb, sizeof *h + sizeof *ah);
1215 memset(h, 0, skb->len);
1216 f->tag = aoehdr_atainit(d, t, h);
1217 fhash(f);
1218 t->nout++;
1219 f->waited = 0;
1220
1221 /* set up ata header */
1222 ah->scnt = 1;
1223 ah->cmdstat = ATA_CMD_ID_ATA;
1224 ah->lba3 = 0xa0;
1225
1226 skb->dev = t->ifp->nd;
1227
1228 d->rttavg = MAXTIMER;
1229 d->timer.function = rexmit_timer;
1230
1231 return skb_clone(skb, GFP_ATOMIC);
1232 }
1233
1234 static struct aoetgt *
1235 addtgt(struct aoedev *d, char *addr, ulong nframes)
1236 {
1237 struct aoetgt *t, **tt, **te;
1238
1239 tt = d->targets;
1240 te = tt + NTARGETS;
1241 for (; tt < te && *tt; tt++)
1242 ;
1243
1244 if (tt == te) {
1245 printk(KERN_INFO
1246 "aoe: device addtgt failure; too many targets\n");
1247 return NULL;
1248 }
1249 t = kzalloc(sizeof(*t), GFP_ATOMIC);
1250 if (!t) {
1251 printk(KERN_INFO "aoe: cannot allocate memory to add target\n");
1252 return NULL;
1253 }
1254
1255 d->ntargets++;
1256 t->nframes = nframes;
1257 t->d = d;
1258 memcpy(t->addr, addr, sizeof t->addr);
1259 t->ifp = t->ifs;
1260 t->maxout = t->nframes;
1261 INIT_LIST_HEAD(&t->ffree);
1262 return *tt = t;
1263 }
1264
1265 static void
1266 setdbcnt(struct aoedev *d)
1267 {
1268 struct aoetgt **t, **e;
1269 int bcnt = 0;
1270
1271 t = d->targets;
1272 e = t + NTARGETS;
1273 for (; t < e && *t; t++)
1274 if (bcnt == 0 || bcnt > (*t)->minbcnt)
1275 bcnt = (*t)->minbcnt;
1276 if (bcnt != d->maxbcnt) {
1277 d->maxbcnt = bcnt;
1278 pr_info("aoe: e%ld.%d: setting %d byte data frames\n",
1279 d->aoemajor, d->aoeminor, bcnt);
1280 }
1281 }
1282
1283 static void
1284 setifbcnt(struct aoetgt *t, struct net_device *nd, int bcnt)
1285 {
1286 struct aoedev *d;
1287 struct aoeif *p, *e;
1288 int minbcnt;
1289
1290 d = t->d;
1291 minbcnt = bcnt;
1292 p = t->ifs;
1293 e = p + NAOEIFS;
1294 for (; p < e; p++) {
1295 if (p->nd == NULL)
1296 break; /* end of the valid interfaces */
1297 if (p->nd == nd) {
1298 p->bcnt = bcnt; /* we're updating */
1299 nd = NULL;
1300 } else if (minbcnt > p->bcnt)
1301 minbcnt = p->bcnt; /* find the min interface */
1302 }
1303 if (nd) {
1304 if (p == e) {
1305 pr_err("aoe: device setifbcnt failure; too many interfaces.\n");
1306 return;
1307 }
1308 dev_hold(nd);
1309 p->nd = nd;
1310 p->bcnt = bcnt;
1311 }
1312 t->minbcnt = minbcnt;
1313 setdbcnt(d);
1314 }
1315
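/* Handle an AoE config response: validate the shelf and slot, find or
 * add the target for the source MAC, size data frames from the
 * interface MTU and the target's advertised sectors-per-frame limit,
 * and issue an ATA identify if nobody has the device open yet.
 */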
1316 void
1317 aoecmd_cfg_rsp(struct sk_buff *skb)
1318 {
1319 struct aoedev *d;
1320 struct aoe_hdr *h;
1321 struct aoe_cfghdr *ch;
1322 struct aoetgt *t;
1323 ulong flags, aoemajor;
1324 struct sk_buff *sl;
1325 struct sk_buff_head queue;
1326 u16 n;
1327
1328 sl = NULL;
1329 h = (struct aoe_hdr *) skb_mac_header(skb);
1330 ch = (struct aoe_cfghdr *) (h+1);
1331
1332 /*
1333 * Enough people have their dip switches set backwards to
1334 * warrant a loud message for this special case.
1335 */
1336 aoemajor = get_unaligned_be16(&h->major);
1337 if (aoemajor == 0xfff) {
1338 printk(KERN_ERR "aoe: Warning: shelf address is all ones. "
1339 "Check shelf dip switches.\n");
1340 return;
1341 }
1342 if (aoemajor == 0xffff) {
1343 pr_info("aoe: e%ld.%d: broadcast shelf number invalid\n",
1344 aoemajor, (int) h->minor);
1345 return;
1346 }
1347 if (h->minor == 0xff) {
1348 pr_info("aoe: e%ld.%d: broadcast slot number invalid\n",
1349 aoemajor, (int) h->minor);
1350 return;
1351 }
1352
1353 n = be16_to_cpu(ch->bufcnt);
1354 if (n > aoe_maxout) /* keep it reasonable */
1355 n = aoe_maxout;
1356
1357 d = aoedev_by_aoeaddr(aoemajor, h->minor, 1);
1358 if (d == NULL) {
1359 pr_info("aoe: device allocation failure\n");
1360 return;
1361 }
1362
1363 spin_lock_irqsave(&d->lock, flags);
1364
1365 t = gettgt(d, h->src);
1366 if (t) {
1367 t->nframes = n;
1368 if (n < t->maxout)
1369 t->maxout = n;
1370 } else {
1371 t = addtgt(d, h->src, n);
1372 if (!t)
1373 goto bail;
1374 }
1375 n = skb->dev->mtu;
1376 n -= sizeof(struct aoe_hdr) + sizeof(struct aoe_atahdr);
1377 n /= 512;
1378 if (n > ch->scnt)
1379 n = ch->scnt;
1380 n = n ? n * 512 : DEFAULTBCNT;
1381 setifbcnt(t, skb->dev, n);
1382
1383 /* don't change users' perspective */
1384 if (d->nopen == 0) {
1385 d->fw_ver = be16_to_cpu(ch->fwver);
1386 sl = aoecmd_ata_id(d);
1387 }
1388 bail:
1389 spin_unlock_irqrestore(&d->lock, flags);
1390 aoedev_put(d);
1391 if (sl) {
1392 __skb_queue_head_init(&queue);
1393 __skb_queue_tail(&queue, sl);
1394 aoenet_xmit(&queue);
1395 }
1396 }
1397
1398 void
1399 aoecmd_cleanslate(struct aoedev *d)
1400 {
1401 struct aoetgt **t, **te;
1402
1403 d->mintimer = MINTIMER;
1404 d->maxbcnt = 0;
1405
1406 t = d->targets;
1407 te = t + NTARGETS;
1408 for (; t < te && *t; t++)
1409 (*t)->maxout = (*t)->nframes;
1410 }
1411
1412 void
1413 aoe_failbuf(struct aoedev *d, struct buf *buf)
1414 {
1415 if (buf == NULL)
1416 return;
1417 buf->resid = 0;
1418 clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
1419 if (buf->nframesout == 0)
1420 aoe_end_buf(d, buf);
1421 }
1422
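/* Drain the io completion queue without processing: fail each frame's
 * buf, free the frame and its response skb, and drop the device
 * reference taken for it.  Called from aoecmd_exit() after the ktio
 * thread has been stopped.
 */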
1423 void
1424 aoe_flush_iocq(void)
1425 {
1426 struct frame *f;
1427 struct aoedev *d;
1428 LIST_HEAD(flist);
1429 struct list_head *pos;
1430 struct sk_buff *skb;
1431 ulong flags;
1432
1433 spin_lock_irqsave(&iocq.lock, flags);
1434 list_splice_init(&iocq.head, &flist);
1435 spin_unlock_irqrestore(&iocq.lock, flags);
1436 while (!list_empty(&flist)) {
1437 pos = flist.next;
1438 list_del(pos);
1439 f = list_entry(pos, struct frame, head);
1440 d = f->t->d;
1441 skb = f->r_skb;
1442 spin_lock_irqsave(&d->lock, flags);
1443 if (f->buf) {
1444 f->buf->nframesout--;
1445 aoe_failbuf(d, f->buf);
1446 }
1447 aoe_freetframe(f);
1448 spin_unlock_irqrestore(&d->lock, flags);
1449 dev_kfree_skb(skb);
1450 aoedev_put(d);
1451 }
1452 }
1453
1454 int __init
1455 aoecmd_init(void)
1456 {
1457 INIT_LIST_HEAD(&iocq.head);
1458 spin_lock_init(&iocq.lock);
1459 init_waitqueue_head(&ktiowq);
1460 kts.name = "aoe_ktio";
1461 kts.fn = ktio;
1462 kts.waitq = &ktiowq;
1463 kts.lock = &iocq.lock;
1464 return aoe_ktstart(&kts);
1465 }
1466
1467 void
1468 aoecmd_exit(void)
1469 {
1470 aoe_ktstop(&kts);
1471 aoe_flush_iocq();
1472 }