/* Copyright (c) 2012 Coraid, Inc.  See COPYING for GPL terms. */
/*
 * aoecmd.c
 * Filesystem request handling methods
 */
#include <linux/ata.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/genhd.h>
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <net/net_namespace.h>
#include <asm/unaligned.h>
#include <linux/uio.h>
#include "aoe.h"

#define MAXIOC (8192)	/* default meant to avoid most soft lockups */

static void ktcomplete(struct frame *, struct sk_buff *);
static struct buf *nextbuf(struct aoedev *);
static int aoe_deadsecs = 60 * 3;
module_param(aoe_deadsecs, int, 0644);
MODULE_PARM_DESC(aoe_deadsecs, "After aoe_deadsecs seconds, give up and fail dev.");

static int aoe_maxout = 16;
module_param(aoe_maxout, int, 0644);
MODULE_PARM_DESC(aoe_maxout,
	"Only aoe_maxout outstanding packets for every MAC on eX.Y.");
static wait_queue_head_t ktiowq;
static struct ktstate kts;

/* io completion queue */
static struct {
	struct list_head head;
	spinlock_t lock;
} iocq;
static struct sk_buff *
new_skb(ulong len)
{
	struct sk_buff *skb;

	skb = alloc_skb(len, GFP_ATOMIC);
	if (skb) {
		skb_reset_mac_header(skb);
		skb_reset_network_header(skb);
		skb->protocol = __constant_htons(ETH_P_AOE);
		skb_checksum_none_assert(skb);
	}
	return skb;
}
static struct frame *
getframe(struct aoedev *d, u32 tag)
{
	struct frame *f;
	struct list_head *head, *pos, *nx;
	u32 n;

	n = tag % NFACTIVE;
	head = &d->factive[n];
	list_for_each_safe(pos, nx, head) {
		f = list_entry(pos, struct frame, head);
		if (f->tag == tag) {
			list_del(pos);
			return f;
		}
	}
	return NULL;
}
/*
 * Leave the top bit clear so we have tagspace for userland.
 * The bottom 16 bits are the xmit tick for rexmit/rttavg processing.
 * This driver reserves tag -1 to mean "unused frame."
 */
static int
newtag(struct aoedev *d)
{
	register ulong n;

	n = jiffies & 0xffff;
	return n |= (++d->lasttag & 0x7fff) << 16;
}
static u32
aoehdr_atainit(struct aoedev *d, struct aoetgt *t, struct aoe_hdr *h)
{
	u32 host_tag = newtag(d);

	memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);
	memcpy(h->dst, t->addr, sizeof h->dst);
	h->type = __constant_cpu_to_be16(ETH_P_AOE);
	h->verfl = AOE_HVER;
	h->major = cpu_to_be16(d->aoemajor);
	h->minor = d->aoeminor;
	h->cmd = AOECMD_ATA;
	h->tag = cpu_to_be32(host_tag);

	return host_tag;
}
static inline void
put_lba(struct aoe_atahdr *ah, sector_t lba)
{
	ah->lba0 = lba;
	ah->lba1 = lba >>= 8;
	ah->lba2 = lba >>= 8;
	ah->lba3 = lba >>= 8;
	ah->lba4 = lba >>= 8;
	ah->lba5 = lba >>= 8;
}
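
/*
 * Worked example (illustration only): for lba == 0x00123456, the
 * successive ">>= 8" stores give lba0=0x56, lba1=0x34, lba2=0x12 and
 * lba3..lba5 zero; the six bytes together hold a 48-bit LBA.
 */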
static struct aoeif *
ifrotate(struct aoetgt *t)
{
	struct aoeif *ifp;

	ifp = t->ifp;
	ifp++;
	if (ifp >= &t->ifs[NAOEIFS] || ifp->nd == NULL)
		ifp = t->ifs;
	if (ifp->nd == NULL)
		return NULL;
	return t->ifp = ifp;
}
static void
skb_pool_put(struct aoedev *d, struct sk_buff *skb)
{
	__skb_queue_tail(&d->skbpool, skb);
}
static struct sk_buff *
skb_pool_get(struct aoedev *d)
{
	struct sk_buff *skb = skb_peek(&d->skbpool);

	if (skb && atomic_read(&skb_shinfo(skb)->dataref) == 1) {
		__skb_unlink(skb, &d->skbpool);
		return skb;
	}
	if (skb_queue_len(&d->skbpool) < NSKBPOOLMAX &&
	    (skb = new_skb(ETH_ZLEN)))
		return skb;

	return NULL;
}
static void
aoe_freetframe(struct frame *f)
{
	struct aoetgt *t;

	t = f->t;
	f->buf = NULL;
	f->r_skb = NULL;
	list_add(&f->head, &t->ffree);
}
static struct frame *
newtframe(struct aoedev *d, struct aoetgt *t)
{
	struct frame *f;
	struct sk_buff *skb;
	struct list_head *pos;

	if (list_empty(&t->ffree)) {
		if (t->falloc >= NSKBPOOLMAX*2)
			return NULL;
		f = kcalloc(1, sizeof(*f), GFP_ATOMIC);
		if (f == NULL)
			return NULL;
		t->falloc++;
		f->t = t;
	} else {
		pos = t->ffree.next;
		list_del(pos);
		f = list_entry(pos, struct frame, head);
	}

	skb = f->skb;
	if (skb == NULL) {
		f->skb = skb = new_skb(ETH_ZLEN);
		if (!skb) {
bail:			aoe_freetframe(f);
			return NULL;
		}
	}

	if (atomic_read(&skb_shinfo(skb)->dataref) != 1) {
		skb = skb_pool_get(d);
		if (skb == NULL)
			goto bail;
		skb_pool_put(d, f->skb);
		f->skb = skb;
	}

	skb->truesize -= skb->data_len;
	skb_shinfo(skb)->nr_frags = skb->data_len = 0;
	skb_trim(skb, 0);
	return f;
}
static struct frame *
newframe(struct aoedev *d)
{
	struct frame *f;
	struct aoetgt *t, **tt;

	if (d->targets[0] == NULL) {	/* shouldn't happen, but I'm paranoid */
		printk(KERN_ERR "aoe: NULL TARGETS!\n");
		return NULL;
	}
	tt = d->tgt;	/* last used target */
	for (;;) {
		tt++;
		if (tt >= &d->targets[NTARGETS] || !*tt)
			tt = d->targets;
		t = *tt;
		if (t->nout < t->maxout
		&& t != d->htgt
		&& t->ifp->nd) {
			f = newtframe(d, t);
			if (f) {
				ifrotate(t);
				d->tgt = tt;
				return f;
			}
		}
		if (tt == d->tgt)	/* we've looped and found nada */
			break;
	}
	d->flags |= DEVFL_KICKME;
	return NULL;
}
static void
skb_fillup(struct sk_buff *skb, struct bio_vec *bv, ulong off, ulong cnt)
{
	int frag = 0;
	ulong fcnt;
loop:
	fcnt = bv->bv_len - (off - bv->bv_offset);
	if (fcnt > cnt)
		fcnt = cnt;
	skb_fill_page_desc(skb, frag++, bv->bv_page, off, fcnt);
	cnt -= fcnt;
	if (cnt <= 0)
		return;
	bv++;
	off = bv->bv_offset;
	goto loop;
}
static void
fhash(struct frame *f)
{
	struct aoedev *d = f->t->d;
	u32 n;

	n = f->tag % NFACTIVE;
	list_add_tail(&f->head, &d->factive[n]);
}
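
/*
 * Illustration (not in the original): frames hash by tag, so a response
 * for tag t only has to search d->factive[t % NFACTIVE]; getframe()
 * recomputes the same bucket index from the tag it receives.
 */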
static int
aoecmd_ata_rw(struct aoedev *d)
{
	struct frame *f;
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct buf *buf;
	struct aoetgt *t;
	struct sk_buff *skb;
	struct sk_buff_head queue;
	ulong bcnt, fbcnt;
	char writebit, extbit;

	writebit = 0x10;
	extbit = 0x4;

	buf = nextbuf(d);
	if (buf == NULL)
		return 0;
	f = newframe(d);
	if (f == NULL)
		return 0;
	t = *d->tgt;
	bcnt = d->maxbcnt;
	if (bcnt == 0)
		bcnt = DEFAULTBCNT;
	if (bcnt > buf->resid)
		bcnt = buf->resid;
	fbcnt = bcnt;
	f->bv = buf->bv;
	f->bv_off = f->bv->bv_offset + (f->bv->bv_len - buf->bv_resid);
	do {
		if (fbcnt < buf->bv_resid) {
			buf->bv_resid -= fbcnt;
			buf->resid -= fbcnt;
			break;
		}
		fbcnt -= buf->bv_resid;
		buf->resid -= buf->bv_resid;
		if (buf->resid == 0) {
			d->ip.buf = NULL;
			break;
		}
		buf->bv++;
		buf->bv_resid = buf->bv->bv_len;
		WARN_ON(buf->bv_resid == 0);
	} while (fbcnt);

	/* initialize the headers & frame */
	skb = f->skb;
	h = (struct aoe_hdr *) skb_mac_header(skb);
	ah = (struct aoe_atahdr *) (h+1);
	skb_put(skb, sizeof *h + sizeof *ah);
	memset(h, 0, skb->len);
	f->tag = aoehdr_atainit(d, t, h);
	fhash(f);
	t->nout++;
	f->waited = 0;
	f->buf = buf;
	f->bcnt = bcnt;
	f->lba = buf->sector;

	/* set up ata header */
	ah->scnt = bcnt >> 9;
	put_lba(ah, buf->sector);
	if (d->flags & DEVFL_EXT) {
		ah->aflags |= AOEAFL_EXT;
	} else {
		extbit = 0;
		ah->lba3 &= 0x0f;
		ah->lba3 |= 0xe0;	/* LBA bit + obsolete 0xa0 */
	}
	if (bio_data_dir(buf->bio) == WRITE) {
		skb_fillup(skb, f->bv, f->bv_off, bcnt);
		ah->aflags |= AOEAFL_WRITE;
		skb->len += bcnt;
		skb->data_len = bcnt;
		skb->truesize += bcnt;
		t->wpkts++;
	} else {
		t->rpkts++;
		writebit = 0;
	}

	ah->cmdstat = ATA_CMD_PIO_READ | writebit | extbit;

	/* mark all tracking fields and load out */
	buf->nframesout += 1;
	buf->sector += bcnt >> 9;

	skb->dev = t->ifp->nd;
	skb = skb_clone(skb, GFP_ATOMIC);
	if (skb) {
		__skb_queue_head_init(&queue);
		__skb_queue_tail(&queue, skb);
		aoenet_xmit(&queue);
	}
	return 1;
}
/* some callers cannot sleep, and they can call this function,
 * transmitting the packets later, when interrupts are on
 */
static void
aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *queue)
{
	struct aoe_hdr *h;
	struct aoe_cfghdr *ch;
	struct sk_buff *skb;
	struct net_device *ifp;

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, ifp) {
		dev_hold(ifp);
		if (!is_aoe_netif(ifp))
			goto cont;

		skb = new_skb(sizeof *h + sizeof *ch);
		if (skb == NULL) {
			printk(KERN_INFO "aoe: skb alloc failure\n");
			goto cont;
		}
		skb_put(skb, sizeof *h + sizeof *ch);
		skb->dev = ifp;
		__skb_queue_tail(queue, skb);
		h = (struct aoe_hdr *) skb_mac_header(skb);
		memset(h, 0, sizeof *h + sizeof *ch);

		memset(h->dst, 0xff, sizeof h->dst);
		memcpy(h->src, ifp->dev_addr, sizeof h->src);
		h->type = __constant_cpu_to_be16(ETH_P_AOE);
		h->verfl = AOE_HVER;
		h->major = cpu_to_be16(aoemajor);
		h->minor = aoeminor;
		h->cmd = AOECMD_CFG;

cont:
		dev_put(ifp);
	}
	rcu_read_unlock();
}
static void
resend(struct aoedev *d, struct frame *f)
{
	struct sk_buff *skb;
	struct sk_buff_head queue;
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct aoetgt *t;
	char buf[128];
	u32 n;

	t = f->t;
	n = newtag(d);
	skb = f->skb;
	if (ifrotate(t) == NULL) {
		/* probably can't happen, but set it up to fail anyway */
		pr_info("aoe: resend: no interfaces to rotate to.\n");
		ktcomplete(f, NULL);
		return;
	}
	h = (struct aoe_hdr *) skb_mac_header(skb);
	ah = (struct aoe_atahdr *) (h+1);

	snprintf(buf, sizeof buf,
		"%15s e%ld.%d oldtag=%08x@%08lx newtag=%08x s=%pm d=%pm nout=%d\n",
		"retransmit", d->aoemajor, d->aoeminor, f->tag, jiffies, n,
		h->src, h->dst, t->nout);
	aoechr_error(buf);

	f->tag = n;
	fhash(f);
	h->tag = cpu_to_be32(n);
	memcpy(h->dst, t->addr, sizeof h->dst);
	memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);

	skb->dev = t->ifp->nd;
	skb = skb_clone(skb, GFP_ATOMIC);
	if (skb == NULL)
		return;
	__skb_queue_head_init(&queue);
	__skb_queue_tail(&queue, skb);
	aoenet_xmit(&queue);
}

static int
tsince(u32 tag)
{
	int n;

	n = jiffies & 0xffff;
	n -= tag & 0xffff;
	if (n < 0)
		n += 1<<16;
	return n;
}
static struct aoeif *
getif(struct aoetgt *t, struct net_device *nd)
{
	struct aoeif *p, *e;

	p = t->ifs;
	e = p + NAOEIFS;
	for (; p < e; p++)
		if (p->nd == nd)
			return p;
	return NULL;
}
static void
ejectif(struct aoetgt *t, struct aoeif *ifp)
{
	struct aoeif *e;
	struct net_device *nd;
	ulong n;

	nd = ifp->nd;
	e = t->ifs + NAOEIFS - 1;
	n = (e - ifp) * sizeof *ifp;
	memmove(ifp, ifp+1, n);
	e->nd = NULL;
	dev_put(nd);
}
static int
sthtith(struct aoedev *d)
{
	struct frame *f, *nf;
	struct list_head *nx, *pos, *head;
	struct sk_buff *skb;
	struct aoetgt *ht = d->htgt;
	int i;

	for (i = 0; i < NFACTIVE; i++) {
		head = &d->factive[i];
		list_for_each_safe(pos, nx, head) {
			f = list_entry(pos, struct frame, head);
			if (f->t != ht)
				continue;

			nf = newframe(d);
			if (!nf)
				return 0;

			/* remove frame from active list */
			list_del(pos);

			/* reassign all pertinent bits to new outbound frame */
			skb = nf->skb;
			nf->skb = f->skb;
			nf->buf = f->buf;
			nf->bcnt = f->bcnt;
			nf->lba = f->lba;
			nf->bv = f->bv;
			nf->bv_off = f->bv_off;
			nf->waited = 0;
			f->skb = skb;
			aoe_freetframe(f);
			f->t->nout--;
			resend(d, nf);
		}
	}
	/* We've cleaned up the outstanding so take away his
	 * interfaces so he won't be used.  We should remove him from
	 * the target array here, but cleaning up a target is
	 * involved.  PUNT!
	 */
	memset(ht->ifs, 0, sizeof ht->ifs);
	d->htgt = NULL;

	return 1;
}
static void
rexmit_timer(ulong vp)
{
	struct aoedev *d;
	struct aoetgt *t, **tt, **te;
	struct aoeif *ifp;
	struct frame *f;
	struct list_head *head, *pos, *nx;
	LIST_HEAD(flist);
	register long timeout;
	ulong flags, n;
	int i;

	d = (struct aoedev *) vp;

	/* timeout is always ~150% of the moving average */
	timeout = d->rttavg;
	timeout += timeout >> 1;

	spin_lock_irqsave(&d->lock, flags);

	if (d->flags & DEVFL_TKILL) {
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}

	/* collect all frames to rexmit into flist */
	for (i = 0; i < NFACTIVE; i++) {
		head = &d->factive[i];
		list_for_each_safe(pos, nx, head) {
			f = list_entry(pos, struct frame, head);
			if (tsince(f->tag) < timeout)
				break;	/* end of expired frames */
			/* move to flist for later processing */
			list_move_tail(pos, &flist);
		}
	}
	/* window check */
	tt = d->targets;
	te = tt + d->ntargets;
	for (; tt < te && (t = *tt); tt++) {
		if (t->nout == t->maxout
		&& t->maxout < t->nframes
		&& (jiffies - t->lastwadj)/HZ > 10) {
			t->maxout++;
			t->lastwadj = jiffies;
		}
	}

	if (!list_empty(&flist)) {	/* retransmissions necessary */
		n = d->rttavg <<= 1;
		if (n > MAXTIMER)
			d->rttavg = MAXTIMER;
	}

	/* process expired frames */
	while (!list_empty(&flist)) {
		pos = flist.next;
		f = list_entry(pos, struct frame, head);
		n = f->waited += timeout;
		n /= HZ;
		if (n > aoe_deadsecs) {
			/* Waited too long.  Device failure.
			 * Hang all frames on first hash bucket for downdev
			 * to clean up.
			 */
			list_splice(&flist, &d->factive[0]);
			aoedev_downdev(d);
			break;
		}
		list_del(pos);

		t = f->t;
		if (n > aoe_deadsecs/2)
			d->htgt = t; /* see if another target can help */

		if (t->nout == t->maxout) {
			if (t->maxout > 1)
				t->maxout--;
			t->lastwadj = jiffies;
		}

		ifp = getif(t, f->skb->dev);
		if (ifp && ++ifp->lost > (t->nframes << 1)
		&& (ifp != t->ifs || t->ifs[1].nd)) {
			ejectif(t, ifp);
			ifp = NULL;
		}
		resend(d, f);
	}

	if ((d->flags & DEVFL_KICKME || d->htgt) && d->blkq) {
		d->flags &= ~DEVFL_KICKME;
		d->blkq->request_fn(d->blkq);
	}

	d->timer.expires = jiffies + TIMERTICK;
	add_timer(&d->timer);

	spin_unlock_irqrestore(&d->lock, flags);
}
static unsigned long
rqbiocnt(struct request *r)
{
	struct bio *bio;
	unsigned long n = 0;

	__rq_for_each_bio(bio, r)
		n++;
	return n;
}
/* This can be removed if we are certain that no users of the block
 * layer will ever use zero-count pages in bios.  Otherwise we have to
 * protect against the put_page sometimes done by the network layer.
 *
 * See http://oss.sgi.com/archives/xfs/2007-01/msg00594.html for
 * discussion.
 *
 * We cannot use get_page in the workaround, because it insists on a
 * positive page count as a precondition.  So we use _count directly.
 */
static void
bio_pageinc(struct bio *bio)
{
	struct bio_vec *bv;
	struct page *page;
	int i;

	bio_for_each_segment(bv, bio, i) {
		page = bv->bv_page;
		/* Non-zero page count for non-head members of
		 * compound pages is no longer allowed by the kernel,
		 * but this has never been seen here.
		 */
		if (unlikely(PageCompound(page)))
			if (compound_trans_head(page) != page) {
				pr_crit("page tail used for block I/O\n");
				BUG();
			}
		atomic_inc(&page->_count);
	}
}
static void
bio_pagedec(struct bio *bio)
{
	struct bio_vec *bv;
	int i;

	bio_for_each_segment(bv, bio, i)
		atomic_dec(&bv->bv_page->_count);
}
static void
bufinit(struct buf *buf, struct request *rq, struct bio *bio)
{
	struct bio_vec *bv;

	memset(buf, 0, sizeof(*buf));
	buf->rq = rq;
	buf->bio = bio;
	buf->resid = bio->bi_size;
	buf->sector = bio->bi_sector;
	bio_pageinc(bio);
	buf->bv = bv = &bio->bi_io_vec[bio->bi_idx];
	buf->bv_resid = bv->bv_len;
	WARN_ON(buf->bv_resid == 0);
}
static struct buf *
nextbuf(struct aoedev *d)
{
	struct request *rq;
	struct request_queue *q;
	struct buf *buf;
	struct bio *bio;

	q = d->blkq;
	if (q == NULL)
		return NULL;	/* initializing */
	if (d->ip.buf)
		return d->ip.buf;
	rq = d->ip.rq;
	if (rq == NULL) {
		rq = blk_peek_request(q);
		if (rq == NULL)
			return NULL;
		blk_start_request(rq);
		d->ip.rq = rq;
		d->ip.nxbio = rq->bio;
		rq->special = (void *) rqbiocnt(rq);
	}
	buf = mempool_alloc(d->bufpool, GFP_ATOMIC);
	if (buf == NULL) {
		pr_err("aoe: nextbuf: unable to mempool_alloc!\n");
		return NULL;
	}
	bio = d->ip.nxbio;
	bufinit(buf, rq, bio);
	bio = bio->bi_next;
	d->ip.nxbio = bio;
	if (bio == NULL)
		d->ip.rq = NULL;
	return d->ip.buf = buf;
}
/* enters with d->lock held */
void
aoecmd_work(struct aoedev *d)
{
	if (d->htgt && !sthtith(d))
		return;
	while (aoecmd_ata_rw(d))
		;
}
/* this function performs work that has been deferred until sleeping is OK
 */
void
aoecmd_sleepwork(struct work_struct *work)
{
	struct aoedev *d = container_of(work, struct aoedev, work);
	struct block_device *bd;
	u64 ssize;

	if (d->flags & DEVFL_GDALLOC)
		aoeblk_gdalloc(d);

	if (d->flags & DEVFL_NEWSIZE) {
		ssize = get_capacity(d->gd);
		bd = bdget_disk(d->gd, 0);
		if (bd) {
			mutex_lock(&bd->bd_inode->i_mutex);
			i_size_write(bd->bd_inode, (loff_t)ssize<<9);
			mutex_unlock(&bd->bd_inode->i_mutex);
			bdput(bd);
		}
		spin_lock_irq(&d->lock);
		d->flags |= DEVFL_UP;
		d->flags &= ~DEVFL_NEWSIZE;
		spin_unlock_irq(&d->lock);
	}
}
static void
ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
{
	u64 ssize;
	u16 n;

	/* word 83: command set supported */
	n = get_unaligned_le16(&id[83 << 1]);

	/* word 86: command set/feature enabled */
	n |= get_unaligned_le16(&id[86 << 1]);

	if (n & (1<<10)) {	/* bit 10: LBA 48 */
		d->flags |= DEVFL_EXT;

		/* word 100: number lba48 sectors */
		ssize = get_unaligned_le64(&id[100 << 1]);

		/* set as in ide-disk.c:init_idedisk_capacity */
		d->geo.cylinders = ssize;
		d->geo.cylinders /= (255 * 63);
		d->geo.heads = 255;
		d->geo.sectors = 63;
	} else {
		d->flags &= ~DEVFL_EXT;

		/* number lba28 sectors */
		ssize = get_unaligned_le32(&id[60 << 1]);

		/* NOTE: obsolete in ATA 6 */
		d->geo.cylinders = get_unaligned_le16(&id[54 << 1]);
		d->geo.heads = get_unaligned_le16(&id[55 << 1]);
		d->geo.sectors = get_unaligned_le16(&id[56 << 1]);
	}

	if (d->ssize != ssize)
		printk(KERN_INFO
			"aoe: %pm e%ld.%d v%04x has %llu sectors\n",
			t->addr,
			d->aoemajor, d->aoeminor,
			d->fw_ver, (long long)ssize);
	d->ssize = ssize;
	d->geo.start = 0;
	if (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE))
		return;
	if (d->gd != NULL) {
		set_capacity(d->gd, ssize);
		d->flags |= DEVFL_NEWSIZE;
	} else
		d->flags |= DEVFL_GDALLOC;
	schedule_work(&d->work);
}
static void
calc_rttavg(struct aoedev *d, int rtt)
{
	register long n;

	n = rtt;
	if (n < 0) {
		n = -rtt;
		if (n < MINTIMER)
			n = MINTIMER;
		else if (n > MAXTIMER)
			n = MAXTIMER;
		d->mintimer += (n - d->mintimer) >> 1;
	} else if (n < d->mintimer)
		n = d->mintimer;
	else if (n > MAXTIMER)
		n = MAXTIMER;

	/* g == .25; cf. Congestion Avoidance and Control, Jacobson & Karels; 1988 */
	n -= d->rttavg;
	d->rttavg += n >> 2;
}
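
/*
 * Worked example of the g == .25 filter (illustration only): with
 * d->rttavg == 400 and a new sample n == 800, n -= d->rttavg leaves
 * 400 and d->rttavg += 400 >> 2 moves the average to 500, a quarter of
 * the way toward the sample, as in Jacobson & Karels' EWMA.
 */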
static struct aoetgt *
gettgt(struct aoedev *d, char *addr)
{
	struct aoetgt **t, **e;

	t = d->targets;
	e = t + NTARGETS;
	for (; t < e && *t; t++)
		if (memcmp((*t)->addr, addr, sizeof((*t)->addr)) == 0)
			return *t;
	return NULL;
}
static void
bvcpy(struct bio_vec *bv, ulong off, struct sk_buff *skb, long cnt)
{
	ulong fcnt;
	char *p;
	int soff = 0;
loop:
	fcnt = bv->bv_len - (off - bv->bv_offset);
	if (fcnt > cnt)
		fcnt = cnt;
	p = page_address(bv->bv_page) + off;
	skb_copy_bits(skb, soff, p, fcnt);
	soff += fcnt;
	cnt -= fcnt;
	if (cnt <= 0)
		return;
	bv++;
	off = bv->bv_offset;
	goto loop;
}
static void
aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
{
	int bok;
	struct request_queue *q;
	struct bio *bio;

	q = d->blkq;
	if (rq == d->ip.rq)
		d->ip.rq = NULL;
	do {
		bio = rq->bio;
		bok = !fastfail && test_bit(BIO_UPTODATE, &bio->bi_flags);
	} while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_size));

	/* cf. http://lkml.org/lkml/2006/10/31/28 */
	if (!fastfail)
		__blk_run_queue(q);
}
static void
aoe_end_buf(struct aoedev *d, struct buf *buf)
{
	struct request *rq;
	unsigned long n;

	if (buf == d->ip.buf)
		d->ip.buf = NULL;
	rq = buf->rq;
	bio_pagedec(buf->bio);
	mempool_free(buf, d->bufpool);
	n = (unsigned long) rq->special;
	rq->special = (void *) --n;
	if (n == 0)
		aoe_end_request(d, rq, 0);
}
*f
)
951 struct aoe_hdr
*hin
, *hout
;
952 struct aoe_atahdr
*ahin
, *ahout
;
966 hout
= (struct aoe_hdr
*) skb_mac_header(f
->skb
);
967 ahout
= (struct aoe_atahdr
*) (hout
+1);
971 goto noskb
; /* just fail the buf. */
973 hin
= (struct aoe_hdr
*) skb
->data
;
974 skb_pull(skb
, sizeof(*hin
));
975 ahin
= (struct aoe_atahdr
*) skb
->data
;
976 skb_pull(skb
, sizeof(*ahin
));
977 if (ahin
->cmdstat
& 0xa9) { /* these bits cleared on success */
978 pr_err("aoe: ata error cmd=%2.2Xh stat=%2.2Xh from e%ld.%d\n",
979 ahout
->cmdstat
, ahin
->cmdstat
,
980 d
->aoemajor
, d
->aoeminor
);
982 clear_bit(BIO_UPTODATE
, &buf
->bio
->bi_flags
);
986 n
= ahout
->scnt
<< 9;
987 switch (ahout
->cmdstat
) {
988 case ATA_CMD_PIO_READ
:
989 case ATA_CMD_PIO_READ_EXT
:
991 pr_err("aoe: runt data size in read. skb->len=%d need=%ld\n",
993 clear_bit(BIO_UPTODATE
, &buf
->bio
->bi_flags
);
996 bvcpy(f
->bv
, f
->bv_off
, skb
, n
);
997 case ATA_CMD_PIO_WRITE
:
998 case ATA_CMD_PIO_WRITE_EXT
:
999 spin_lock_irq(&d
->lock
);
1000 ifp
= getif(t
, skb
->dev
);
1003 if (d
->htgt
== t
) /* I'll help myself, thank you. */
1005 spin_unlock_irq(&d
->lock
);
1007 case ATA_CMD_ID_ATA
:
1008 if (skb
->len
< 512) {
1009 pr_info("aoe: runt data size in ataid. skb->len=%d\n",
1013 if (skb_linearize(skb
))
1015 spin_lock_irq(&d
->lock
);
1016 ataid_complete(d
, t
, skb
->data
);
1017 spin_unlock_irq(&d
->lock
);
1020 pr_info("aoe: unrecognized ata command %2.2Xh for %d.%d\n",
1022 be16_to_cpu(get_unaligned(&hin
->major
)),
1026 spin_lock_irq(&d
->lock
);
1030 if (buf
&& --buf
->nframesout
== 0 && buf
->resid
== 0)
1031 aoe_end_buf(d
, buf
);
1035 spin_unlock_irq(&d
->lock
);
/* Enters with iocq.lock held.
 * Returns true iff responses needing processing remain.
 */
static int
ktio(void)
{
	struct frame *f;
	struct list_head *pos;
	int i;

	for (i = 0; ; ++i) {
		if (i == MAXIOC)
			return 1;
		if (list_empty(&iocq.head))
			return 0;
		pos = iocq.head.next;
		list_del(pos);
		spin_unlock_irq(&iocq.lock);
		f = list_entry(pos, struct frame, head);
		ktiocomplete(f);
		spin_lock_irq(&iocq.lock);
	}
}
static int
kthread(void *vp)
{
	struct ktstate *k;
	DECLARE_WAITQUEUE(wait, current);
	int more;

	k = vp;
	current->flags |= PF_NOFREEZE;
	set_user_nice(current, -10);
	complete(&k->rendez);	/* tell spawner we're running */
	do {
		spin_lock_irq(k->lock);
		more = k->fn();
		if (!more) {
			add_wait_queue(k->waitq, &wait);
			__set_current_state(TASK_INTERRUPTIBLE);
		}
		spin_unlock_irq(k->lock);
		if (!more) {
			schedule();
			remove_wait_queue(k->waitq, &wait);
		} else
			cond_resched();
	} while (!kthread_should_stop());
	complete(&k->rendez);	/* tell spawner we're stopping */
	return 0;
}
void
aoe_ktstop(struct ktstate *k)
{
	kthread_stop(k->task);
	wait_for_completion(&k->rendez);
}
int
aoe_ktstart(struct ktstate *k)
{
	struct task_struct *task;

	init_completion(&k->rendez);
	task = kthread_run(kthread, k, k->name);
	if (task == NULL || IS_ERR(task))
		return -EFAULT;
	k->task = task;
	wait_for_completion(&k->rendez);	/* allow kthread to start */
	init_completion(&k->rendez);	/* for waiting for exit later */
	return 0;
}
/* pass it off to kthreads for processing */
static void
ktcomplete(struct frame *f, struct sk_buff *skb)
{
	ulong flags;

	f->r_skb = skb;
	spin_lock_irqsave(&iocq.lock, flags);
	list_add_tail(&f->head, &iocq.head);
	spin_unlock_irqrestore(&iocq.lock, flags);
	wake_up(&ktiowq);
}
struct sk_buff *
aoecmd_ata_rsp(struct sk_buff *skb)
{
	struct aoedev *d;
	struct aoe_hdr *h;
	struct frame *f;
	struct aoetgt *t;
	u32 n;
	ulong flags;
	char ebuf[128];
	u16 aoemajor;

	h = (struct aoe_hdr *) skb->data;
	aoemajor = be16_to_cpu(get_unaligned(&h->major));
	d = aoedev_by_aoeaddr(aoemajor, h->minor, 0);
	if (d == NULL) {
		snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response "
			"for unknown device %d.%d\n",
			aoemajor, h->minor);
		aoechr_error(ebuf);
		return skb;
	}

	spin_lock_irqsave(&d->lock, flags);

	n = be32_to_cpu(get_unaligned(&h->tag));
	f = getframe(d, n);
	if (f == NULL) {
		calc_rttavg(d, -tsince(n));
		spin_unlock_irqrestore(&d->lock, flags);
		aoedev_put(d);
		snprintf(ebuf, sizeof ebuf,
			"%15s e%d.%d    tag=%08x@%08lx\n",
			"unexpected rsp",
			get_unaligned_be16(&h->major),
			h->minor,
			get_unaligned_be32(&h->tag),
			jiffies);
		aoechr_error(ebuf);
		return skb;
	}
	t = f->t;
	calc_rttavg(d, tsince(f->tag));
	t->nout--;
	aoecmd_work(d);

	spin_unlock_irqrestore(&d->lock, flags);

	ktcomplete(f, skb);

	/*
	 * Note here that we do not perform an aoedev_put, as we are
	 * leaving this reference for the ktio to release.
	 */
	return NULL;
}
void
aoecmd_cfg(ushort aoemajor, unsigned char aoeminor)
{
	struct sk_buff_head queue;

	__skb_queue_head_init(&queue);
	aoecmd_cfg_pkts(aoemajor, aoeminor, &queue);
	aoenet_xmit(&queue);
}
struct sk_buff *
aoecmd_ata_id(struct aoedev *d)
{
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct frame *f;
	struct sk_buff *skb;
	struct aoetgt *t;

	f = newframe(d);
	if (f == NULL)
		return NULL;

	t = *d->tgt;

	/* initialize the headers & frame */
	skb = f->skb;
	h = (struct aoe_hdr *) skb_mac_header(skb);
	ah = (struct aoe_atahdr *) (h+1);
	skb_put(skb, sizeof *h + sizeof *ah);
	memset(h, 0, skb->len);
	f->tag = aoehdr_atainit(d, t, h);
	fhash(f);
	t->nout++;
	f->waited = 0;

	/* set up ata header */
	ah->scnt = 1;
	ah->cmdstat = ATA_CMD_ID_ATA;
	ah->lba3 = 0xa0;

	skb->dev = t->ifp->nd;

	d->rttavg = MAXTIMER;
	d->timer.function = rexmit_timer;

	return skb_clone(skb, GFP_ATOMIC);
}
static struct aoetgt *
addtgt(struct aoedev *d, char *addr, ulong nframes)
{
	struct aoetgt *t, **tt, **te;

	tt = d->targets;
	te = tt + NTARGETS;
	for (; tt < te && *tt; tt++)
		;

	if (tt == te) {
		printk(KERN_INFO
			"aoe: device addtgt failure; too many targets\n");
		return NULL;
	}
	t = kzalloc(sizeof(*t), GFP_ATOMIC);
	if (!t) {
		printk(KERN_INFO "aoe: cannot allocate memory to add target\n");
		return NULL;
	}

	d->ntargets++;
	t->nframes = nframes;
	t->d = d;
	memcpy(t->addr, addr, sizeof t->addr);
	t->ifp = t->ifs;
	t->maxout = t->nframes;
	INIT_LIST_HEAD(&t->ffree);
	return *tt = t;
}
static void
setdbcnt(struct aoedev *d)
{
	struct aoetgt **t, **e;
	int bcnt = 0;

	t = d->targets;
	e = t + NTARGETS;
	for (; t < e && *t; t++)
		if (bcnt == 0 || bcnt > (*t)->minbcnt)
			bcnt = (*t)->minbcnt;
	if (bcnt != d->maxbcnt) {
		d->maxbcnt = bcnt;
		pr_info("aoe: e%ld.%d: setting %d byte data frames\n",
			d->aoemajor, d->aoeminor, bcnt);
	}
}
static void
setifbcnt(struct aoetgt *t, struct net_device *nd, int bcnt)
{
	struct aoedev *d;
	struct aoeif *p, *e;
	int minbcnt;

	d = t->d;
	minbcnt = bcnt;
	p = t->ifs;
	e = p + NAOEIFS;
	for (; p < e; p++) {
		if (p->nd == NULL)
			break;		/* end of the valid interfaces */
		if (p->nd == nd) {
			p->bcnt = bcnt;	/* we're updating */
			nd = NULL;
		} else if (minbcnt > p->bcnt)
			minbcnt = p->bcnt; /* find the min interface */
	}

	if (nd) {
		if (p == e) {
			pr_err("aoe: device setifbcnt failure; too many interfaces.\n");
			return;
		}
		dev_hold(nd);
		p->nd = nd;
		p->bcnt = bcnt;
	}

	t->minbcnt = minbcnt;
	setdbcnt(d);
}
void
aoecmd_cfg_rsp(struct sk_buff *skb)
{
	struct aoedev *d;
	struct aoe_hdr *h;
	struct aoe_cfghdr *ch;
	struct aoetgt *t;
	ulong flags, aoemajor;
	struct sk_buff *sl;
	struct sk_buff_head queue;
	u16 n;

	sl = NULL;
	h = (struct aoe_hdr *) skb_mac_header(skb);
	ch = (struct aoe_cfghdr *) (h+1);

	/*
	 * Enough people have their dip switches set backwards to
	 * warrant a loud message for this special case.
	 */
	aoemajor = get_unaligned_be16(&h->major);
	if (aoemajor == 0xfff) {
		printk(KERN_ERR "aoe: Warning: shelf address is all ones.  "
			"Check shelf dip switches.\n");
		return;
	}
	if (aoemajor == 0xffff) {
		pr_info("aoe: e%ld.%d: broadcast shelf number invalid\n",
			aoemajor, (int) h->minor);
		return;
	}
	if (h->minor == 0xff) {
		pr_info("aoe: e%ld.%d: broadcast slot number invalid\n",
			aoemajor, (int) h->minor);
		return;
	}

	n = be16_to_cpu(ch->bufcnt);
	if (n > aoe_maxout)	/* keep it reasonable */
		n = aoe_maxout;

	d = aoedev_by_aoeaddr(aoemajor, h->minor, 1);
	if (d == NULL) {
		pr_info("aoe: device allocation failure\n");
		return;
	}

	spin_lock_irqsave(&d->lock, flags);

	t = gettgt(d, h->src);
	if (t) {
		t->nframes = n;
		if (n < t->maxout)
			t->maxout = n;
	} else {
		t = addtgt(d, h->src, n);
		if (!t)
			goto bail;
	}
	n = skb->dev->mtu;
	n -= sizeof(struct aoe_hdr) + sizeof(struct aoe_atahdr);
	n /= 512;
	if (n > ch->scnt)
		n = ch->scnt;
	n = n ? n * 512 : DEFAULTBCNT;
	setifbcnt(t, skb->dev, n);

	/* don't change users' perspective */
	if (d->nopen == 0) {
		d->fw_ver = be16_to_cpu(ch->fwver);
		sl = aoecmd_ata_id(d);
	}
bail:
	spin_unlock_irqrestore(&d->lock, flags);
	aoedev_put(d);
	if (sl) {
		__skb_queue_head_init(&queue);
		__skb_queue_tail(&queue, sl);
		aoenet_xmit(&queue);
	}
}
void
aoecmd_cleanslate(struct aoedev *d)
{
	struct aoetgt **t, **te;

	d->mintimer = MINTIMER;
	d->maxbcnt = 0;

	t = d->targets;
	te = t + NTARGETS;
	for (; t < te && *t; t++)
		(*t)->maxout = (*t)->nframes;
}
void
aoe_failbuf(struct aoedev *d, struct buf *buf)
{
	if (buf == NULL)
		return;
	buf->resid = 0;
	clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
	if (buf->nframesout == 0)
		aoe_end_buf(d, buf);
}
void
aoe_flush_iocq(void)
{
	struct frame *f;
	struct aoedev *d;
	LIST_HEAD(flist);
	struct list_head *pos;
	struct sk_buff *skb;
	ulong flags;

	spin_lock_irqsave(&iocq.lock, flags);
	list_splice_init(&iocq.head, &flist);
	spin_unlock_irqrestore(&iocq.lock, flags);
	while (!list_empty(&flist)) {
		pos = flist.next;
		list_del(pos);
		f = list_entry(pos, struct frame, head);
		d = f->t->d;
		skb = f->r_skb;
		spin_lock_irqsave(&d->lock, flags);
		if (f->buf) {
			f->buf->nframesout--;
			aoe_failbuf(d, f->buf);
		}
		aoe_freetframe(f);
		spin_unlock_irqrestore(&d->lock, flags);
		dev_kfree_skb(skb);
		aoedev_put(d);
	}
}
int __init
aoecmd_init(void)
{
	INIT_LIST_HEAD(&iocq.head);
	spin_lock_init(&iocq.lock);
	init_waitqueue_head(&ktiowq);
	kts.name = "aoe_ktio";
	kts.fn = ktio;
	kts.waitq = &ktiowq;
	kts.lock = &iocq.lock;
	return aoe_ktstart(&kts);
}