/* Copyright (c) 2012 Coraid, Inc.  See COPYING for GPL terms. */
/*
 * aoecmd.c
 * Filesystem request handling methods
 */

#include <linux/ata.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/genhd.h>
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <net/net_namespace.h>
#include <asm/unaligned.h>
#include <linux/uio.h>
#include "aoe.h"

#define MAXIOC (8192)	/* default meant to avoid most soft lockups */

static void ktcomplete(struct frame *, struct sk_buff *);
static int count_targets(struct aoedev *d, int *untainted);

static struct buf *nextbuf(struct aoedev *);

static int aoe_deadsecs = 60 * 3;
module_param(aoe_deadsecs, int, 0644);
MODULE_PARM_DESC(aoe_deadsecs, "After aoe_deadsecs seconds, give up and fail dev.");

static int aoe_maxout = 64;
module_param(aoe_maxout, int, 0644);
MODULE_PARM_DESC(aoe_maxout,
	"Only aoe_maxout outstanding packets for every MAC on eX.Y.");

static wait_queue_head_t ktiowq;
static struct ktstate kts;

/* io completion queue */
static struct {
	struct list_head head;
	spinlock_t lock;
} iocq;

static struct page *empty_page;

static struct sk_buff *
new_skb(ulong len)
{
	struct sk_buff *skb;

	skb = alloc_skb(len, GFP_ATOMIC);
	if (skb) {
		skb_reset_mac_header(skb);
		skb_reset_network_header(skb);
		skb->protocol = __constant_htons(ETH_P_AOE);
		skb_checksum_none_assert(skb);
	}
	return skb;
}

static struct frame *
getframe_deferred(struct aoedev *d, u32 tag)
{
	struct list_head *head, *pos, *nx;
	struct frame *f;

	head = &d->rexmitq;
	list_for_each_safe(pos, nx, head) {
		f = list_entry(pos, struct frame, head);
		if (f->tag == tag) {
			list_del(pos);
			return f;
		}
	}
	return NULL;
}

static struct frame *
getframe(struct aoedev *d, u32 tag)
{
	struct frame *f;
	struct list_head *head, *pos, *nx;
	u32 n;

	n = tag % NFACTIVE;
	head = &d->factive[n];
	list_for_each_safe(pos, nx, head) {
		f = list_entry(pos, struct frame, head);
		if (f->tag == tag) {
			list_del(pos);
			return f;
		}
	}
	return NULL;
}

/*
 * Leave the top bit clear so we have tagspace for userland.
 * The bottom 16 bits are the xmit tick for rexmit/rttavg processing.
 * This driver reserves tag -1 to mean "unused frame."
 */
static u32
newtag(struct aoedev *d)
{
	register ulong n;

	n = jiffies & 0xffff;
	return n |= (++d->lasttag & 0x7fff) << 16;
}

static u32
aoehdr_atainit(struct aoedev *d, struct aoetgt *t, struct aoe_hdr *h)
{
	u32 host_tag = newtag(d);

	memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);
	memcpy(h->dst, t->addr, sizeof h->dst);
	h->type = __constant_cpu_to_be16(ETH_P_AOE);
	h->verfl = AOE_HVER;
	h->major = cpu_to_be16(d->aoemajor);
	h->minor = d->aoeminor;
	h->cmd = AOECMD_ATA;
	h->tag = cpu_to_be32(host_tag);

	return host_tag;
}

static void
put_lba(struct aoe_atahdr *ah, sector_t lba)
{
	ah->lba0 = lba;
	ah->lba1 = lba >>= 8;
	ah->lba2 = lba >>= 8;
	ah->lba3 = lba >>= 8;
	ah->lba4 = lba >>= 8;
	ah->lba5 = lba >>= 8;
}

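/* Illustration: for lba 0x614532, put_lba() spreads the value across
 * the byte-wide fields low byte first: lba0=0x32, lba1=0x45,
 * lba2=0x61, and lba3..lba5 zero (the LBA48 extension bytes).
 */
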
static struct aoeif *
ifrotate(struct aoetgt *t)
{
	struct aoeif *ifp;

	ifp = t->ifp;
	ifp++;
	if (ifp >= &t->ifs[NAOEIFS] || ifp->nd == NULL)
		ifp = t->ifs;
	if (ifp->nd == NULL)
		return NULL;
	return t->ifp = ifp;
}

static void
skb_pool_put(struct aoedev *d, struct sk_buff *skb)
{
	__skb_queue_tail(&d->skbpool, skb);
}

static struct sk_buff *
skb_pool_get(struct aoedev *d)
{
	struct sk_buff *skb = skb_peek(&d->skbpool);

	if (skb && atomic_read(&skb_shinfo(skb)->dataref) == 1) {
		__skb_unlink(skb, &d->skbpool);
		return skb;
	}
	if (skb_queue_len(&d->skbpool) < NSKBPOOLMAX &&
	    (skb = new_skb(ETH_ZLEN)))
		return skb;

	return NULL;
}

void
aoe_freetframe(struct frame *f)
{
	struct aoetgt *t;

	t = f->t;
	f->buf = NULL;
	f->lba = 0;
	f->bv = NULL;
	f->r_skb = NULL;
	f->flags = 0;
	list_add(&f->head, &t->ffree);
}

static struct frame *
newtframe(struct aoedev *d, struct aoetgt *t)
{
	struct frame *f;
	struct sk_buff *skb;
	struct list_head *pos;

	if (list_empty(&t->ffree)) {
		if (t->falloc >= NSKBPOOLMAX*2)
			return NULL;
		f = kcalloc(1, sizeof(*f), GFP_ATOMIC);
		if (f == NULL)
			return NULL;
		t->falloc++;
		f->t = t;
	} else {
		pos = t->ffree.next;
		list_del(pos);
		f = list_entry(pos, struct frame, head);
	}

	skb = f->skb;
	if (skb == NULL) {
		f->skb = skb = new_skb(ETH_ZLEN);
		if (!skb) {
bail:			aoe_freetframe(f);
			return NULL;
		}
	}

	if (atomic_read(&skb_shinfo(skb)->dataref) != 1) {
		skb = skb_pool_get(d);
		if (skb == NULL)
			goto bail;
		skb_pool_put(d, f->skb);
		f->skb = skb;
	}

	skb->truesize -= skb->data_len;
	skb_shinfo(skb)->nr_frags = skb->data_len = 0;
	skb_trim(skb, 0);
	return f;
}

static struct frame *
newframe(struct aoedev *d)
{
	struct frame *f;
	struct aoetgt *t, **tt;
	int totout = 0;
	int use_tainted;
	int has_untainted;

	if (!d->targets || !d->targets[0]) {
		printk(KERN_ERR "aoe: NULL TARGETS!\n");
		return NULL;
	}
	tt = d->tgt;	/* last used target */
	for (use_tainted = 0, has_untainted = 0;;) {
		tt++;
		if (tt >= &d->targets[d->ntargets] || !*tt)
			tt = d->targets;
		t = *tt;
		if (!t->taint) {
			has_untainted = 1;
			totout += t->nout;
		}
		if (t->nout < t->maxout
		&& (use_tainted || !t->taint)
		&& t->ifp->nd) {
			f = newtframe(d, t);
			if (f) {
				ifrotate(t);
				d->tgt = tt;
				return f;
			}
		}
		if (tt == d->tgt) {	/* we've looped and found nada */
			if (!use_tainted && !has_untainted)
				use_tainted = 1;
			else
				break;
		}
	}
	if (totout == 0) {
		d->kicked++;
		d->flags |= DEVFL_KICKME;
	}
	return NULL;
}

static void
skb_fillup(struct sk_buff *skb, struct bio_vec *bv, ulong off, ulong cnt)
{
	int frag = 0;
	ulong fcnt;
loop:
	fcnt = bv->bv_len - (off - bv->bv_offset);
	if (fcnt > cnt)
		fcnt = cnt;
	skb_fill_page_desc(skb, frag++, bv->bv_page, off, fcnt);
	cnt -= fcnt;
	if (cnt <= 0)
		return;
	bv++;
	off = bv->bv_offset;
	goto loop;
}

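/* Sketch of the walk above: a 6 KiB count starting at offset 0 of a
 * bio_vec holding one full 4 KiB page consumes that page as frag 0,
 * then advances to the next bio_vec and maps its first 2 KiB as
 * frag 1.
 */
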
static void
fhash(struct frame *f)
{
	struct aoedev *d = f->t->d;
	u32 n;

	n = f->tag % NFACTIVE;
	list_add_tail(&f->head, &d->factive[n]);
}

static void
ata_rw_frameinit(struct frame *f)
{
	struct aoetgt *t;
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct sk_buff *skb;
	char writebit, extbit;

	skb = f->skb;
	h = (struct aoe_hdr *) skb_mac_header(skb);
	ah = (struct aoe_atahdr *) (h + 1);
	skb_put(skb, sizeof(*h) + sizeof(*ah));
	memset(h, 0, skb->len);

	writebit = 0x10;
	extbit = 0x4;

	t = f->t;
	f->tag = aoehdr_atainit(t->d, t, h);
	fhash(f);
	t->nout++;
	f->waited = 0;
	f->waited_total = 0;
	if (f->buf)
		f->lba = f->buf->sector;

	/* set up ata header */
	ah->scnt = f->bcnt >> 9;
	put_lba(ah, f->lba);
	if (t->d->flags & DEVFL_EXT) {
		ah->aflags |= AOEAFL_EXT;
	} else {
		extbit = 0;
		ah->lba3 &= 0x0f;
		ah->lba3 |= 0xe0;	/* LBA bit + obsolete 0xa0 */
	}
	if (f->buf && bio_data_dir(f->buf->bio) == WRITE) {
		skb_fillup(skb, f->bv, f->bv_off, f->bcnt);
		ah->aflags |= AOEAFL_WRITE;
		skb->len += f->bcnt;
		skb->data_len = f->bcnt;
		skb->truesize += f->bcnt;
		t->wpkts++;
	} else {
		t->rpkts++;
		writebit = 0;
	}

	ah->cmdstat = ATA_CMD_PIO_READ | writebit | extbit;
	skb->dev = t->ifp->nd;
}

static int
aoecmd_ata_rw(struct aoedev *d)
{
	struct frame *f;
	struct buf *buf;
	struct sk_buff *skb;
	struct sk_buff_head queue;
	ulong bcnt, fbcnt;

	buf = nextbuf(d);
	if (buf == NULL)
		return 0;
	f = newframe(d);
	if (f == NULL)
		return 0;
	bcnt = d->maxbcnt;
	if (bcnt == 0)
		bcnt = DEFAULTBCNT;
	if (bcnt > buf->resid)
		bcnt = buf->resid;
	fbcnt = bcnt;
	f->bv = buf->bv;
	f->bv_off = f->bv->bv_offset + (f->bv->bv_len - buf->bv_resid);
	do {
		if (fbcnt < buf->bv_resid) {
			buf->bv_resid -= fbcnt;
			buf->resid -= fbcnt;
			break;
		}
		fbcnt -= buf->bv_resid;
		buf->resid -= buf->bv_resid;
		if (buf->resid == 0) {
			d->ip.buf = NULL;
			break;
		}
		buf->bv++;
		buf->bv_resid = buf->bv->bv_len;
		WARN_ON(buf->bv_resid == 0);
	} while (fbcnt);

	/* initialize the headers & frame */
	f->buf = buf;
	f->bcnt = bcnt;
	ata_rw_frameinit(f);

	/* mark all tracking fields and load out */
	buf->nframesout += 1;
	buf->sector += bcnt >> 9;

	skb = skb_clone(f->skb, GFP_ATOMIC);
	if (skb) {
		do_gettimeofday(&f->sent);
		f->sent_jiffs = (u32) jiffies;
		__skb_queue_head_init(&queue);
		__skb_queue_tail(&queue, skb);
		aoenet_xmit(&queue);
	}
	return 1;
}

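/* Rough example of the splitting above: with d->maxbcnt at 8192 and a
 * 16 KiB request, each call issues one 8 KiB frame (ah->scnt = 16
 * sectors), so aoecmd_work()'s loop drains the buf in two calls.
 */
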
/* some callers cannot sleep, and they can call this function,
 * transmitting the packets later, when interrupts are on
 */
static void
aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *queue)
{
	struct aoe_hdr *h;
	struct aoe_cfghdr *ch;
	struct sk_buff *skb;
	struct net_device *ifp;

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, ifp) {
		dev_hold(ifp);
		if (!is_aoe_netif(ifp))
			goto cont;

		skb = new_skb(sizeof *h + sizeof *ch);
		if (skb == NULL) {
			printk(KERN_INFO "aoe: skb alloc failure\n");
			goto cont;
		}
		skb_put(skb, sizeof *h + sizeof *ch);
		skb->dev = ifp;
		__skb_queue_tail(queue, skb);
		h = (struct aoe_hdr *) skb_mac_header(skb);
		memset(h, 0, sizeof *h + sizeof *ch);

		memset(h->dst, 0xff, sizeof h->dst);
		memcpy(h->src, ifp->dev_addr, sizeof h->src);
		h->type = __constant_cpu_to_be16(ETH_P_AOE);
		h->verfl = AOE_HVER;
		h->major = cpu_to_be16(aoemajor);
		h->minor = aoeminor;
		h->cmd = AOECMD_CFG;

cont:
		dev_put(ifp);
	}
	rcu_read_unlock();
}

static void
resend(struct aoedev *d, struct frame *f)
{
	struct sk_buff *skb;
	struct sk_buff_head queue;
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct aoetgt *t;
	char buf[128];
	u32 n;

	t = f->t;
	n = newtag(d);
	skb = f->skb;
	if (ifrotate(t) == NULL) {
		/* probably can't happen, but set it up to fail anyway */
		pr_info("aoe: resend: no interfaces to rotate to.\n");
		ktcomplete(f, NULL);
		return;
	}
	h = (struct aoe_hdr *) skb_mac_header(skb);
	ah = (struct aoe_atahdr *) (h+1);

	if (!(f->flags & FFL_PROBE)) {
		snprintf(buf, sizeof(buf),
			"%15s e%ld.%d oldtag=%08x@%08lx newtag=%08x s=%pm d=%pm nout=%d\n",
			"retransmit", d->aoemajor, d->aoeminor,
			f->tag, jiffies, n,
			h->src, h->dst, t->nout);
		aoechr_error(buf);
	}

	f->tag = n;
	fhash(f);
	h->tag = cpu_to_be32(n);
	memcpy(h->dst, t->addr, sizeof h->dst);
	memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);

	skb->dev = t->ifp->nd;
	skb = skb_clone(skb, GFP_ATOMIC);
	if (skb == NULL)
		return;
	do_gettimeofday(&f->sent);
	f->sent_jiffs = (u32) jiffies;
	__skb_queue_head_init(&queue);
	__skb_queue_tail(&queue, skb);
	aoenet_xmit(&queue);
}

static int
tsince_hr(struct frame *f)
{
	struct timeval now;
	int n;

	do_gettimeofday(&now);
	n = now.tv_usec - f->sent.tv_usec;
	n += (now.tv_sec - f->sent.tv_sec) * USEC_PER_SEC;

	if (n < 0)
		n = -n;

	/* For relatively long periods, use jiffies to avoid
	 * discrepancies caused by updates to the system time.
	 *
	 * On system with HZ of 1000, 32-bits is over 49 days
	 * worth of jiffies, or over 71 minutes worth of usecs.
	 *
	 * Jiffies overflow is handled by subtraction of unsigned ints:
	 * (gdb) print (unsigned) 2 - (unsigned) 0xfffffffe
	 * $3 = 4
	 * (gdb) print (unsigned) 1 - (unsigned) 3
	 * $4 = 4294967294
	 */
	if (n > USEC_PER_SEC / 4) {
		n = ((u32) jiffies) - f->sent_jiffs;
		n *= USEC_PER_SEC / HZ;
	}

	return n;
}

static int
tsince(u32 tag)
{
	int n;
	n = jiffies & 0xffff;
	n -= tag & 0xffff;
	if (n < 0)
		n += 1<<16;
	return jiffies_to_usecs(n + 1);
}

static struct aoeif *
getif(struct aoetgt *t, struct net_device *nd)
{
	struct aoeif *p, *e;

	p = t->ifs;
	e = p + NAOEIFS;
	for (; p < e; p++)
		if (p->nd == nd)
			return p;
	return NULL;
}

static void
ejectif(struct aoetgt *t, struct aoeif *ifp)
{
	struct aoeif *e;
	struct net_device *nd;
	ulong n;

	nd = ifp->nd;
	e = t->ifs + NAOEIFS - 1;
	n = (e - ifp) * sizeof *ifp;
	memmove(ifp, ifp+1, n);
	e->nd = NULL;
	dev_put(nd);
}

static struct frame *
reassign_frame(struct frame *f)
{
	struct frame *nf;
	struct sk_buff *skb;

	nf = newframe(f->t->d);
	if (!nf)
		return NULL;
	if (nf->t == f->t) {
		aoe_freetframe(nf);
		return NULL;
	}

	skb = nf->skb;
	nf->skb = f->skb;
	nf->buf = f->buf;
	nf->bcnt = f->bcnt;
	nf->lba = f->lba;
	nf->bv = f->bv;
	nf->bv_off = f->bv_off;
	nf->waited = 0;
	nf->waited_total = f->waited_total;
	nf->sent = f->sent;
	nf->sent_jiffs = f->sent_jiffs;
	f->skb = skb;

	return nf;
}

static void
probe(struct aoetgt *t)
{
	struct aoedev *d;
	struct frame *f;
	struct sk_buff *skb;
	struct sk_buff_head queue;
	size_t n, m;
	int frag;

	d = t->d;
	f = newtframe(d, t);
	if (!f) {
		pr_err("%s %pm for e%ld.%d: %s\n",
			"aoe: cannot probe remote address",
			t->addr,
			(long) d->aoemajor, d->aoeminor,
			"no frame available");
		return;
	}
	f->flags |= FFL_PROBE;
	ifrotate(t);
	f->bcnt = t->d->maxbcnt ? t->d->maxbcnt : DEFAULTBCNT;
	ata_rw_frameinit(f);
	skb = f->skb;
	for (frag = 0, n = f->bcnt; n > 0; ++frag, n -= m) {
		if (n < PAGE_SIZE)
			m = n;
		else
			m = PAGE_SIZE;
		skb_fill_page_desc(skb, frag, empty_page, 0, m);
	}
	skb->len += f->bcnt;
	skb->data_len = f->bcnt;
	skb->truesize += f->bcnt;

	skb = skb_clone(f->skb, GFP_ATOMIC);
	if (skb) {
		do_gettimeofday(&f->sent);
		f->sent_jiffs = (u32) jiffies;
		__skb_queue_head_init(&queue);
		__skb_queue_tail(&queue, skb);
		aoenet_xmit(&queue);
	}
}

static long
rto(struct aoedev *d)
{
	long t;

	t = 2 * d->rttavg >> RTTSCALE;
	t += 8 * d->rttdev >> RTTDSCALE;
	if (t == 0)
		t = 1;

	return t;
}

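/* In TCP terms (Jacobson & Karels): with the average kept scaled by
 * 2^RTTSCALE and the deviation by 2^RTTDSCALE, the value computed
 * above is 2 * srtt + 8 * rttdev microseconds -- e.g. srtt 500us and
 * rttdev 100us give an rto of 1800us.
 */
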
static void
rexmit_deferred(struct aoedev *d)
{
	struct aoetgt *t;
	struct frame *f;
	struct frame *nf;
	struct list_head *pos, *nx, *head;
	int since;
	int untainted;

	count_targets(d, &untainted);

	head = &d->rexmitq;
	list_for_each_safe(pos, nx, head) {
		f = list_entry(pos, struct frame, head);
		t = f->t;
		if (t->taint) {
			if (!(f->flags & FFL_PROBE)) {
				nf = reassign_frame(f);
				if (nf) {
					if (t->nout_probes == 0
					&& untainted > 0) {
						probe(t);
						t->nout_probes++;
					}
					list_replace(&f->head, &nf->head);
					pos = &nf->head;
					aoe_freetframe(f);
					f = nf;
					t = f->t;
				}
			} else if (untainted < 1) {
				/* don't probe w/o other untainted aoetgts */
				goto stop_probe;
			} else if (tsince_hr(f) < t->taint * rto(d)) {
				/* reprobe slowly when taint is high */
				continue;
			}
		} else if (f->flags & FFL_PROBE) {
stop_probe:		/* don't probe untainted aoetgts */
			list_del(pos);
			aoe_freetframe(f);
			/* leaving d->kicked, because this is routine */
			f->t->d->flags |= DEVFL_KICKME;
			continue;
		}
		if (t->nout >= t->maxout)
			continue;
		list_del(pos);
		t->nout++;
		if (f->flags & FFL_PROBE)
			t->nout_probes++;
		since = tsince_hr(f);
		f->waited += since;
		f->waited_total += since;
		resend(d, f);
	}
}

/* An aoetgt accumulates demerits quickly, and successful
 * probing redeems the aoetgt slowly.
 */
static void
scorn(struct aoetgt *t)
{
	int n;

	n = t->taint++;
	t->taint += t->taint * 2;
	if (n > t->taint)
		t->taint = n;
	if (t->taint > MAX_TAINT)
		t->taint = MAX_TAINT;
}

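/* Growth sketch (assuming the increment-then-triple arithmetic above):
 * starting from taint 0, successive calls yield 3, 12, 39, 120, 363,
 * then the MAX_TAINT cap -- demerits mount geometrically while
 * successful responses win back only one point at a time.
 */
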
static int
count_targets(struct aoedev *d, int *untainted)
{
	int i, good;

	for (i = good = 0; i < d->ntargets && d->targets[i]; ++i)
		if (d->targets[i]->taint == 0)
			good++;

	if (untainted)
		*untainted = good;
	return i;
}

static void
rexmit_timer(ulong vp)
{
	struct aoedev *d;
	struct aoetgt *t;
	struct aoeif *ifp;
	struct frame *f;
	struct list_head *head, *pos, *nx;
	LIST_HEAD(flist);
	register long timeout;
	ulong flags, n;
	int i;
	int utgts;	/* number of aoetgt descriptors (not slots) */
	int since;

	d = (struct aoedev *) vp;

	spin_lock_irqsave(&d->lock, flags);

	/* timeout based on observed timings and variations */
	timeout = rto(d);

	utgts = count_targets(d, NULL);

	if (d->flags & DEVFL_TKILL) {
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}

	/* collect all frames to rexmit into flist */
	for (i = 0; i < NFACTIVE; i++) {
		head = &d->factive[i];
		list_for_each_safe(pos, nx, head) {
			f = list_entry(pos, struct frame, head);
			if (tsince_hr(f) < timeout)
				break;	/* end of expired frames */
			/* move to flist for later processing */
			list_move_tail(pos, &flist);
		}
	}

	/* process expired frames */
	while (!list_empty(&flist)) {
		pos = flist.next;
		f = list_entry(pos, struct frame, head);
		since = tsince_hr(f);
		n = f->waited_total + since;
		n /= USEC_PER_SEC;
		if (aoe_deadsecs
		&& n > aoe_deadsecs
		&& !(f->flags & FFL_PROBE)) {
			/* Waited too long.  Device failure.
			 * Hang all frames on first hash bucket for downdev
			 * to clean up.
			 */
			list_splice(&flist, &d->factive[0]);
			aoedev_downdev(d);
			goto out;
		}

		t = f->t;
		n = f->waited + since;
		n /= USEC_PER_SEC;
		if (aoe_deadsecs && utgts > 0
		&& (n > aoe_deadsecs / utgts || n > HARD_SCORN_SECS))
			scorn(t); /* avoid this target */

		if (t->maxout != 1) {
			t->ssthresh = t->maxout / 2;
			t->maxout = 1;
		}

		if (f->flags & FFL_PROBE) {
			t->nout_probes--;
		} else {
			ifp = getif(t, f->skb->dev);
			if (ifp && ++ifp->lost > (t->nframes << 1)
			&& (ifp != t->ifs || t->ifs[1].nd)) {
				ejectif(t, ifp);
				ifp = NULL;
			}
		}
		list_move_tail(pos, &d->rexmitq);
		t->nout--;
	}
	rexmit_deferred(d);

out:
	if ((d->flags & DEVFL_KICKME) && d->blkq) {
		d->flags &= ~DEVFL_KICKME;
		d->blkq->request_fn(d->blkq);
	}

	d->timer.expires = jiffies + TIMERTICK;
	add_timer(&d->timer);

	spin_unlock_irqrestore(&d->lock, flags);
}

static unsigned long
rqbiocnt(struct request *r)
{
	struct bio *bio;
	unsigned long n = 0;

	__rq_for_each_bio(bio, r)
		n++;
	return n;
}

/* This can be removed if we are certain that no users of the block
 * layer will ever use zero-count pages in bios.  Otherwise we have to
 * protect against the put_page sometimes done by the network layer.
 *
 * See http://oss.sgi.com/archives/xfs/2007-01/msg00594.html for
 * discussion.
 *
 * We cannot use get_page in the workaround, because it insists on a
 * positive page count as a precondition.  So we use _count directly.
 */
static void
bio_pageinc(struct bio *bio)
{
	struct bio_vec *bv;
	struct page *page;
	int i;

	bio_for_each_segment(bv, bio, i) {
		page = bv->bv_page;
		/* Non-zero page count for non-head members of
		 * compound pages is no longer allowed by the kernel,
		 * but this has never been seen here.
		 */
		if (unlikely(PageCompound(page)))
			if (compound_trans_head(page) != page) {
				pr_crit("page tail used for block I/O\n");
				BUG();
			}
		atomic_inc(&page->_count);
	}
}

static void
bio_pagedec(struct bio *bio)
{
	struct bio_vec *bv;
	int i;

	bio_for_each_segment(bv, bio, i)
		atomic_dec(&bv->bv_page->_count);
}

static void
bufinit(struct buf *buf, struct request *rq, struct bio *bio)
{
	struct bio_vec *bv;

	memset(buf, 0, sizeof(*buf));
	buf->rq = rq;
	buf->bio = bio;
	buf->resid = bio->bi_size;
	buf->sector = bio->bi_sector;
	bio_pageinc(bio);
	buf->bv = bv = &bio->bi_io_vec[bio->bi_idx];
	buf->bv_resid = bv->bv_len;
	WARN_ON(buf->bv_resid == 0);
}

static struct buf *
nextbuf(struct aoedev *d)
{
	struct request *rq;
	struct request_queue *q;
	struct buf *buf;
	struct bio *bio;

	q = d->blkq;
	if (q == NULL)
		return NULL;	/* initializing */
	if (d->ip.buf)
		return d->ip.buf;
	rq = d->ip.rq;
	if (rq == NULL) {
		rq = blk_peek_request(q);
		if (rq == NULL)
			return NULL;
		blk_start_request(rq);
		d->ip.rq = rq;
		d->ip.nxbio = rq->bio;
		rq->special = (void *) rqbiocnt(rq);
	}
	buf = mempool_alloc(d->bufpool, GFP_ATOMIC);
	if (buf == NULL) {
		pr_err("aoe: nextbuf: unable to mempool_alloc!\n");
		return NULL;
	}
	bio = d->ip.nxbio;
	bufinit(buf, rq, bio);
	bio = bio->bi_next;
	d->ip.nxbio = bio;
	if (bio == NULL)
		d->ip.rq = NULL;
	return d->ip.buf = buf;
}

/* enters with d->lock held */
void
aoecmd_work(struct aoedev *d)
{
	rexmit_deferred(d);
	while (aoecmd_ata_rw(d))
		;
}

/* this function performs work that has been deferred until sleeping is OK
 */
void
aoecmd_sleepwork(struct work_struct *work)
{
	struct aoedev *d = container_of(work, struct aoedev, work);
	struct block_device *bd;
	u64 ssize;

	if (d->flags & DEVFL_GDALLOC)
		aoeblk_gdalloc(d);

	if (d->flags & DEVFL_NEWSIZE) {
		ssize = get_capacity(d->gd);
		bd = bdget_disk(d->gd, 0);
		if (bd) {
			mutex_lock(&bd->bd_inode->i_mutex);
			i_size_write(bd->bd_inode, (loff_t)ssize<<9);
			mutex_unlock(&bd->bd_inode->i_mutex);
			bdput(bd);
		}
		spin_lock_irq(&d->lock);
		d->flags |= DEVFL_UP;
		d->flags &= ~DEVFL_NEWSIZE;
		spin_unlock_irq(&d->lock);
	}
}

static void
ata_ident_fixstring(u16 *id, int ns)
{
	u16 s;

	while (ns-- > 0) {
		s = *id;
		*id++ = s >> 8 | s << 8;
	}
}

static void
ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
{
	u64 ssize;
	u16 n;

	/* word 83: command set supported */
	n = get_unaligned_le16(&id[83 << 1]);

	/* word 86: command set/feature enabled */
	n |= get_unaligned_le16(&id[86 << 1]);

	if (n & (1<<10)) {	/* bit 10: LBA 48 */
		d->flags |= DEVFL_EXT;

		/* word 100: number lba48 sectors */
		ssize = get_unaligned_le64(&id[100 << 1]);

		/* set as in ide-disk.c:init_idedisk_capacity */
		d->geo.cylinders = ssize;
		d->geo.cylinders /= (255 * 63);
		d->geo.heads = 255;
		d->geo.sectors = 63;
	} else {
		d->flags &= ~DEVFL_EXT;

		/* number lba28 sectors */
		ssize = get_unaligned_le32(&id[60 << 1]);

		/* NOTE: obsolete in ATA 6 */
		d->geo.cylinders = get_unaligned_le16(&id[54 << 1]);
		d->geo.heads = get_unaligned_le16(&id[55 << 1]);
		d->geo.sectors = get_unaligned_le16(&id[56 << 1]);
	}

	ata_ident_fixstring((u16 *) &id[10<<1], 10);	/* serial */
	ata_ident_fixstring((u16 *) &id[23<<1], 4);	/* firmware */
	ata_ident_fixstring((u16 *) &id[27<<1], 20);	/* model */
	memcpy(d->ident, id, sizeof(d->ident));

	if (d->ssize != ssize)
		printk(KERN_INFO
			"aoe: %pm e%ld.%d v%04x has %llu sectors\n",
			t->addr,
			d->aoemajor, d->aoeminor,
			d->fw_ver, (long long)ssize);
	d->ssize = ssize;
	d->geo.start = 0;
	if (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE))
		return;
	if (d->gd != NULL) {
		set_capacity(d->gd, ssize);
		d->flags |= DEVFL_NEWSIZE;
	} else
		d->flags |= DEVFL_GDALLOC;
	schedule_work(&d->work);
}

static void
calc_rttavg(struct aoedev *d, struct aoetgt *t, int rtt)
{
	register long n;

	n = rtt;

	/* cf. Congestion Avoidance and Control, Jacobson & Karels, 1988 */
	n -= d->rttavg >> RTTSCALE;
	d->rttavg += n;
	if (n < 0)
		n = -n;
	n -= d->rttdev >> RTTDSCALE;
	d->rttdev += n;

	if (!t || t->maxout >= t->nframes)
		return;
	if (t->maxout < t->ssthresh)
		t->maxout += 1;
	else if (t->nout == t->maxout && t->next_cwnd-- == 0) {
		t->maxout += 1;
		t->next_cwnd = t->maxout;
	}
}

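/* The window logic above mirrors TCP: below ssthresh, maxout grows by
 * one per completed response (slow start); at or above it, next_cwnd
 * must count down a full window before maxout grows again -- roughly
 * one increment per round trip (congestion avoidance).
 */
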
static struct aoetgt *
gettgt(struct aoedev *d, char *addr)
{
	struct aoetgt **t, **e;

	t = d->targets;
	e = t + d->ntargets;
	for (; t < e && *t; t++)
		if (memcmp((*t)->addr, addr, sizeof((*t)->addr)) == 0)
			return *t;
	return NULL;
}

static void
bvcpy(struct bio_vec *bv, ulong off, struct sk_buff *skb, long cnt)
{
	ulong fcnt;
	char *p;
	int soff = 0;
loop:
	fcnt = bv->bv_len - (off - bv->bv_offset);
	if (fcnt > cnt)
		fcnt = cnt;
	p = page_address(bv->bv_page) + off;
	skb_copy_bits(skb, soff, p, fcnt);
	soff += fcnt;
	cnt -= fcnt;
	if (cnt <= 0)
		return;
	bv++;
	off = bv->bv_offset;
	goto loop;
}

void
aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
{
	int bok;
	struct bio *bio;
	struct request_queue *q;

	q = d->blkq;
	if (rq == d->ip.rq)
		d->ip.rq = NULL;
	do {
		bio = rq->bio;
		bok = !fastfail && test_bit(BIO_UPTODATE, &bio->bi_flags);
	} while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_size));

	/* cf. http://lkml.org/lkml/2006/10/31/28 */
	if (!fastfail)
		__blk_run_queue(q);
}

static void
aoe_end_buf(struct aoedev *d, struct buf *buf)
{
	struct request *rq;
	unsigned long n;

	if (buf == d->ip.buf)
		d->ip.buf = NULL;
	rq = buf->rq;
	bio_pagedec(buf->bio);
	mempool_free(buf, d->bufpool);
	n = (unsigned long) rq->special;
	rq->special = (void *) --n;
	if (n == 0)
		aoe_end_request(d, rq, 0);
}

static void
ktiocomplete(struct frame *f)
{
	struct aoe_hdr *hin, *hout;
	struct aoe_atahdr *ahin, *ahout;
	struct buf *buf;
	struct sk_buff *skb;
	struct aoetgt *t;
	struct aoeif *ifp;
	struct aoedev *d;
	long n;
	int untainted;

	if (f == NULL)
		return;

	t = f->t;
	d = t->d;
	skb = f->r_skb;
	buf = f->buf;
	if (f->flags & FFL_PROBE)
		goto out;
	if (!skb)		/* just fail the buf. */
		goto noskb;

	hout = (struct aoe_hdr *) skb_mac_header(f->skb);
	ahout = (struct aoe_atahdr *) (hout+1);

	hin = (struct aoe_hdr *) skb->data;
	skb_pull(skb, sizeof(*hin));
	ahin = (struct aoe_atahdr *) skb->data;
	skb_pull(skb, sizeof(*ahin));
	if (ahin->cmdstat & 0xa9) {	/* these bits cleared on success */
		pr_err("aoe: ata error cmd=%2.2Xh stat=%2.2Xh from e%ld.%d\n",
			ahout->cmdstat, ahin->cmdstat,
			d->aoemajor, d->aoeminor);
noskb:		if (buf)
			clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
		goto out;
	}

	n = ahout->scnt << 9;
	switch (ahout->cmdstat) {
	case ATA_CMD_PIO_READ:
	case ATA_CMD_PIO_READ_EXT:
		if (skb->len < n) {
			pr_err("%s e%ld.%d.  skb->len=%d need=%ld\n",
				"aoe: runt data size in read from",
				(long) d->aoemajor, d->aoeminor,
				skb->len, n);
			clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
			break;
		}
		bvcpy(f->bv, f->bv_off, skb, n);
	case ATA_CMD_PIO_WRITE:
	case ATA_CMD_PIO_WRITE_EXT:
		spin_lock_irq(&d->lock);
		ifp = getif(t, skb->dev);
		if (ifp)
			ifp->lost = 0;
		spin_unlock_irq(&d->lock);
		break;
	case ATA_CMD_ID_ATA:
		if (skb->len < 512) {
			pr_info("%s e%ld.%d.  skb->len=%d need=512\n",
				"aoe: runt data size in ataid from",
				(long) d->aoemajor, d->aoeminor,
				skb->len);
			break;
		}
		if (skb_linearize(skb))
			break;
		spin_lock_irq(&d->lock);
		ataid_complete(d, t, skb->data);
		spin_unlock_irq(&d->lock);
		break;
	default:
		pr_info("aoe: unrecognized ata command %2.2Xh for %d.%d\n",
			ahout->cmdstat,
			be16_to_cpu(get_unaligned(&hin->major)),
			hin->minor);
	}
out:
	spin_lock_irq(&d->lock);
	if (t->taint > 0
	&& --t->taint > 0
	&& t->nout_probes == 0) {
		count_targets(d, &untainted);
		if (untainted > 0) {
			probe(t);
			t->nout_probes++;
		}
	}

	aoe_freetframe(f);

	if (buf && --buf->nframesout == 0 && buf->resid == 0)
		aoe_end_buf(d, buf);

	spin_unlock_irq(&d->lock);
	aoedev_put(d);
	dev_kfree_skb(skb);
}

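/* Status-mask note: 0xa9 is BSY (0x80) | DF (0x20) | DRQ (0x08) |
 * ERR (0x01) from the ATA status register; a healthy command ends
 * with all four clear, which is why any of them is treated as an
 * error above.
 */
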
/* Enters with iocq.lock held.
 * Returns true iff responses needing processing remain.
 */
static int
ktio(void)
{
	struct frame *f;
	struct list_head *pos;
	int i;

	for (i = 0; ; ++i) {
		if (i == MAXIOC)
			return 1;
		if (list_empty(&iocq.head))
			return 0;
		pos = iocq.head.next;
		list_del(pos);
		spin_unlock_irq(&iocq.lock);
		f = list_entry(pos, struct frame, head);
		ktiocomplete(f);
		spin_lock_irq(&iocq.lock);
	}
}

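/* Batching sketch: ktio() handles at most MAXIOC (8192) completions
 * per invocation and then reports more work, so the kthread below can
 * reschedule between batches instead of soft-locking the CPU.
 */
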
static int
kthread(void *vp)
{
	struct ktstate *k;
	DECLARE_WAITQUEUE(wait, current);
	int more;

	k = vp;
	current->flags |= PF_NOFREEZE;
	set_user_nice(current, -10);
	complete(&k->rendez);	/* tell spawner we're running */
	do {
		spin_lock_irq(k->lock);
		more = k->fn();
		if (!more) {
			add_wait_queue(k->waitq, &wait);
			__set_current_state(TASK_INTERRUPTIBLE);
		}
		spin_unlock_irq(k->lock);
		if (!more) {
			schedule();
			remove_wait_queue(k->waitq, &wait);
		} else
			cond_resched();
	} while (!kthread_should_stop());
	complete(&k->rendez);	/* tell spawner we're stopping */
	return 0;
}

void
aoe_ktstop(struct ktstate *k)
{
	kthread_stop(k->task);
	wait_for_completion(&k->rendez);
}

int
aoe_ktstart(struct ktstate *k)
{
	struct task_struct *task;

	init_completion(&k->rendez);
	task = kthread_run(kthread, k, k->name);
	if (task == NULL || IS_ERR(task))
		return -EINVAL;
	k->task = task;
	wait_for_completion(&k->rendez); /* allow kthread to start */
	init_completion(&k->rendez);	/* for waiting for exit later */
	return 0;
}

/* pass it off to kthreads for processing */
static void
ktcomplete(struct frame *f, struct sk_buff *skb)
{
	ulong flags;

	f->r_skb = skb;
	spin_lock_irqsave(&iocq.lock, flags);
	list_add_tail(&f->head, &iocq.head);
	spin_unlock_irqrestore(&iocq.lock, flags);
	wake_up(&ktiowq);
}

struct sk_buff *
aoecmd_ata_rsp(struct sk_buff *skb)
{
	struct aoedev *d;
	struct aoe_hdr *h;
	struct frame *f;
	u32 n;
	ulong flags;
	char ebuf[128];
	u16 aoemajor;

	h = (struct aoe_hdr *) skb->data;
	aoemajor = be16_to_cpu(get_unaligned(&h->major));
	d = aoedev_by_aoeaddr(aoemajor, h->minor, 0);
	if (d == NULL) {
		snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response "
			"for unknown device %d.%d\n",
			aoemajor, h->minor);
		aoechr_error(ebuf);
		return skb;
	}

	spin_lock_irqsave(&d->lock, flags);

	n = be32_to_cpu(get_unaligned(&h->tag));
	f = getframe(d, n);
	if (f) {
		calc_rttavg(d, f->t, tsince_hr(f));
		f->t->nout--;
		if (f->flags & FFL_PROBE)
			f->t->nout_probes--;
	} else {
		f = getframe_deferred(d, n);
		if (f) {
			calc_rttavg(d, NULL, tsince_hr(f));
		} else {
			calc_rttavg(d, NULL, tsince(n));
			spin_unlock_irqrestore(&d->lock, flags);
			aoedev_put(d);
			snprintf(ebuf, sizeof(ebuf),
				"%15s e%d.%d    tag=%08x@%08lx s=%pm d=%pm\n",
				"unexpected rsp",
				get_unaligned_be16(&h->major),
				h->minor,
				get_unaligned_be32(&h->tag),
				jiffies,
				h->src,
				h->dst);
			aoechr_error(ebuf);
			return skb;
		}
	}
	aoecmd_work(d);

	spin_unlock_irqrestore(&d->lock, flags);

	ktcomplete(f, skb);

	/*
	 * Note here that we do not perform an aoedev_put, as we are
	 * leaving this reference for the ktio to release.
	 */
	return NULL;
}

void
aoecmd_cfg(ushort aoemajor, unsigned char aoeminor)
{
	struct sk_buff_head queue;

	__skb_queue_head_init(&queue);
	aoecmd_cfg_pkts(aoemajor, aoeminor, &queue);
	aoenet_xmit(&queue);
}

struct sk_buff *
aoecmd_ata_id(struct aoedev *d)
{
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct frame *f;
	struct sk_buff *skb;
	struct aoetgt *t;

	f = newframe(d);
	if (f == NULL)
		return NULL;

	t = *d->tgt;

	/* initialize the headers & frame */
	skb = f->skb;
	h = (struct aoe_hdr *) skb_mac_header(skb);
	ah = (struct aoe_atahdr *) (h+1);
	skb_put(skb, sizeof *h + sizeof *ah);
	memset(h, 0, skb->len);
	f->tag = aoehdr_atainit(d, t, h);
	fhash(f);
	t->nout++;
	f->waited = 0;
	f->waited_total = 0;

	/* set up ata header */
	ah->scnt = 1;
	ah->cmdstat = ATA_CMD_ID_ATA;
	ah->lba3 = 0xa0;

	skb->dev = t->ifp->nd;

	d->rttavg = RTTAVG_INIT;
	d->rttdev = RTTDEV_INIT;
	d->timer.function = rexmit_timer;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (skb) {
		do_gettimeofday(&f->sent);
		f->sent_jiffs = (u32) jiffies;
	}

	return skb;
}

static struct aoetgt **
grow_targets(struct aoedev *d)
{
	ulong oldn, newn;
	struct aoetgt **tt;

	oldn = d->ntargets;
	newn = oldn * 2;
	tt = kcalloc(newn, sizeof(*d->targets), GFP_ATOMIC);
	if (!tt)
		return NULL;
	memmove(tt, d->targets, sizeof(*d->targets) * oldn);
	d->tgt = tt + (d->tgt - d->targets);
	kfree(d->targets);
	d->targets = tt;
	d->ntargets = newn;

	return &d->targets[oldn];
}

static struct aoetgt *
addtgt(struct aoedev *d, char *addr, ulong nframes)
{
	struct aoetgt *t, **tt, **te;

	tt = d->targets;
	te = tt + d->ntargets;
	for (; tt < te && *tt; tt++)
		;

	if (tt == te) {
		tt = grow_targets(d);
		if (!tt)
			goto nomem;
	}
	t = kzalloc(sizeof(*t), GFP_ATOMIC);
	if (!t)
		goto nomem;
	t->nframes = nframes;
	t->d = d;
	memcpy(t->addr, addr, sizeof t->addr);
	t->ifp = t->ifs;
	aoecmd_wreset(t);
	t->maxout = t->nframes / 2;
	INIT_LIST_HEAD(&t->ffree);
	return *tt = t;

 nomem:
	pr_info("aoe: cannot allocate memory to add target\n");
	return NULL;
}

static void
setdbcnt(struct aoedev *d)
{
	struct aoetgt **t, **e;
	int bcnt = 0;

	t = d->targets;
	e = t + d->ntargets;
	for (; t < e && *t; t++)
		if (bcnt == 0 || bcnt > (*t)->minbcnt)
			bcnt = (*t)->minbcnt;
	if (bcnt != d->maxbcnt) {
		d->maxbcnt = bcnt;
		pr_info("aoe: e%ld.%d: setting %d byte data frames\n",
			d->aoemajor, d->aoeminor, bcnt);
	}
}

static void
setifbcnt(struct aoetgt *t, struct net_device *nd, int bcnt)
{
	struct aoedev *d;
	struct aoeif *p, *e;
	int minbcnt;

	d = t->d;
	minbcnt = bcnt;
	p = t->ifs;
	e = p + NAOEIFS;
	for (; p < e; p++) {
		if (p->nd == NULL)
			break;		/* end of the valid interfaces */
		if (p->nd == nd) {
			p->bcnt = bcnt;	/* we're updating */
			nd = NULL;
		} else if (minbcnt > p->bcnt)
			minbcnt = p->bcnt; /* find the min interface */
	}

	if (nd) {
		if (p == e) {
			pr_err("aoe: device setifbcnt failure; too many interfaces.\n");
			return;
		}
		dev_hold(nd);
		p->nd = nd;
		p->bcnt = bcnt;
	}

	t->minbcnt = minbcnt;
	setdbcnt(d);
}

void
aoecmd_cfg_rsp(struct sk_buff *skb)
{
	struct aoedev *d;
	struct aoe_hdr *h;
	struct aoe_cfghdr *ch;
	struct aoetgt *t;
	ulong flags, aoemajor;
	struct sk_buff *sl;
	struct sk_buff_head queue;
	u16 n;

	sl = NULL;
	h = (struct aoe_hdr *) skb_mac_header(skb);
	ch = (struct aoe_cfghdr *) (h+1);

	/*
	 * Enough people have their dip switches set backwards to
	 * warrant a loud message for this special case.
	 */
	aoemajor = get_unaligned_be16(&h->major);
	if (aoemajor == 0xfff) {
		printk(KERN_ERR "aoe: Warning: shelf address is all ones.  "
			"Check shelf dip switches.\n");
		return;
	}
	if (aoemajor == 0xffff) {
		pr_info("aoe: e%ld.%d: broadcast shelf number invalid\n",
			aoemajor, (int) h->minor);
		return;
	}
	if (h->minor == 0xff) {
		pr_info("aoe: e%ld.%d: broadcast slot number invalid\n",
			aoemajor, (int) h->minor);
		return;
	}

	n = be16_to_cpu(ch->bufcnt);
	if (n > aoe_maxout)	/* keep it reasonable */
		n = aoe_maxout;

	d = aoedev_by_aoeaddr(aoemajor, h->minor, 1);
	if (d == NULL) {
		pr_info("aoe: device allocation failure\n");
		return;
	}

	spin_lock_irqsave(&d->lock, flags);

	t = gettgt(d, h->src);
	if (t) {
		t->nframes = n;
		if (n < t->maxout)
			aoecmd_wreset(t);
	} else {
		t = addtgt(d, h->src, n);
		if (!t)
			goto bail;
	}
	n = skb->dev->mtu;
	n -= sizeof(struct aoe_hdr) + sizeof(struct aoe_atahdr);
	n /= 512;
	if (n > ch->scnt)
		n = ch->scnt;
	n = n ? n * 512 : DEFAULTBCNT;
	setifbcnt(t, skb->dev, n);

	/* don't change users' perspective */
	if (d->nopen == 0) {
		d->fw_ver = be16_to_cpu(ch->fwver);
		sl = aoecmd_ata_id(d);
	}
bail:
	spin_unlock_irqrestore(&d->lock, flags);
	aoedev_put(d);
	if (sl) {
		__skb_queue_head_init(&queue);
		__skb_queue_tail(&queue, sl);
		aoenet_xmit(&queue);
	}
}

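/* Frame-size example (assuming the 24-byte aoe_hdr and 12-byte
 * aoe_atahdr of this driver): a 1500-byte MTU leaves (1500-36)/512
 * = 2 sectors, i.e. 1024-byte data frames; a 9000-byte jumbo MTU
 * allows 17 sectors (8704 bytes), still capped by the target's
 * advertised ch->scnt.
 */
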
void
aoecmd_wreset(struct aoetgt *t)
{
	t->maxout = 1;
	t->ssthresh = t->nframes / 2;
	t->next_cwnd = t->nframes;
}

void
aoecmd_cleanslate(struct aoedev *d)
{
	struct aoetgt **t, **te;

	d->rttavg = RTTAVG_INIT;
	d->rttdev = RTTDEV_INIT;
	d->maxbcnt = 0;

	t = d->targets;
	te = t + d->ntargets;
	for (; t < te && *t; t++)
		aoecmd_wreset(*t);
}

void
aoe_failbuf(struct aoedev *d, struct buf *buf)
{
	if (buf == NULL)
		return;
	buf->resid = 0;
	clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
	if (buf->nframesout == 0)
		aoe_end_buf(d, buf);
}

void
aoe_flush_iocq(void)
{
	struct frame *f;
	struct aoedev *d;
	LIST_HEAD(flist);
	struct list_head *pos;
	struct sk_buff *skb;
	ulong flags;

	spin_lock_irqsave(&iocq.lock, flags);
	list_splice_init(&iocq.head, &flist);
	spin_unlock_irqrestore(&iocq.lock, flags);
	while (!list_empty(&flist)) {
		pos = flist.next;
		list_del(pos);
		f = list_entry(pos, struct frame, head);
		d = f->t->d;
		skb = f->r_skb;
		spin_lock_irqsave(&d->lock, flags);
		if (f->buf) {
			f->buf->nframesout--;
			aoe_failbuf(d, f->buf);
		}
		aoe_freetframe(f);
		spin_unlock_irqrestore(&d->lock, flags);
		dev_kfree_skb(skb);
		aoedev_put(d);
	}
}

int __init
aoecmd_init(void)
{
	void *p;

	/* get_zeroed_page returns page with ref count 1 */
	p = (void *) get_zeroed_page(GFP_KERNEL | __GFP_REPEAT);
	if (!p)
		return -ENOMEM;
	empty_page = virt_to_page(p);

	INIT_LIST_HEAD(&iocq.head);
	spin_lock_init(&iocq.lock);
	init_waitqueue_head(&ktiowq);
	kts.name = "aoe_ktio";
	kts.fn = ktio;
	kts.waitq = &ktiowq;
	kts.lock = &iocq.lock;
	return aoe_ktstart(&kts);
}

void
aoecmd_exit(void)
{
	aoe_ktstop(&kts);
	free_page((unsigned long) page_address(empty_page));
	empty_page = NULL;
}