/* Copyright (c) 2013 Coraid, Inc. See COPYING for GPL terms. */
/*
 * aoecmd.c
 * Filesystem request handling methods
 */

#include <linux/ata.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/genhd.h>
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <net/net_namespace.h>
#include <asm/unaligned.h>
#include <linux/uio.h>
#include "aoe.h"

#define MAXIOC (8192)	/* default meant to avoid most soft lockups */

static void ktcomplete(struct frame *, struct sk_buff *);
static int count_targets(struct aoedev *d, int *untainted);

static struct buf *nextbuf(struct aoedev *);

static int aoe_deadsecs = 60 * 3;
module_param(aoe_deadsecs, int, 0644);
MODULE_PARM_DESC(aoe_deadsecs, "After aoe_deadsecs seconds, give up and fail dev.");

static int aoe_maxout = 64;
module_param(aoe_maxout, int, 0644);
MODULE_PARM_DESC(aoe_maxout,
	"Only aoe_maxout outstanding packets for every MAC on eX.Y.");

/* The number of online cpus during module initialization gives us a
 * convenient heuristic cap on the parallelism used for ktio threads
 * doing I/O completion. It is not important that the cap equal the
 * actual number of running CPUs at any given time, but because of CPU
 * hotplug, we take care to use ncpus instead of using
 * num_online_cpus() after module initialization.
 */
static int ncpus;

/* mutex lock used for synchronization while thread spawning */
static DEFINE_MUTEX(ktio_spawn_lock);

static wait_queue_head_t *ktiowq;
static struct ktstate *kts;

/* io completion queue */
struct iocq_ktio {
	struct list_head head;
	spinlock_t lock;
};
static struct iocq_ktio *iocq;

static struct page *empty_page;

static struct sk_buff *
new_skb(ulong len)
{
	struct sk_buff *skb;

	skb = alloc_skb(len + MAX_HEADER, GFP_ATOMIC);
	if (skb) {
		skb_reserve(skb, MAX_HEADER);
		skb_reset_mac_header(skb);
		skb_reset_network_header(skb);
		skb->protocol = __constant_htons(ETH_P_AOE);
		skb_checksum_none_assert(skb);
	}
	return skb;
}

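/* Find the frame with the given tag on the deferred retransmit queue,
 * unlink it, and return it; NULL if no such frame is waiting.
 */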
static struct frame *
getframe_deferred(struct aoedev *d, u32 tag)
{
	struct list_head *head, *pos, *nx;
	struct frame *f;

	head = &d->rexmitq;
	list_for_each_safe(pos, nx, head) {
		f = list_entry(pos, struct frame, head);
		if (f->tag == tag) {
			list_del(pos);
			return f;
		}
	}
	return NULL;
}

static struct frame *
getframe(struct aoedev *d, u32 tag)
{
	struct frame *f;
	struct list_head *head, *pos, *nx;
	u32 n;

	n = tag % NFACTIVE;
	head = &d->factive[n];
	list_for_each_safe(pos, nx, head) {
		f = list_entry(pos, struct frame, head);
		if (f->tag == tag) {
			list_del(pos);
			return f;
		}
	}
	return NULL;
}

/*
 * Leave the top bit clear so we have tagspace for userland.
 * The bottom 16 bits are the xmit tick for rexmit/rttavg processing.
 * This driver reserves tag -1 to mean "unused frame."
 */
static int
newtag(struct aoedev *d)
{
	register ulong n;

	n = jiffies & 0xffff;
	return n |= (++d->lasttag & 0x7fff) << 16;
}

static u32
aoehdr_atainit(struct aoedev *d, struct aoetgt *t, struct aoe_hdr *h)
{
	u32 host_tag = newtag(d);

	memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);
	memcpy(h->dst, t->addr, sizeof h->dst);
	h->type = __constant_cpu_to_be16(ETH_P_AOE);
	h->verfl = AOE_HVER;
	h->major = cpu_to_be16(d->aoemajor);
	h->minor = d->aoeminor;
	h->cmd = AOECMD_ATA;
	h->tag = cpu_to_be32(host_tag);

	return host_tag;
}

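/* Store a 48-bit LBA into the six byte-wide lba fields of the ATA
 * header, least significant byte first.
 */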
static inline void
put_lba(struct aoe_atahdr *ah, sector_t lba)
{
	ah->lba0 = lba;
	ah->lba1 = lba >>= 8;
	ah->lba2 = lba >>= 8;
	ah->lba3 = lba >>= 8;
	ah->lba4 = lba >>= 8;
	ah->lba5 = lba >>= 8;
}

static struct aoeif *
ifrotate(struct aoetgt *t)
{
	struct aoeif *ifp;

	ifp = t->ifp;
	ifp++;
	if (ifp >= &t->ifs[NAOEIFS] || ifp->nd == NULL)
		ifp = t->ifs;
	if (ifp->nd == NULL)
		return NULL;
	return t->ifp = ifp;
}

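/* The skbpool caches transmit skbs whose data the network driver may
 * still hold a reference on; skb_pool_get() hands one back only once
 * its dataref has dropped to one, i.e. the driver is done with it.
 */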
static void
skb_pool_put(struct aoedev *d, struct sk_buff *skb)
{
	__skb_queue_tail(&d->skbpool, skb);
}

static struct sk_buff *
skb_pool_get(struct aoedev *d)
{
	struct sk_buff *skb = skb_peek(&d->skbpool);

	if (skb && atomic_read(&skb_shinfo(skb)->dataref) == 1) {
		__skb_unlink(skb, &d->skbpool);
		return skb;
	}
	if (skb_queue_len(&d->skbpool) < NSKBPOOLMAX &&
	    (skb = new_skb(ETH_ZLEN)))
		return skb;

	return NULL;
}

void
aoe_freetframe(struct frame *f)
{
	struct aoetgt *t;

	t = f->t;
	f->buf = NULL;
	memset(&f->iter, 0, sizeof(f->iter));
	f->r_skb = NULL;
	f->flags = 0;
	list_add(&f->head, &t->ffree);
}

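/* Get a transmit frame for target t: reuse one from the free list or
 * allocate a new one (bounded by NSKBPOOLMAX*2 per target), making
 * sure it carries an skb whose data the driver isn't still using.
 */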
static struct frame *
newtframe(struct aoedev *d, struct aoetgt *t)
{
	struct frame *f;
	struct sk_buff *skb;
	struct list_head *pos;

	if (list_empty(&t->ffree)) {
		if (t->falloc >= NSKBPOOLMAX*2)
			return NULL;
		f = kcalloc(1, sizeof(*f), GFP_ATOMIC);
		if (f == NULL)
			return NULL;
		t->falloc++;
		f->t = t;
	} else {
		pos = t->ffree.next;
		list_del(pos);
		f = list_entry(pos, struct frame, head);
	}

	skb = f->skb;
	if (skb == NULL) {
		f->skb = skb = new_skb(ETH_ZLEN);
		if (!skb) {
bail:			aoe_freetframe(f);
			return NULL;
		}
	}

	if (atomic_read(&skb_shinfo(skb)->dataref) != 1) {
		skb = skb_pool_get(d);
		if (skb == NULL)
			goto bail;
		skb_pool_put(d, f->skb);
		f->skb = skb;
	}

	skb->truesize -= skb->data_len;
	skb_shinfo(skb)->nr_frags = skb->data_len = 0;
	skb_trim(skb, 0);
	return f;
}

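/* Pick the next target in round-robin order, starting after the last
 * one used, and get a frame for it. Tainted targets are used only
 * when no untainted target can supply a frame; if every target is at
 * its outstanding-frame limit, mark the device for a later kick.
 */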
static struct frame *
newframe(struct aoedev *d)
{
	struct frame *f;
	struct aoetgt *t, **tt;
	int totout = 0;
	int use_tainted;
	int has_untainted;

	if (!d->targets || !d->targets[0]) {
		printk(KERN_ERR "aoe: NULL TARGETS!\n");
		return NULL;
	}
	tt = d->tgt;	/* last used target */
	for (use_tainted = 0, has_untainted = 0;;) {
		tt++;
		if (tt >= &d->targets[d->ntargets] || !*tt)
			tt = d->targets;
		t = *tt;
		if (!t->taint) {
			has_untainted = 1;
			totout += t->nout;
		}
		if (t->nout < t->maxout
		&& (use_tainted || !t->taint)
		&& t->ifp->nd) {
			f = newtframe(d, t);
			if (f) {
				ifrotate(t);
				d->tgt = tt;
				return f;
			}
		}
		if (tt == d->tgt) {	/* we've looped and found nada */
			if (!use_tainted && !has_untainted)
				use_tainted = 1;
			else
				break;
		}
	}
	if (totout == 0) {
		d->kicked++;
		d->flags |= DEVFL_KICKME;
	}
	return NULL;
}

static void
skb_fillup(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter)
{
	int frag = 0;
	struct bio_vec bv;

	__bio_for_each_segment(bv, bio, iter, iter)
		skb_fill_page_desc(skb, frag++, bv.bv_page,
				   bv.bv_offset, bv.bv_len);
}

static void
fhash(struct frame *f)
{
	struct aoedev *d = f->t->d;
	u32 n;

	n = f->tag % NFACTIVE;
	list_add_tail(&f->head, &d->factive[n]);
}

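/* Fill in the AoE and ATA headers for the I/O described by f->iter,
 * hash the frame onto the active list, and, for writes, attach the
 * bio's pages to the skb as fragments.
 */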
static void
ata_rw_frameinit(struct frame *f)
{
	struct aoetgt *t;
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct sk_buff *skb;
	char writebit, extbit;

	skb = f->skb;
	h = (struct aoe_hdr *) skb_mac_header(skb);
	ah = (struct aoe_atahdr *) (h + 1);
	skb_put(skb, sizeof(*h) + sizeof(*ah));
	memset(h, 0, skb->len);

	writebit = 0x10;
	extbit = 0x4;

	t = f->t;
	f->tag = aoehdr_atainit(t->d, t, h);
	fhash(f);
	t->nout++;
	f->waited = 0;
	f->waited_total = 0;

	/* set up ata header */
	ah->scnt = f->iter.bi_size >> 9;
	put_lba(ah, f->iter.bi_sector);
	if (t->d->flags & DEVFL_EXT) {
		ah->aflags |= AOEAFL_EXT;
	} else {
		extbit = 0;
		ah->lba3 &= 0x0f;
		ah->lba3 |= 0xe0;	/* LBA bit + obsolete 0xa0 */
	}
	if (f->buf && bio_data_dir(f->buf->bio) == WRITE) {
		skb_fillup(skb, f->buf->bio, f->iter);
		ah->aflags |= AOEAFL_WRITE;
		skb->len += f->iter.bi_size;
		skb->data_len = f->iter.bi_size;
		skb->truesize += f->iter.bi_size;
		t->wpkts++;
	} else {
		t->rpkts++;
		writebit = 0;
	}

	ah->cmdstat = ATA_CMD_PIO_READ | writebit | extbit;
	skb->dev = t->ifp->nd;
}

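/* Carve the next chunk (at most maxbcnt bytes) off the current buf,
 * build a frame for it, and transmit a clone of the frame's skb.
 * Returns nonzero while there is more queued I/O to send.
 */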
static int
aoecmd_ata_rw(struct aoedev *d)
{
	struct frame *f;
	struct buf *buf;
	struct sk_buff *skb;
	struct sk_buff_head queue;

	buf = nextbuf(d);
	if (buf == NULL)
		return 0;
	f = newframe(d);
	if (f == NULL)
		return 0;

	/* initialize the headers & frame */
	f->buf = buf;
	f->iter = buf->iter;
	f->iter.bi_size = min_t(unsigned long,
				d->maxbcnt ?: DEFAULTBCNT,
				f->iter.bi_size);
	bio_advance_iter(buf->bio, &buf->iter, f->iter.bi_size);

	if (!buf->iter.bi_size)
		d->ip.buf = NULL;

	/* mark all tracking fields and load out */
	buf->nframesout += 1;

	ata_rw_frameinit(f);

	skb = skb_clone(f->skb, GFP_ATOMIC);
	if (skb) {
		f->sent = ktime_get();
		__skb_queue_head_init(&queue);
		__skb_queue_tail(&queue, skb);
		aoenet_xmit(&queue);
	}
	return 1;
}

/* some callers cannot sleep; they can call this function to build a
 * queue of config-query packets and transmit them later, when
 * interrupts are on
 */
static void
aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *queue)
{
	struct aoe_hdr *h;
	struct aoe_cfghdr *ch;
	struct sk_buff *skb;
	struct net_device *ifp;

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, ifp) {
		dev_hold(ifp);
		if (!is_aoe_netif(ifp))
			goto cont;

		skb = new_skb(sizeof *h + sizeof *ch);
		if (skb == NULL) {
			printk(KERN_INFO "aoe: skb alloc failure\n");
			goto cont;
		}
		skb_put(skb, sizeof *h + sizeof *ch);
		skb->dev = ifp;
		__skb_queue_tail(queue, skb);
		h = (struct aoe_hdr *) skb_mac_header(skb);
		memset(h, 0, sizeof *h + sizeof *ch);

		memset(h->dst, 0xff, sizeof h->dst);
		memcpy(h->src, ifp->dev_addr, sizeof h->src);
		h->type = __constant_cpu_to_be16(ETH_P_AOE);
		h->verfl = AOE_HVER;
		h->major = cpu_to_be16(aoemajor);
		h->minor = aoeminor;
		h->cmd = AOECMD_CFG;

cont:
		dev_put(ifp);
	}
	rcu_read_unlock();
}

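/* Retransmit a frame under a fresh tag: rotate to the target's next
 * interface, log the retransmit (except for probes), rehash the frame
 * onto the active list, and send a clone of its skb.
 */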
static void
resend(struct aoedev *d, struct frame *f)
{
	struct sk_buff *skb;
	struct sk_buff_head queue;
	struct aoe_hdr *h;
	struct aoetgt *t;
	char buf[128];
	u32 n;

	t = f->t;
	n = newtag(d);
	skb = f->skb;
	if (ifrotate(t) == NULL) {
		/* probably can't happen, but set it up to fail anyway */
		pr_info("aoe: resend: no interfaces to rotate to.\n");
		ktcomplete(f, NULL);
		return;
	}
	h = (struct aoe_hdr *) skb_mac_header(skb);

	if (!(f->flags & FFL_PROBE)) {
		snprintf(buf, sizeof(buf),
			 "%15s e%ld.%d oldtag=%08x@%08lx newtag=%08x s=%pm d=%pm nout=%d\n",
			 "retransmit", d->aoemajor, d->aoeminor,
			 f->tag, jiffies, n,
			 h->src, h->dst, t->nout);
		aoechr_error(buf);
	}

	f->tag = n;
	fhash(f);
	h->tag = cpu_to_be32(n);
	memcpy(h->dst, t->addr, sizeof h->dst);
	memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);

	skb->dev = t->ifp->nd;
	skb = skb_clone(skb, GFP_ATOMIC);
	if (skb == NULL)
		return;
	f->sent = ktime_get();
	__skb_queue_head_init(&queue);
	__skb_queue_tail(&queue, skb);
	aoenet_xmit(&queue);
}

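/* Microseconds elapsed since the frame was sent, measured with ktime
 * and capped at INT_MAX. tsince() below is the coarse, jiffies-based
 * fallback used when the frame itself is no longer available.
 */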
static int
tsince_hr(struct frame *f)
{
	u64 delta = ktime_to_ns(ktime_sub(ktime_get(), f->sent));

	/* delta is normally under 4.2 seconds, avoid 64-bit division */
	if (likely(delta <= UINT_MAX))
		return (u32)delta / NSEC_PER_USEC;

	/* avoid overflow after 71 minutes */
	if (delta > ((u64)INT_MAX * NSEC_PER_USEC))
		return INT_MAX;

	return div_u64(delta, NSEC_PER_USEC);
}

static int
tsince(u32 tag)
{
	int n;

	n = jiffies & 0xffff;
	n -= tag & 0xffff;
	if (n < 0)
		n += 1<<16;
	return jiffies_to_usecs(n + 1);
}

static struct aoeif *
getif(struct aoetgt *t, struct net_device *nd)
{
	struct aoeif *p, *e;

	p = t->ifs;
	e = p + NAOEIFS;
	for (; p < e; p++)
		if (p->nd == nd)
			return p;
	return NULL;
}

static void
ejectif(struct aoetgt *t, struct aoeif *ifp)
{
	struct aoeif *e;
	struct net_device *nd;
	ulong n;

	nd = ifp->nd;
	e = t->ifs + NAOEIFS - 1;
	n = (e - ifp) * sizeof *ifp;
	memmove(ifp, ifp+1, n);
	e->nd = NULL;
	dev_put(nd);
}

static struct frame *
reassign_frame(struct frame *f)
{
	struct frame *nf;
	struct sk_buff *skb;

	nf = newframe(f->t->d);
	if (!nf)
		return NULL;
	if (nf->t == f->t) {
		aoe_freetframe(nf);
		return NULL;
	}

	skb = nf->skb;
	nf->skb = f->skb;
	nf->buf = f->buf;
	nf->iter = f->iter;
	nf->waited = 0;
	nf->waited_total = f->waited_total;
	nf->sent = f->sent;
	f->skb = skb;

	return nf;
}

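/* Probe a tainted target: send a read of d->maxbcnt (or DEFAULTBCNT)
 * bytes, padding the outgoing skb with empty_page fragments to the
 * full transfer size, apparently so that full-size frames are
 * exercised on the wire in both directions.
 */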
static void
probe(struct aoetgt *t)
{
	struct aoedev *d;
	struct frame *f;
	struct sk_buff *skb;
	struct sk_buff_head queue;
	size_t n, m;
	int frag;

	d = t->d;
	f = newtframe(d, t);
	if (!f) {
		pr_err("%s %pm for e%ld.%d: %s\n",
		       "aoe: cannot probe remote address",
		       t->addr,
		       (long) d->aoemajor, d->aoeminor,
		       "no frame available");
		return;
	}
	f->flags |= FFL_PROBE;
	ifrotate(t);
	f->iter.bi_size = t->d->maxbcnt ? t->d->maxbcnt : DEFAULTBCNT;
	ata_rw_frameinit(f);
	skb = f->skb;
	for (frag = 0, n = f->iter.bi_size; n > 0; ++frag, n -= m) {
		if (n < PAGE_SIZE)
			m = n;
		else
			m = PAGE_SIZE;
		skb_fill_page_desc(skb, frag, empty_page, 0, m);
	}
	skb->len += f->iter.bi_size;
	skb->data_len = f->iter.bi_size;
	skb->truesize += f->iter.bi_size;

	skb = skb_clone(f->skb, GFP_ATOMIC);
	if (skb) {
		f->sent = ktime_get();
		__skb_queue_head_init(&queue);
		__skb_queue_tail(&queue, skb);
		aoenet_xmit(&queue);
	}
}

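/* Retransmit timeout in microseconds: twice the smoothed RTT plus
 * eight times the smoothed mean deviation, never less than one.
 * (rttavg and rttdev are kept scaled; see calc_rttavg below.)
 */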
static long
rto(struct aoedev *d)
{
	long t;

	t = 2 * d->rttavg >> RTTSCALE;
	t += 8 * d->rttdev >> RTTDSCALE;
	if (t == 0)
		t = 1;

	return t;
}

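/* Walk the deferred retransmit queue and resend each frame whose
 * target has room for it. Frames on tainted targets are reassigned
 * to other targets when possible, and probes are issued or dropped
 * according to how the taint is trending.
 */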
static void
rexmit_deferred(struct aoedev *d)
{
	struct aoetgt *t;
	struct frame *f;
	struct frame *nf;
	struct list_head *pos, *nx, *head;
	int since;
	int untainted;

	count_targets(d, &untainted);

	head = &d->rexmitq;
	list_for_each_safe(pos, nx, head) {
		f = list_entry(pos, struct frame, head);
		t = f->t;
		if (t->taint) {
			if (!(f->flags & FFL_PROBE)) {
				nf = reassign_frame(f);
				if (nf) {
					if (t->nout_probes == 0
					&& untainted > 0) {
						probe(t);
						t->nout_probes++;
					}
					list_replace(&f->head, &nf->head);
					pos = &nf->head;
					aoe_freetframe(f);
					f = nf;
					t = f->t;
				}
			} else if (untainted < 1) {
				/* don't probe w/o other untainted aoetgts */
				goto stop_probe;
			} else if (tsince_hr(f) < t->taint * rto(d)) {
				/* reprobe slowly when taint is high */
				continue;
			}
		} else if (f->flags & FFL_PROBE) {
stop_probe:		/* don't probe untainted aoetgts */
			list_del(pos);
			aoe_freetframe(f);
			/* leaving d->kicked, because this is routine */
			f->t->d->flags |= DEVFL_KICKME;
			continue;
		}
		if (t->nout >= t->maxout)
			continue;
		list_del(pos);
		t->nout++;
		if (f->flags & FFL_PROBE)
			t->nout_probes++;
		since = tsince_hr(f);
		f->waited += since;
		f->waited_total += since;
		resend(d, f);
	}
}

/* An aoetgt accumulates demerits quickly, and successful
 * probing redeems the aoetgt slowly.
 */
static void
scorn(struct aoetgt *t)
{
	int n;

	n = t->taint++;
	t->taint += t->taint * 2;
	if (n > t->taint)
		t->taint = n;
	if (t->taint > MAX_TAINT)
		t->taint = MAX_TAINT;
}

static int
count_targets(struct aoedev *d, int *untainted)
{
	int i, good;

	for (i = good = 0; i < d->ntargets && d->targets[i]; ++i)
		if (d->targets[i]->taint == 0)
			good++;

	if (untainted)
		*untainted = good;
	return i;
}

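/* Timer callback: collect frames that have been outstanding longer
 * than the current rto, scorn targets that keep us waiting, collapse
 * their windows, and queue the frames for deferred retransmission.
 * A device that has waited aoe_deadsecs on a frame is failed.
 */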
static void
rexmit_timer(struct timer_list *timer)
{
	struct aoedev *d;
	struct aoetgt *t;
	struct aoeif *ifp;
	struct frame *f;
	struct list_head *head, *pos, *nx;
	LIST_HEAD(flist);
	register long timeout;
	ulong flags, n;
	int i;
	int utgts;	/* number of aoetgt descriptors (not slots) */
	int since;

	d = from_timer(d, timer, timer);

	spin_lock_irqsave(&d->lock, flags);

	/* timeout based on observed timings and variations */
	timeout = rto(d);

	utgts = count_targets(d, NULL);

	if (d->flags & DEVFL_TKILL) {
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}

	/* collect all frames to rexmit into flist */
	for (i = 0; i < NFACTIVE; i++) {
		head = &d->factive[i];
		list_for_each_safe(pos, nx, head) {
			f = list_entry(pos, struct frame, head);
			if (tsince_hr(f) < timeout)
				break;	/* end of expired frames */
			/* move to flist for later processing */
			list_move_tail(pos, &flist);
		}
	}

	/* process expired frames */
	while (!list_empty(&flist)) {
		pos = flist.next;
		f = list_entry(pos, struct frame, head);
		since = tsince_hr(f);
		n = f->waited_total + since;
		n /= USEC_PER_SEC;
		if (aoe_deadsecs
		&& n > aoe_deadsecs
		&& !(f->flags & FFL_PROBE)) {
			/* Waited too long. Device failure.
			 * Hang all frames on first hash bucket for downdev
			 * to clean up.
			 */
			list_splice(&flist, &d->factive[0]);
			aoedev_downdev(d);
			goto out;
		}

		t = f->t;
		n = f->waited + since;
		n /= USEC_PER_SEC;
		if (aoe_deadsecs && utgts > 0
		&& (n > aoe_deadsecs / utgts || n > HARD_SCORN_SECS))
			scorn(t);	/* avoid this target */

		if (t->maxout != 1) {
			t->ssthresh = t->maxout / 2;
			t->maxout = 1;
		}

		if (f->flags & FFL_PROBE) {
			t->nout_probes--;
		} else {
			ifp = getif(t, f->skb->dev);
			if (ifp && ++ifp->lost > (t->nframes << 1)
			&& (ifp != t->ifs || t->ifs[1].nd)) {
				ejectif(t, ifp);
				ifp = NULL;
			}
		}
		list_move_tail(pos, &d->rexmitq);
		t->nout--;
	}
	rexmit_deferred(d);

out:
	if ((d->flags & DEVFL_KICKME) && d->blkq) {
		d->flags &= ~DEVFL_KICKME;
		d->blkq->request_fn(d->blkq);
	}

	d->timer.expires = jiffies + TIMERTICK;
	add_timer(&d->timer);

	spin_unlock_irqrestore(&d->lock, flags);
}

static unsigned long
rqbiocnt(struct request *r)
{
	struct bio *bio;
	unsigned long n = 0;

	__rq_for_each_bio(bio, r)
		n++;
	return n;
}

static void
bufinit(struct buf *buf, struct request *rq, struct bio *bio)
{
	memset(buf, 0, sizeof(*buf));
	buf->rq = rq;
	buf->bio = bio;
	buf->iter = bio->bi_iter;
}

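/* Return the buf currently being filled, or start carving the next
 * request from the block queue into per-bio bufs. rq->special holds
 * the count of bufs still to be completed for the request.
 */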
static struct buf *
nextbuf(struct aoedev *d)
{
	struct request *rq;
	struct request_queue *q;
	struct buf *buf;
	struct bio *bio;

	q = d->blkq;
	if (q == NULL)
		return NULL;	/* initializing */
	if (d->ip.buf)
		return d->ip.buf;
	rq = d->ip.rq;
	if (rq == NULL) {
		rq = blk_peek_request(q);
		if (rq == NULL)
			return NULL;
		blk_start_request(rq);
		d->ip.rq = rq;
		d->ip.nxbio = rq->bio;
		rq->special = (void *) rqbiocnt(rq);
	}
	buf = mempool_alloc(d->bufpool, GFP_ATOMIC);
	if (buf == NULL) {
		pr_err("aoe: nextbuf: unable to mempool_alloc!\n");
		return NULL;
	}
	bio = d->ip.nxbio;
	bufinit(buf, rq, bio);
	bio = bio->bi_next;
	d->ip.nxbio = bio;
	if (bio == NULL)
		d->ip.rq = NULL;
	return d->ip.buf = buf;
}

/* enters with d->lock held */
void
aoecmd_work(struct aoedev *d)
{
	rexmit_deferred(d);
	while (aoecmd_ata_rw(d))
		;
}

/* this function performs work that has been deferred until sleeping is OK
 */
void
aoecmd_sleepwork(struct work_struct *work)
{
	struct aoedev *d = container_of(work, struct aoedev, work);
	struct block_device *bd;
	u64 ssize;

	if (d->flags & DEVFL_GDALLOC)
		aoeblk_gdalloc(d);

	if (d->flags & DEVFL_NEWSIZE) {
		ssize = get_capacity(d->gd);
		bd = bdget_disk(d->gd, 0);
		if (bd) {
			inode_lock(bd->bd_inode);
			i_size_write(bd->bd_inode, (loff_t)ssize<<9);
			inode_unlock(bd->bd_inode);
			bdput(bd);
		}
		spin_lock_irq(&d->lock);
		d->flags |= DEVFL_UP;
		d->flags &= ~DEVFL_NEWSIZE;
		spin_unlock_irq(&d->lock);
	}
}

static void
ata_ident_fixstring(u16 *id, int ns)
{
	u16 s;

	while (ns-- > 0) {
		s = *id;
		*id++ = s >> 8 | s << 8;
	}
}

static void
ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
{
	u64 ssize;
	u16 n;

	/* word 83: command set supported */
	n = get_unaligned_le16(&id[83 << 1]);

	/* word 86: command set/feature enabled */
	n |= get_unaligned_le16(&id[86 << 1]);

	if (n & (1<<10)) {	/* bit 10: LBA 48 */
		d->flags |= DEVFL_EXT;

		/* word 100: number lba48 sectors */
		ssize = get_unaligned_le64(&id[100 << 1]);

		/* set as in ide-disk.c:init_idedisk_capacity */
		d->geo.cylinders = ssize;
		d->geo.cylinders /= (255 * 63);
		d->geo.heads = 255;
		d->geo.sectors = 63;
	} else {
		d->flags &= ~DEVFL_EXT;

		/* number lba28 sectors */
		ssize = get_unaligned_le32(&id[60 << 1]);

		/* NOTE: obsolete in ATA 6 */
		d->geo.cylinders = get_unaligned_le16(&id[54 << 1]);
		d->geo.heads = get_unaligned_le16(&id[55 << 1]);
		d->geo.sectors = get_unaligned_le16(&id[56 << 1]);
	}

	ata_ident_fixstring((u16 *) &id[10<<1], 10);	/* serial */
	ata_ident_fixstring((u16 *) &id[23<<1], 4);	/* firmware */
	ata_ident_fixstring((u16 *) &id[27<<1], 20);	/* model */
	memcpy(d->ident, id, sizeof(d->ident));

	if (d->ssize != ssize)
		printk(KERN_INFO
			"aoe: %pm e%ld.%d v%04x has %llu sectors\n",
			t->addr,
			d->aoemajor, d->aoeminor,
			d->fw_ver, (long long)ssize);
	d->ssize = ssize;
	d->geo.start = 0;
	if (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE))
		return;
	if (d->gd != NULL) {
		set_capacity(d->gd, ssize);
		d->flags |= DEVFL_NEWSIZE;
	} else
		d->flags |= DEVFL_GDALLOC;
	schedule_work(&d->work);
}

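/* Update the smoothed RTT estimate and its mean deviation from one
 * round-trip sample, then grow the target's window: slow start below
 * ssthresh, one more frame per window's worth of responses above it.
 * rttavg and rttdev are stored scaled by 2^RTTSCALE and 2^RTTDSCALE
 * respectively, so each update below is an exponentially weighted
 * moving average with gain 2^-RTTSCALE (2^-RTTDSCALE for deviation):
 *
 *	avg += rtt - avg/2^RTTSCALE
 *	dev += |rtt - avg/2^RTTSCALE| - dev/2^RTTDSCALE
 */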
static void
calc_rttavg(struct aoedev *d, struct aoetgt *t, int rtt)
{
	register long n;

	n = rtt;

	/* cf. Congestion Avoidance and Control, Jacobson & Karels, 1988 */
	n -= d->rttavg >> RTTSCALE;
	d->rttavg += n;
	if (n < 0)
		n = -n;
	n -= d->rttdev >> RTTDSCALE;
	d->rttdev += n;

	if (!t || t->maxout >= t->nframes)
		return;
	if (t->maxout < t->ssthresh)
		t->maxout += 1;
	else if (t->nout == t->maxout && t->next_cwnd-- == 0) {
		t->maxout += 1;
		t->next_cwnd = t->maxout;
	}
}

static struct aoetgt *
gettgt(struct aoedev *d, char *addr)
{
	struct aoetgt **t, **e;

	t = d->targets;
	e = t + d->ntargets;
	for (; t < e && *t; t++)
		if (memcmp((*t)->addr, addr, sizeof((*t)->addr)) == 0)
			return *t;
	return NULL;
}

static void
bvcpy(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter, long cnt)
{
	int soff = 0;
	struct bio_vec bv;

	iter.bi_size = cnt;

	__bio_for_each_segment(bv, bio, iter, iter) {
		char *p = kmap_atomic(bv.bv_page) + bv.bv_offset;
		skb_copy_bits(skb, soff, p, bv.bv_len);
		kunmap_atomic(p);
		soff += bv.bv_len;
	}
}

void
aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
{
	struct bio *bio;
	int bok;
	struct request_queue *q;

	q = d->blkq;
	if (rq == d->ip.rq)
		d->ip.rq = NULL;
	do {
		bio = rq->bio;
		bok = !fastfail && !bio->bi_status;
	} while (__blk_end_request(rq, bok ? BLK_STS_OK : BLK_STS_IOERR, bio->bi_iter.bi_size));

	/* cf. http://lkml.org/lkml/2006/10/31/28 */
	if (!fastfail)
		__blk_run_queue(q);
}

static void
aoe_end_buf(struct aoedev *d, struct buf *buf)
{
	struct request *rq;
	unsigned long n;

	if (buf == d->ip.buf)
		d->ip.buf = NULL;
	rq = buf->rq;
	mempool_free(buf, d->bufpool);
	n = (unsigned long) rq->special;
	rq->special = (void *) --n;
	if (n == 0)
		aoe_end_request(d, rq, 0);
}

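/* Complete one response in kthread context: validate the ATA status,
 * copy read data back into the bio, and when the last frame of a buf
 * has come home, finish the buf (and possibly the request).
 */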
static void
ktiocomplete(struct frame *f)
{
	struct aoe_hdr *hin, *hout;
	struct aoe_atahdr *ahin, *ahout;
	struct buf *buf;
	struct sk_buff *skb;
	struct aoetgt *t;
	struct aoeif *ifp;
	struct aoedev *d;
	long n;
	int untainted;

	if (f == NULL)
		return;

	t = f->t;
	d = t->d;
	skb = f->r_skb;
	buf = f->buf;
	if (f->flags & FFL_PROBE)
		goto out;
	if (!skb)		/* just fail the buf. */
		goto noskb;

	hout = (struct aoe_hdr *) skb_mac_header(f->skb);
	ahout = (struct aoe_atahdr *) (hout+1);

	hin = (struct aoe_hdr *) skb->data;
	skb_pull(skb, sizeof(*hin));
	ahin = (struct aoe_atahdr *) skb->data;
	skb_pull(skb, sizeof(*ahin));
	if (ahin->cmdstat & 0xa9) {	/* these bits cleared on success */
		pr_err("aoe: ata error cmd=%2.2Xh stat=%2.2Xh from e%ld.%d\n",
			ahout->cmdstat, ahin->cmdstat,
			d->aoemajor, d->aoeminor);
noskb:		if (buf)
			buf->bio->bi_status = BLK_STS_IOERR;
		goto out;
	}

	n = ahout->scnt << 9;
	switch (ahout->cmdstat) {
	case ATA_CMD_PIO_READ:
	case ATA_CMD_PIO_READ_EXT:
		if (skb->len < n) {
			pr_err("%s e%ld.%d. skb->len=%d need=%ld\n",
				"aoe: runt data size in read from",
				(long) d->aoemajor, d->aoeminor,
				skb->len, n);
			buf->bio->bi_status = BLK_STS_IOERR;
			break;
		}
		if (n > f->iter.bi_size) {
			pr_err_ratelimited("%s e%ld.%d. bytes=%ld need=%u\n",
				"aoe: too-large data size in read from",
				(long) d->aoemajor, d->aoeminor,
				n, f->iter.bi_size);
			buf->bio->bi_status = BLK_STS_IOERR;
			break;
		}
		bvcpy(skb, f->buf->bio, f->iter, n);
		/* fall through */
	case ATA_CMD_PIO_WRITE:
	case ATA_CMD_PIO_WRITE_EXT:
		spin_lock_irq(&d->lock);
		ifp = getif(t, skb->dev);
		if (ifp)
			ifp->lost = 0;
		spin_unlock_irq(&d->lock);
		break;
	case ATA_CMD_ID_ATA:
		if (skb->len < 512) {
			pr_info("%s e%ld.%d. skb->len=%d need=512\n",
				"aoe: runt data size in ataid from",
				(long) d->aoemajor, d->aoeminor,
				skb->len);
			break;
		}
		if (skb_linearize(skb))
			break;
		spin_lock_irq(&d->lock);
		ataid_complete(d, t, skb->data);
		spin_unlock_irq(&d->lock);
		break;
	default:
		pr_info("aoe: unrecognized ata command %2.2Xh for %d.%d\n",
			ahout->cmdstat,
			be16_to_cpu(get_unaligned(&hin->major)),
			hin->minor);
	}
out:
	spin_lock_irq(&d->lock);
	if (t->taint > 0
	&& --t->taint > 0
	&& t->nout_probes == 0) {
		count_targets(d, &untainted);
		if (untainted > 0) {
			probe(t);
			t->nout_probes++;
		}
	}

	aoe_freetframe(f);

	if (buf && --buf->nframesout == 0 && buf->iter.bi_size == 0)
		aoe_end_buf(d, buf);

	spin_unlock_irq(&d->lock);
	aoedev_put(d);
	dev_kfree_skb(skb);
}

/* Enters with iocq.lock held.
 * Returns true iff responses needing processing remain.
 */
static int
ktio(int id)
{
	struct frame *f;
	struct list_head *pos;
	int i;
	int actual_id;

	for (i = 0; ; ++i) {
		if (i == MAXIOC)
			return 1;
		if (list_empty(&iocq[id].head))
			return 0;
		pos = iocq[id].head.next;
		list_del(pos);
		f = list_entry(pos, struct frame, head);
		spin_unlock_irq(&iocq[id].lock);
		ktiocomplete(f);

		/* Figure out if extra threads are required. */
		actual_id = f->t->d->aoeminor % ncpus;

		if (!kts[actual_id].active) {
			BUG_ON(id != 0);
			mutex_lock(&ktio_spawn_lock);
			if (!kts[actual_id].active
				&& aoe_ktstart(&kts[actual_id]) == 0)
				kts[actual_id].active = 1;
			mutex_unlock(&ktio_spawn_lock);
		}
		spin_lock_irq(&iocq[id].lock);
	}
}

static int
kthread(void *vp)
{
	struct ktstate *k;
	DECLARE_WAITQUEUE(wait, current);
	int more;

	k = vp;
	current->flags |= PF_NOFREEZE;
	set_user_nice(current, -10);
	complete(&k->rendez);	/* tell spawner we're running */
	do {
		spin_lock_irq(k->lock);
		more = k->fn(k->id);
		if (!more) {
			add_wait_queue(k->waitq, &wait);
			__set_current_state(TASK_INTERRUPTIBLE);
		}
		spin_unlock_irq(k->lock);
		if (!more) {
			schedule();
			remove_wait_queue(k->waitq, &wait);
		} else
			cond_resched();
	} while (!kthread_should_stop());
	complete(&k->rendez);	/* tell spawner we're stopping */
	return 0;
}

void
aoe_ktstop(struct ktstate *k)
{
	kthread_stop(k->task);
	wait_for_completion(&k->rendez);
}

int
aoe_ktstart(struct ktstate *k)
{
	struct task_struct *task;

	init_completion(&k->rendez);
	task = kthread_run(kthread, k, "%s", k->name);
	if (task == NULL || IS_ERR(task))
		return -ENOMEM;
	k->task = task;
	wait_for_completion(&k->rendez);	/* allow kthread to start */
	init_completion(&k->rendez);	/* for waiting for exit later */
	return 0;
}

/* pass it off to kthreads for processing */
static void
ktcomplete(struct frame *f, struct sk_buff *skb)
{
	int id;
	ulong flags;

	f->r_skb = skb;
	id = f->t->d->aoeminor % ncpus;
	spin_lock_irqsave(&iocq[id].lock, flags);
	if (!kts[id].active) {
		spin_unlock_irqrestore(&iocq[id].lock, flags);
		/* The thread with id has not been spawned yet,
		 * so delegate the work to the main thread and
		 * try spawning a new thread.
		 */
		id = 0;
		spin_lock_irqsave(&iocq[id].lock, flags);
	}
	list_add_tail(&f->head, &iocq[id].head);
	spin_unlock_irqrestore(&iocq[id].lock, flags);
	wake_up(&ktiowq[id]);
}

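/* Match an incoming ATA response to its outstanding frame by tag,
 * fold the measured round trip into the RTT estimate, and hand the
 * frame to a ktio thread for completion. Returns the skb for the
 * caller to free when no matching frame exists, NULL otherwise.
 */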
struct sk_buff *
aoecmd_ata_rsp(struct sk_buff *skb)
{
	struct aoedev *d;
	struct aoe_hdr *h;
	struct frame *f;
	u32 n;
	ulong flags;
	char ebuf[128];
	u16 aoemajor;

	h = (struct aoe_hdr *) skb->data;
	aoemajor = be16_to_cpu(get_unaligned(&h->major));
	d = aoedev_by_aoeaddr(aoemajor, h->minor, 0);
	if (d == NULL) {
		snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response "
			"for unknown device %d.%d\n",
			aoemajor, h->minor);
		aoechr_error(ebuf);
		return skb;
	}

	spin_lock_irqsave(&d->lock, flags);

	n = be32_to_cpu(get_unaligned(&h->tag));
	f = getframe(d, n);
	if (f) {
		calc_rttavg(d, f->t, tsince_hr(f));
		f->t->nout--;
		if (f->flags & FFL_PROBE)
			f->t->nout_probes--;
	} else {
		f = getframe_deferred(d, n);
		if (f) {
			calc_rttavg(d, NULL, tsince_hr(f));
		} else {
			calc_rttavg(d, NULL, tsince(n));
			spin_unlock_irqrestore(&d->lock, flags);
			aoedev_put(d);
			snprintf(ebuf, sizeof(ebuf),
				 "%15s e%d.%d tag=%08x@%08lx s=%pm d=%pm\n",
				 "unexpected rsp",
				 get_unaligned_be16(&h->major),
				 h->minor,
				 get_unaligned_be32(&h->tag),
				 jiffies,
				 h->src,
				 h->dst);
			aoechr_error(ebuf);
			return skb;
		}
	}
	aoecmd_work(d);

	spin_unlock_irqrestore(&d->lock, flags);

	ktcomplete(f, skb);

	/*
	 * Note here that we do not perform an aoedev_put, as we are
	 * leaving this reference for the ktio to release.
	 */
	return NULL;
}

void
aoecmd_cfg(ushort aoemajor, unsigned char aoeminor)
{
	struct sk_buff_head queue;

	__skb_queue_head_init(&queue);
	aoecmd_cfg_pkts(aoemajor, aoeminor, &queue);
	aoenet_xmit(&queue);
}

struct sk_buff *
aoecmd_ata_id(struct aoedev *d)
{
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct frame *f;
	struct sk_buff *skb;
	struct aoetgt *t;

	f = newframe(d);
	if (f == NULL)
		return NULL;

	t = *d->tgt;

	/* initialize the headers & frame */
	skb = f->skb;
	h = (struct aoe_hdr *) skb_mac_header(skb);
	ah = (struct aoe_atahdr *) (h+1);
	skb_put(skb, sizeof *h + sizeof *ah);
	memset(h, 0, skb->len);
	f->tag = aoehdr_atainit(d, t, h);
	fhash(f);
	t->nout++;
	f->waited = 0;
	f->waited_total = 0;

	/* set up ata header */
	ah->scnt = 1;
	ah->cmdstat = ATA_CMD_ID_ATA;
	ah->lba3 = 0xa0;

	skb->dev = t->ifp->nd;

	d->rttavg = RTTAVG_INIT;
	d->rttdev = RTTDEV_INIT;
	d->timer.function = rexmit_timer;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (skb)
		f->sent = ktime_get();

	return skb;
}

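/* Double the size of the target array, preserving the contents and
 * the cursor position of d->tgt; returns the first new empty slot.
 */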
static struct aoetgt **
grow_targets(struct aoedev *d)
{
	ulong oldn, newn;
	struct aoetgt **tt;

	oldn = d->ntargets;
	newn = oldn * 2;
	tt = kcalloc(newn, sizeof(*d->targets), GFP_ATOMIC);
	if (!tt)
		return NULL;
	memmove(tt, d->targets, sizeof(*d->targets) * oldn);
	d->tgt = tt + (d->tgt - d->targets);
	kfree(d->targets);
	d->targets = tt;
	d->ntargets = newn;

	return &d->targets[oldn];
}

static struct aoetgt *
addtgt(struct aoedev *d, char *addr, ulong nframes)
{
	struct aoetgt *t, **tt, **te;

	tt = d->targets;
	te = tt + d->ntargets;
	for (; tt < te && *tt; tt++)
		;

	if (tt == te) {
		tt = grow_targets(d);
		if (!tt)
			goto nomem;
	}
	t = kzalloc(sizeof(*t), GFP_ATOMIC);
	if (!t)
		goto nomem;
	t->nframes = nframes;
	t->d = d;
	memcpy(t->addr, addr, sizeof t->addr);
	t->ifp = t->ifs;
	aoecmd_wreset(t);
	t->maxout = t->nframes / 2;
	INIT_LIST_HEAD(&t->ffree);
	return *tt = t;

 nomem:
	pr_info("aoe: cannot allocate memory to add target\n");
	return NULL;
}

static void
setdbcnt(struct aoedev *d)
{
	struct aoetgt **t, **e;
	int bcnt = 0;

	t = d->targets;
	e = t + d->ntargets;
	for (; t < e && *t; t++)
		if (bcnt == 0 || bcnt > (*t)->minbcnt)
			bcnt = (*t)->minbcnt;
	if (bcnt != d->maxbcnt) {
		d->maxbcnt = bcnt;
		pr_info("aoe: e%ld.%d: setting %d byte data frames\n",
			d->aoemajor, d->aoeminor, bcnt);
	}
}

static void
setifbcnt(struct aoetgt *t, struct net_device *nd, int bcnt)
{
	struct aoedev *d;
	struct aoeif *p, *e;
	int minbcnt;

	d = t->d;
	minbcnt = bcnt;
	p = t->ifs;
	e = p + NAOEIFS;
	for (; p < e; p++) {
		if (p->nd == NULL)
			break;	/* end of the valid interfaces */
		if (p->nd == nd) {
			p->bcnt = bcnt;	/* we're updating */
			nd = NULL;
		} else if (minbcnt > p->bcnt)
			minbcnt = p->bcnt; /* find the min interface */
	}
	if (nd) {
		if (p == e) {
			pr_err("aoe: device setifbcnt failure; too many interfaces.\n");
			return;
		}
		dev_hold(nd);
		p->nd = nd;
		p->bcnt = bcnt;
	}
	t->minbcnt = minbcnt;
	setdbcnt(d);
}

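/* Handle an AoE config response: reject bogus broadcast addresses,
 * find or create the device and target, record the per-interface
 * payload limit derived from the MTU, and, if nobody has the device
 * open, kick off an ATA identify to (re)read its geometry.
 */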
void
aoecmd_cfg_rsp(struct sk_buff *skb)
{
	struct aoedev *d;
	struct aoe_hdr *h;
	struct aoe_cfghdr *ch;
	struct aoetgt *t;
	ulong flags, aoemajor;
	struct sk_buff *sl;
	struct sk_buff_head queue;
	u16 n;

	sl = NULL;
	h = (struct aoe_hdr *) skb_mac_header(skb);
	ch = (struct aoe_cfghdr *) (h+1);

	/*
	 * Enough people have their dip switches set backwards to
	 * warrant a loud message for this special case.
	 */
	aoemajor = get_unaligned_be16(&h->major);
	if (aoemajor == 0xfff) {
		printk(KERN_ERR "aoe: Warning: shelf address is all ones. "
			"Check shelf dip switches.\n");
		return;
	}
	if (aoemajor == 0xffff) {
		pr_info("aoe: e%ld.%d: broadcast shelf number invalid\n",
			aoemajor, (int) h->minor);
		return;
	}
	if (h->minor == 0xff) {
		pr_info("aoe: e%ld.%d: broadcast slot number invalid\n",
			aoemajor, (int) h->minor);
		return;
	}

	n = be16_to_cpu(ch->bufcnt);
	if (n > aoe_maxout)	/* keep it reasonable */
		n = aoe_maxout;

	d = aoedev_by_aoeaddr(aoemajor, h->minor, 1);
	if (d == NULL) {
		pr_info("aoe: device allocation failure\n");
		return;
	}

	spin_lock_irqsave(&d->lock, flags);

	t = gettgt(d, h->src);
	if (t) {
		t->nframes = n;
		if (n < t->maxout)
			aoecmd_wreset(t);
	} else {
		t = addtgt(d, h->src, n);
		if (!t)
			goto bail;
	}
	n = skb->dev->mtu;
	n -= sizeof(struct aoe_hdr) + sizeof(struct aoe_atahdr);
	n /= 512;
	if (n > ch->scnt)
		n = ch->scnt;
	n = n ? n * 512 : DEFAULTBCNT;
	setifbcnt(t, skb->dev, n);

	/* don't change users' perspective */
	if (d->nopen == 0) {
		d->fw_ver = be16_to_cpu(ch->fwver);
		sl = aoecmd_ata_id(d);
	}
bail:
	spin_unlock_irqrestore(&d->lock, flags);
	aoedev_put(d);
	if (sl) {
		__skb_queue_head_init(&queue);
		__skb_queue_tail(&queue, sl);
		aoenet_xmit(&queue);
	}
}

void
aoecmd_wreset(struct aoetgt *t)
{
	t->maxout = 1;
	t->ssthresh = t->nframes / 2;
	t->next_cwnd = t->nframes;
}

void
aoecmd_cleanslate(struct aoedev *d)
{
	struct aoetgt **t, **te;

	d->rttavg = RTTAVG_INIT;
	d->rttdev = RTTDEV_INIT;
	d->maxbcnt = 0;

	t = d->targets;
	te = t + d->ntargets;
	for (; t < te && *t; t++)
		aoecmd_wreset(*t);
}

void
aoe_failbuf(struct aoedev *d, struct buf *buf)
{
	if (buf == NULL)
		return;
	buf->iter.bi_size = 0;
	buf->bio->bi_status = BLK_STS_IOERR;
	if (buf->nframesout == 0)
		aoe_end_buf(d, buf);
}

void
aoe_flush_iocq(void)
{
	int i;

	for (i = 0; i < ncpus; i++) {
		if (kts[i].active)
			aoe_flush_iocq_by_index(i);
	}
}

void
aoe_flush_iocq_by_index(int id)
{
	struct frame *f;
	struct aoedev *d;
	LIST_HEAD(flist);
	struct list_head *pos;
	struct sk_buff *skb;
	ulong flags;

	spin_lock_irqsave(&iocq[id].lock, flags);
	list_splice_init(&iocq[id].head, &flist);
	spin_unlock_irqrestore(&iocq[id].lock, flags);
	while (!list_empty(&flist)) {
		pos = flist.next;
		list_del(pos);
		f = list_entry(pos, struct frame, head);
		d = f->t->d;
		skb = f->r_skb;
		spin_lock_irqsave(&d->lock, flags);
		if (f->buf) {
			f->buf->nframesout--;
			aoe_failbuf(d, f->buf);
		}
		aoe_freetframe(f);
		spin_unlock_irqrestore(&d->lock, flags);
		dev_kfree_skb(skb);
		aoedev_put(d);
	}
}

int __init
aoecmd_init(void)
{
	void *p;
	int i;
	int ret;

	/* get_zeroed_page returns page with ref count 1 */
	p = (void *) get_zeroed_page(GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	empty_page = virt_to_page(p);

	ncpus = num_online_cpus();

	iocq = kcalloc(ncpus, sizeof(struct iocq_ktio), GFP_KERNEL);
	if (!iocq)
		return -ENOMEM;

	kts = kcalloc(ncpus, sizeof(struct ktstate), GFP_KERNEL);
	if (!kts) {
		ret = -ENOMEM;
		goto kts_fail;
	}

	ktiowq = kcalloc(ncpus, sizeof(wait_queue_head_t), GFP_KERNEL);
	if (!ktiowq) {
		ret = -ENOMEM;
		goto ktiowq_fail;
	}

	mutex_init(&ktio_spawn_lock);

	for (i = 0; i < ncpus; i++) {
		INIT_LIST_HEAD(&iocq[i].head);
		spin_lock_init(&iocq[i].lock);
		init_waitqueue_head(&ktiowq[i]);
		snprintf(kts[i].name, sizeof(kts[i].name), "aoe_ktio%d", i);
		kts[i].fn = ktio;
		kts[i].waitq = &ktiowq[i];
		kts[i].lock = &iocq[i].lock;
		kts[i].id = i;
		kts[i].active = 0;
	}
	kts[0].active = 1;
	if (aoe_ktstart(&kts[0])) {
		ret = -ENOMEM;
		goto ktstart_fail;
	}
	return 0;

ktstart_fail:
	kfree(ktiowq);
ktiowq_fail:
	kfree(kts);
kts_fail:
	kfree(iocq);

	return ret;
}

void
aoecmd_exit(void)
{
	int i;

	for (i = 0; i < ncpus; i++)
		if (kts[i].active)
			aoe_ktstop(&kts[i]);

	aoe_flush_iocq();

	/* Free up the iocq and thread-specific configuration
	 * allocated during startup.
	 */
	kfree(iocq);
	kfree(kts);
	kfree(ktiowq);

	free_page((unsigned long) page_address(empty_page));
	empty_page = NULL;
}