Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

aoe: handle multiple network paths to AoE device

A remote AoE device is something that can process ATA commands and is identified by
an AoE shelf number and an AoE slot number. Such a device might have more
than one network interface, and it might be reachable by more than one local
network interface. This patch tracks the network paths available to
each AoE device, allowing them to be used more efficiently.

Andrew Morton asked about the call to msleep_interruptible in the revalidate
function. Yes, if a signal is pending, then msleep_interruptible will not
return 0. That means we will not loop but will call aoenet_xmit with a NULL
skb, which is a noop. If the system is too low on memory or the aoe driver is
too low on frames, then the user can hit control-C to interrupt the attempt to
do a revalidate. I have added a comment to the code summarizing that.

Andrew Morton asked whether the allocation performed inside addtgt could use a
more relaxed allocation like GFP_KERNEL, but addtgt is called when the aoedev
lock has been locked with spin_lock_irqsave. It would be nice to allocate the
memory under fewer restrictions, but targets are only added when the device is
being discovered, and if the target can't be added right now, we can try again
in a minute when the next AoE config query broadcast goes out.

Andrew Morton pointed out that the "too many targets" message could be printed
for failing GFP_ATOMIC allocations. The last patch in this series makes the
messages more specific.

Signed-off-by: Ed L. Cashin <ecashin@coraid.com>
Cc: Greg KH <greg@kroah.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Ed L. Cashin and committed by
Linus Torvalds
68e0d42f 8911ef4d

+654 -336
+40 -17
drivers/block/aoe/aoe.h
··· 76 76 DEVFL_EXT = (1<<2), /* device accepts lba48 commands */ 77 77 DEVFL_CLOSEWAIT = (1<<3), /* device is waiting for all closes to revalidate */ 78 78 DEVFL_GDALLOC = (1<<4), /* need to alloc gendisk */ 79 - DEVFL_PAUSE = (1<<5), 79 + DEVFL_KICKME = (1<<5), /* slow polling network card catch */ 80 80 DEVFL_NEWSIZE = (1<<6), /* need to update dev size in block layer */ 81 - DEVFL_MAXBCNT = (1<<7), /* d->maxbcnt is not changeable */ 82 - DEVFL_KICKME = (1<<8), 83 81 84 82 BUFFL_FAIL = 1, 85 83 }; ··· 86 88 DEFAULTBCNT = 2 * 512, /* 2 sectors */ 87 89 NPERSHELF = 16, /* number of slots per shelf address */ 88 90 FREETAG = -1, 89 - MIN_BUFS = 8, 91 + MIN_BUFS = 16, 92 + NTARGETS = 8, 93 + NAOEIFS = 8, 94 + 95 + TIMERTICK = HZ / 10, 96 + MINTIMER = HZ >> 2, 97 + MAXTIMER = HZ << 1, 98 + HELPWAIT = 20, 90 99 }; 91 100 92 101 struct buf { 93 102 struct list_head bufs; 94 - ulong start_time; /* for disk stats */ 103 + ulong stime; /* for disk stats */ 95 104 ulong flags; 96 105 ulong nframesout; 97 - char *bufaddr; 98 106 ulong resid; 99 107 ulong bv_resid; 108 + ulong bv_off; 100 109 sector_t sector; 101 110 struct bio *bio; 102 111 struct bio_vec *bv; ··· 119 114 struct sk_buff *skb; 120 115 }; 121 116 117 + struct aoeif { 118 + struct net_device *nd; 119 + unsigned char lost; 120 + unsigned char lostjumbo; 121 + ushort maxbcnt; 122 + }; 123 + 124 + struct aoetgt { 125 + unsigned char addr[6]; 126 + ushort nframes; 127 + struct frame *frames; 128 + struct aoeif ifs[NAOEIFS]; 129 + struct aoeif *ifp; /* current aoeif in use */ 130 + ushort nout; 131 + ushort maxout; 132 + u16 lasttag; /* last tag sent */ 133 + u16 useme; 134 + ulong lastwadj; /* last window adjustment */ 135 + int wpkts, rpkts; 136 + }; 137 + 122 138 struct aoedev { 123 139 struct aoedev *next; 124 - unsigned char addr[6]; /* remote mac addr */ 125 - ushort flags; 126 140 ulong sysminor; 127 141 ulong aoemajor; 128 - ulong aoeminor; 142 + u16 aoeminor; 143 + u16 flags; 129 144 u16 nopen; /* 
(bd_openers isn't available without sleeping) */ 130 - u16 lasttag; /* last tag sent */ 131 145 u16 rttavg; /* round trip average of requests/responses */ 132 146 u16 mintimer; 133 147 u16 fw_ver; /* version of blade's firmware */ 134 - u16 maxbcnt; 135 148 struct work_struct work;/* disk create work struct */ 136 149 struct gendisk *gd; 137 150 struct request_queue blkq; ··· 157 134 sector_t ssize; 158 135 struct timer_list timer; 159 136 spinlock_t lock; 160 - struct net_device *ifp; /* interface ed is attached to */ 161 137 struct sk_buff *sendq_hd; /* packets needing to be sent, list head */ 162 138 struct sk_buff *sendq_tl; 163 139 mempool_t *bufpool; /* for deadlock-free Buf allocation */ 164 140 struct list_head bufq; /* queue of bios to work on */ 165 141 struct buf *inprocess; /* the one we're currently working on */ 166 - ushort lostjumbo; 167 - ushort nframes; /* number of frames below */ 168 - struct frame *frames; 142 + struct aoetgt *targets[NTARGETS]; 143 + struct aoetgt **tgt; /* target in use when working */ 144 + struct aoetgt **htgt; /* target needing rexmit assistance */ 169 145 }; 170 146 171 147 ··· 182 160 void aoecmd_ata_rsp(struct sk_buff *); 183 161 void aoecmd_cfg_rsp(struct sk_buff *); 184 162 void aoecmd_sleepwork(struct work_struct *); 185 - struct sk_buff *new_skb(ulong); 163 + void aoecmd_cleanslate(struct aoedev *); 164 + struct sk_buff *aoecmd_ata_id(struct aoedev *); 186 165 187 166 int aoedev_init(void); 188 167 void aoedev_exit(void); 189 168 struct aoedev *aoedev_by_aoeaddr(int maj, int min); 190 - struct aoedev *aoedev_by_sysminor_m(ulong sysminor, ulong bufcnt); 169 + struct aoedev *aoedev_by_sysminor_m(ulong sysminor); 191 170 void aoedev_downdev(struct aoedev *d); 192 171 int aoedev_isbusy(struct aoedev *d); 193 172
+55 -7
drivers/block/aoe/aoeblk.c
··· 24 24 return snprintf(page, PAGE_SIZE, 25 25 "%s%s\n", 26 26 (d->flags & DEVFL_UP) ? "up" : "down", 27 - (d->flags & DEVFL_PAUSE) ? ",paused" : 27 + (d->flags & DEVFL_KICKME) ? ",kickme" : 28 28 (d->nopen && !(d->flags & DEVFL_UP)) ? ",closewait" : ""); 29 29 /* I'd rather see nopen exported so we can ditch closewait */ 30 30 } ··· 33 33 { 34 34 struct gendisk *disk = dev_to_disk(dev); 35 35 struct aoedev *d = disk->private_data; 36 + struct aoetgt *t = d->targets[0]; 36 37 38 + if (t == NULL) 39 + return snprintf(page, PAGE_SIZE, "none\n"); 37 40 return snprintf(page, PAGE_SIZE, "%012llx\n", 38 - (unsigned long long)mac_addr(d->addr)); 41 + (unsigned long long)mac_addr(t->addr)); 39 42 } 40 43 static ssize_t aoedisk_show_netif(struct device *dev, 41 44 struct device_attribute *attr, char *page) 42 45 { 43 46 struct gendisk *disk = dev_to_disk(dev); 44 47 struct aoedev *d = disk->private_data; 48 + struct net_device *nds[8], **nd, **nnd, **ne; 49 + struct aoetgt **t, **te; 50 + struct aoeif *ifp, *e; 51 + char *p; 45 52 46 - return snprintf(page, PAGE_SIZE, "%s\n", d->ifp->name); 53 + memset(nds, 0, sizeof nds); 54 + nd = nds; 55 + ne = nd + ARRAY_SIZE(nds); 56 + t = d->targets; 57 + te = t + NTARGETS; 58 + for (; t < te && *t; t++) { 59 + ifp = (*t)->ifs; 60 + e = ifp + NAOEIFS; 61 + for (; ifp < e && ifp->nd; ifp++) { 62 + for (nnd = nds; nnd < nd; nnd++) 63 + if (*nnd == ifp->nd) 64 + break; 65 + if (nnd == nd && nd != ne) 66 + *nd++ = ifp->nd; 67 + } 68 + } 69 + 70 + ne = nd; 71 + nd = nds; 72 + if (*nd == NULL) 73 + return snprintf(page, PAGE_SIZE, "none\n"); 74 + for (p = page; nd < ne; nd++) 75 + p += snprintf(p, PAGE_SIZE - (p-page), "%s%s", 76 + p == page ? 
"" : ",", (*nd)->name); 77 + p += snprintf(p, PAGE_SIZE - (p-page), "\n"); 78 + return p-page; 47 79 } 48 80 /* firmware version */ 49 81 static ssize_t aoedisk_show_fwver(struct device *dev, ··· 166 134 167 135 blk_queue_bounce(q, &bio); 168 136 137 + if (bio == NULL) { 138 + printk(KERN_ERR "aoe: bio is NULL\n"); 139 + BUG(); 140 + return 0; 141 + } 169 142 d = bio->bi_bdev->bd_disk->private_data; 143 + if (d == NULL) { 144 + printk(KERN_ERR "aoe: bd_disk->private_data is NULL\n"); 145 + BUG(); 146 + bio_endio(bio, -ENXIO); 147 + return 0; 148 + } else if (bio->bi_io_vec == NULL) { 149 + printk(KERN_ERR "aoe: bi_io_vec is NULL\n"); 150 + BUG(); 151 + bio_endio(bio, -ENXIO); 152 + return 0; 153 + } 170 154 buf = mempool_alloc(d->bufpool, GFP_NOIO); 171 155 if (buf == NULL) { 172 156 printk(KERN_INFO "aoe: buf allocation failure\n"); ··· 191 143 } 192 144 memset(buf, 0, sizeof(*buf)); 193 145 INIT_LIST_HEAD(&buf->bufs); 194 - buf->start_time = jiffies; 146 + buf->stime = jiffies; 195 147 buf->bio = bio; 196 148 buf->resid = bio->bi_size; 197 149 buf->sector = bio->bi_sector; 198 150 buf->bv = &bio->bi_io_vec[bio->bi_idx]; 199 - WARN_ON(buf->bv->bv_len == 0); 200 151 buf->bv_resid = buf->bv->bv_len; 201 - buf->bufaddr = page_address(buf->bv->bv_page) + buf->bv->bv_offset; 152 + WARN_ON(buf->bv_resid == 0); 153 + buf->bv_off = buf->bv->bv_offset; 202 154 203 155 spin_lock_irqsave(&d->lock, flags); 204 156 ··· 277 229 gd->fops = &aoe_bdops; 278 230 gd->private_data = d; 279 231 gd->capacity = d->ssize; 280 - snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%ld", 232 + snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d", 281 233 d->aoemajor, d->aoeminor); 282 234 283 235 gd->queue = &d->blkq;
+13 -4
drivers/block/aoe/aoechr.c
··· 6 6 7 7 #include <linux/hdreg.h> 8 8 #include <linux/blkdev.h> 9 + #include <linux/delay.h> 9 10 #include "aoe.h" 10 11 11 12 enum { ··· 69 68 int major, minor, n; 70 69 ulong flags; 71 70 struct aoedev *d; 71 + struct sk_buff *skb; 72 72 char buf[16]; 73 73 74 74 if (size >= sizeof buf) ··· 87 85 d = aoedev_by_aoeaddr(major, minor); 88 86 if (!d) 89 87 return -EINVAL; 90 - 91 88 spin_lock_irqsave(&d->lock, flags); 92 - d->flags &= ~DEVFL_MAXBCNT; 93 - d->flags |= DEVFL_PAUSE; 89 + aoecmd_cleanslate(d); 90 + loop: 91 + skb = aoecmd_ata_id(d); 94 92 spin_unlock_irqrestore(&d->lock, flags); 93 + /* try again if we are able to sleep a bit, 94 + * otherwise give up this revalidation 95 + */ 96 + if (!skb && !msleep_interruptible(200)) { 97 + spin_lock_irqsave(&d->lock, flags); 98 + goto loop; 99 + } 100 + aoenet_xmit(skb); 95 101 aoecmd_cfg(major, minor); 96 - 97 102 return 0; 98 103 } 99 104
+461 -216
drivers/block/aoe/aoecmd.c
··· 9 9 #include <linux/skbuff.h> 10 10 #include <linux/netdevice.h> 11 11 #include <linux/genhd.h> 12 + #include <linux/moduleparam.h> 12 13 #include <net/net_namespace.h> 13 14 #include <asm/unaligned.h> 14 15 #include "aoe.h" 15 - 16 - #define TIMERTICK (HZ / 10) 17 - #define MINTIMER (2 * TIMERTICK) 18 - #define MAXTIMER (HZ << 1) 19 16 20 17 static int aoe_deadsecs = 60 * 3; 21 18 module_param(aoe_deadsecs, int, 0644); 22 19 MODULE_PARM_DESC(aoe_deadsecs, "After aoe_deadsecs seconds, give up and fail dev."); 23 20 24 - struct sk_buff * 21 + static struct sk_buff * 25 22 new_skb(ulong len) 26 23 { 27 24 struct sk_buff *skb; ··· 40 43 } 41 44 42 45 static struct frame * 43 - getframe(struct aoedev *d, int tag) 46 + getframe(struct aoetgt *t, int tag) 44 47 { 45 48 struct frame *f, *e; 46 49 47 - f = d->frames; 48 - e = f + d->nframes; 50 + f = t->frames; 51 + e = f + t->nframes; 49 52 for (; f<e; f++) 50 53 if (f->tag == tag) 51 54 return f; ··· 58 61 * This driver reserves tag -1 to mean "unused frame." 
59 62 */ 60 63 static int 61 - newtag(struct aoedev *d) 64 + newtag(struct aoetgt *t) 62 65 { 63 66 register ulong n; 64 67 65 68 n = jiffies & 0xffff; 66 - return n |= (++d->lasttag & 0x7fff) << 16; 69 + return n |= (++t->lasttag & 0x7fff) << 16; 67 70 } 68 71 69 72 static int 70 - aoehdr_atainit(struct aoedev *d, struct aoe_hdr *h) 73 + aoehdr_atainit(struct aoedev *d, struct aoetgt *t, struct aoe_hdr *h) 71 74 { 72 - u32 host_tag = newtag(d); 75 + u32 host_tag = newtag(t); 73 76 74 - memcpy(h->src, d->ifp->dev_addr, sizeof h->src); 75 - memcpy(h->dst, d->addr, sizeof h->dst); 77 + memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src); 78 + memcpy(h->dst, t->addr, sizeof h->dst); 76 79 h->type = __constant_cpu_to_be16(ETH_P_AOE); 77 80 h->verfl = AOE_HVER; 78 81 h->major = cpu_to_be16(d->aoemajor); ··· 95 98 } 96 99 97 100 static void 98 - aoecmd_ata_rw(struct aoedev *d, struct frame *f) 101 + ifrotate(struct aoetgt *t) 99 102 { 103 + t->ifp++; 104 + if (t->ifp >= &t->ifs[NAOEIFS] || t->ifp->nd == NULL) 105 + t->ifp = t->ifs; 106 + if (t->ifp->nd == NULL) { 107 + printk(KERN_INFO "aoe: no interface to rotate to\n"); 108 + BUG(); 109 + } 110 + } 111 + 112 + static struct frame * 113 + freeframe(struct aoedev *d) 114 + { 115 + struct frame *f, *e; 116 + struct aoetgt **t; 117 + ulong n; 118 + 119 + if (d->targets[0] == NULL) { /* shouldn't happen, but I'm paranoid */ 120 + printk(KERN_ERR "aoe: NULL TARGETS!\n"); 121 + return NULL; 122 + } 123 + t = d->targets; 124 + do { 125 + if (t != d->htgt 126 + && (*t)->ifp->nd 127 + && (*t)->nout < (*t)->maxout) { 128 + n = (*t)->nframes; 129 + f = (*t)->frames; 130 + e = f + n; 131 + for (; f < e; f++) { 132 + if (f->tag != FREETAG) 133 + continue; 134 + if (atomic_read(&skb_shinfo(f->skb)->dataref) 135 + != 1) { 136 + n--; 137 + continue; 138 + } 139 + skb_shinfo(f->skb)->nr_frags = 0; 140 + f->skb->data_len = 0; 141 + skb_trim(f->skb, 0); 142 + d->tgt = t; 143 + ifrotate(*t); 144 + return f; 145 + } 146 + if (n == 0) /* 
slow polling network card */ 147 + d->flags |= DEVFL_KICKME; 148 + } 149 + t++; 150 + } while (t < &d->targets[NTARGETS] && *t); 151 + return NULL; 152 + } 153 + 154 + static int 155 + aoecmd_ata_rw(struct aoedev *d) 156 + { 157 + struct frame *f; 100 158 struct aoe_hdr *h; 101 159 struct aoe_atahdr *ah; 102 160 struct buf *buf; 161 + struct bio_vec *bv; 162 + struct aoetgt *t; 103 163 struct sk_buff *skb; 104 164 ulong bcnt; 105 - register sector_t sector; 106 165 char writebit, extbit; 107 166 108 167 writebit = 0x10; 109 168 extbit = 0x4; 110 169 170 + f = freeframe(d); 171 + if (f == NULL) 172 + return 0; 173 + t = *d->tgt; 111 174 buf = d->inprocess; 112 - 113 - sector = buf->sector; 114 - bcnt = buf->bv_resid; 115 - if (bcnt > d->maxbcnt) 116 - bcnt = d->maxbcnt; 117 - 175 + bv = buf->bv; 176 + bcnt = t->ifp->maxbcnt; 177 + if (bcnt == 0) 178 + bcnt = DEFAULTBCNT; 179 + if (bcnt > buf->bv_resid) 180 + bcnt = buf->bv_resid; 118 181 /* initialize the headers & frame */ 119 182 skb = f->skb; 120 183 h = (struct aoe_hdr *) skb_mac_header(skb); 121 184 ah = (struct aoe_atahdr *) (h+1); 122 185 skb_put(skb, sizeof *h + sizeof *ah); 123 186 memset(h, 0, skb->len); 124 - f->tag = aoehdr_atainit(d, h); 187 + f->tag = aoehdr_atainit(d, t, h); 188 + t->nout++; 125 189 f->waited = 0; 126 190 f->buf = buf; 127 - f->bufaddr = buf->bufaddr; 191 + f->bufaddr = page_address(bv->bv_page) + buf->bv_off; 128 192 f->bcnt = bcnt; 129 - f->lba = sector; 193 + f->lba = buf->sector; 130 194 131 195 /* set up ata header */ 132 196 ah->scnt = bcnt >> 9; 133 - put_lba(ah, sector); 197 + put_lba(ah, buf->sector); 134 198 if (d->flags & DEVFL_EXT) { 135 199 ah->aflags |= AOEAFL_EXT; 136 200 } else { ··· 199 141 ah->lba3 &= 0x0f; 200 142 ah->lba3 |= 0xe0; /* LBA bit + obsolete 0xa0 */ 201 143 } 202 - 203 144 if (bio_data_dir(buf->bio) == WRITE) { 204 - skb_fill_page_desc(skb, 0, virt_to_page(f->bufaddr), 205 - offset_in_page(f->bufaddr), bcnt); 145 + skb_fill_page_desc(skb, 0, bv->bv_page, 
buf->bv_off, bcnt); 206 146 ah->aflags |= AOEAFL_WRITE; 207 147 skb->len += bcnt; 208 148 skb->data_len = bcnt; 149 + t->wpkts++; 209 150 } else { 151 + t->rpkts++; 210 152 writebit = 0; 211 153 } 212 154 ··· 214 156 215 157 /* mark all tracking fields and load out */ 216 158 buf->nframesout += 1; 217 - buf->bufaddr += bcnt; 159 + buf->bv_off += bcnt; 218 160 buf->bv_resid -= bcnt; 219 - /* printk(KERN_DEBUG "aoe: bv_resid=%ld\n", buf->bv_resid); */ 220 161 buf->resid -= bcnt; 221 162 buf->sector += bcnt >> 9; 222 163 if (buf->resid == 0) { 223 164 d->inprocess = NULL; 224 165 } else if (buf->bv_resid == 0) { 225 - buf->bv++; 226 - WARN_ON(buf->bv->bv_len == 0); 227 - buf->bv_resid = buf->bv->bv_len; 228 - buf->bufaddr = page_address(buf->bv->bv_page) + buf->bv->bv_offset; 166 + buf->bv = ++bv; 167 + buf->bv_resid = bv->bv_len; 168 + WARN_ON(buf->bv_resid == 0); 169 + buf->bv_off = bv->bv_offset; 229 170 } 230 171 231 - skb->dev = d->ifp; 172 + skb->dev = t->ifp->nd; 232 173 skb = skb_clone(skb, GFP_ATOMIC); 233 - if (skb == NULL) 234 - return; 235 - if (d->sendq_hd) 236 - d->sendq_tl->next = skb; 237 - else 238 - d->sendq_hd = skb; 239 - d->sendq_tl = skb; 174 + if (skb) { 175 + if (d->sendq_hd) 176 + d->sendq_tl->next = skb; 177 + else 178 + d->sendq_hd = skb; 179 + d->sendq_tl = skb; 180 + } 181 + return 1; 240 182 } 241 183 242 184 /* some callers cannot sleep, and they can call this function, ··· 290 232 return sl; 291 233 } 292 234 293 - static struct frame * 294 - freeframe(struct aoedev *d) 295 - { 296 - struct frame *f, *e; 297 - int n = 0; 298 - 299 - f = d->frames; 300 - e = f + d->nframes; 301 - for (; f<e; f++) { 302 - if (f->tag != FREETAG) 303 - continue; 304 - if (atomic_read(&skb_shinfo(f->skb)->dataref) == 1) { 305 - skb_shinfo(f->skb)->nr_frags = f->skb->data_len = 0; 306 - skb_trim(f->skb, 0); 307 - return f; 308 - } 309 - n++; 310 - } 311 - if (n == d->nframes) /* wait for network layer */ 312 - d->flags |= DEVFL_KICKME; 313 - 314 - return 
NULL; 315 - } 316 - 317 - /* enters with d->lock held */ 318 - void 319 - aoecmd_work(struct aoedev *d) 320 - { 321 - struct frame *f; 322 - struct buf *buf; 323 - 324 - if (d->flags & DEVFL_PAUSE) { 325 - if (!aoedev_isbusy(d)) 326 - d->sendq_hd = aoecmd_cfg_pkts(d->aoemajor, 327 - d->aoeminor, &d->sendq_tl); 328 - return; 329 - } 330 - 331 - loop: 332 - f = freeframe(d); 333 - if (f == NULL) 334 - return; 335 - if (d->inprocess == NULL) { 336 - if (list_empty(&d->bufq)) 337 - return; 338 - buf = container_of(d->bufq.next, struct buf, bufs); 339 - list_del(d->bufq.next); 340 - /*printk(KERN_DEBUG "aoe: bi_size=%ld\n", buf->bio->bi_size); */ 341 - d->inprocess = buf; 342 - } 343 - aoecmd_ata_rw(d, f); 344 - goto loop; 345 - } 346 - 347 235 static void 348 - rexmit(struct aoedev *d, struct frame *f) 236 + resend(struct aoedev *d, struct aoetgt *t, struct frame *f) 349 237 { 350 238 struct sk_buff *skb; 351 239 struct aoe_hdr *h; ··· 299 295 char buf[128]; 300 296 u32 n; 301 297 302 - n = newtag(d); 303 - 304 - snprintf(buf, sizeof buf, 305 - "%15s e%ld.%ld oldtag=%08x@%08lx newtag=%08x\n", 306 - "retransmit", 307 - d->aoemajor, d->aoeminor, f->tag, jiffies, n); 308 - aoechr_error(buf); 309 - 298 + ifrotate(t); 299 + n = newtag(t); 310 300 skb = f->skb; 311 301 h = (struct aoe_hdr *) skb_mac_header(skb); 312 302 ah = (struct aoe_atahdr *) (h+1); 303 + 304 + snprintf(buf, sizeof buf, 305 + "%15s e%ld.%d oldtag=%08x@%08lx newtag=%08x " 306 + "s=%012llx d=%012llx nout=%d\n", 307 + "retransmit", d->aoemajor, d->aoeminor, f->tag, jiffies, n, 308 + mac_addr(h->src), mac_addr(h->dst), t->nout); 309 + aoechr_error(buf); 310 + 313 311 f->tag = n; 314 312 h->tag = cpu_to_be32(n); 315 - memcpy(h->dst, d->addr, sizeof h->dst); 316 - memcpy(h->src, d->ifp->dev_addr, sizeof h->src); 313 + memcpy(h->dst, t->addr, sizeof h->dst); 314 + memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src); 317 315 318 - n = DEFAULTBCNT / 512; 319 - if (ah->scnt > n) { 320 - ah->scnt = n; 316 + switch 
(ah->cmdstat) { 317 + default: 318 + break; 319 + case WIN_READ: 320 + case WIN_READ_EXT: 321 + case WIN_WRITE: 322 + case WIN_WRITE_EXT: 323 + put_lba(ah, f->lba); 324 + 325 + n = f->bcnt; 326 + if (n > DEFAULTBCNT) 327 + n = DEFAULTBCNT; 328 + ah->scnt = n >> 9; 321 329 if (ah->aflags & AOEAFL_WRITE) { 322 330 skb_fill_page_desc(skb, 0, virt_to_page(f->bufaddr), 323 - offset_in_page(f->bufaddr), DEFAULTBCNT); 324 - skb->len = sizeof *h + sizeof *ah + DEFAULTBCNT; 325 - skb->data_len = DEFAULTBCNT; 326 - } 327 - if (++d->lostjumbo > (d->nframes << 1)) 328 - if (d->maxbcnt != DEFAULTBCNT) { 329 - printk(KERN_INFO "aoe: e%ld.%ld: too many lost jumbo on %s - using 1KB frames.\n", 330 - d->aoemajor, d->aoeminor, d->ifp->name); 331 - d->maxbcnt = DEFAULTBCNT; 332 - d->flags |= DEVFL_MAXBCNT; 331 + offset_in_page(f->bufaddr), n); 332 + skb->len = sizeof *h + sizeof *ah + n; 333 + skb->data_len = n; 333 334 } 334 335 } 335 - 336 - skb->dev = d->ifp; 336 + skb->dev = t->ifp->nd; 337 337 skb = skb_clone(skb, GFP_ATOMIC); 338 338 if (skb == NULL) 339 339 return; ··· 360 352 return n; 361 353 } 362 354 355 + static struct aoeif * 356 + getif(struct aoetgt *t, struct net_device *nd) 357 + { 358 + struct aoeif *p, *e; 359 + 360 + p = t->ifs; 361 + e = p + NAOEIFS; 362 + for (; p < e; p++) 363 + if (p->nd == nd) 364 + return p; 365 + return NULL; 366 + } 367 + 368 + static struct aoeif * 369 + addif(struct aoetgt *t, struct net_device *nd) 370 + { 371 + struct aoeif *p; 372 + 373 + p = getif(t, NULL); 374 + if (!p) 375 + return NULL; 376 + p->nd = nd; 377 + p->maxbcnt = DEFAULTBCNT; 378 + p->lost = 0; 379 + p->lostjumbo = 0; 380 + return p; 381 + } 382 + 383 + static void 384 + ejectif(struct aoetgt *t, struct aoeif *ifp) 385 + { 386 + struct aoeif *e; 387 + ulong n; 388 + 389 + e = t->ifs + NAOEIFS - 1; 390 + n = (e - ifp) * sizeof *ifp; 391 + memmove(ifp, ifp+1, n); 392 + e->nd = NULL; 393 + } 394 + 395 + static int 396 + sthtith(struct aoedev *d) 397 + { 398 + struct frame 
*f, *e, *nf; 399 + struct sk_buff *skb; 400 + struct aoetgt *ht = *d->htgt; 401 + 402 + f = ht->frames; 403 + e = f + ht->nframes; 404 + for (; f < e; f++) { 405 + if (f->tag == FREETAG) 406 + continue; 407 + nf = freeframe(d); 408 + if (!nf) 409 + return 0; 410 + skb = nf->skb; 411 + *nf = *f; 412 + f->skb = skb; 413 + f->tag = FREETAG; 414 + nf->waited = 0; 415 + ht->nout--; 416 + (*d->tgt)->nout++; 417 + resend(d, *d->tgt, nf); 418 + } 419 + /* he's clean, he's useless. take away his interfaces */ 420 + memset(ht->ifs, 0, sizeof ht->ifs); 421 + d->htgt = NULL; 422 + return 1; 423 + } 424 + 425 + static inline unsigned char 426 + ata_scnt(unsigned char *packet) { 427 + struct aoe_hdr *h; 428 + struct aoe_atahdr *ah; 429 + 430 + h = (struct aoe_hdr *) packet; 431 + ah = (struct aoe_atahdr *) (h+1); 432 + return ah->scnt; 433 + } 434 + 363 435 static void 364 436 rexmit_timer(ulong vp) 365 437 { 366 438 struct aoedev *d; 439 + struct aoetgt *t, **tt, **te; 440 + struct aoeif *ifp; 367 441 struct frame *f, *e; 368 442 struct sk_buff *sl; 369 443 register long timeout; ··· 464 374 spin_unlock_irqrestore(&d->lock, flags); 465 375 return; 466 376 } 467 - f = d->frames; 468 - e = f + d->nframes; 469 - for (; f<e; f++) { 470 - if (f->tag != FREETAG && tsince(f->tag) >= timeout) { 377 + tt = d->targets; 378 + te = tt + NTARGETS; 379 + for (; tt < te && *tt; tt++) { 380 + t = *tt; 381 + f = t->frames; 382 + e = f + t->nframes; 383 + for (; f < e; f++) { 384 + if (f->tag == FREETAG 385 + || tsince(f->tag) < timeout) 386 + continue; 471 387 n = f->waited += timeout; 472 388 n /= HZ; 473 - if (n > aoe_deadsecs) { /* waited too long for response */ 389 + if (n > aoe_deadsecs) { 390 + /* waited too long. device failure. 
*/ 474 391 aoedev_downdev(d); 475 392 break; 476 393 } 477 - rexmit(d, f); 394 + 395 + if (n > HELPWAIT /* see if another target can help */ 396 + && (tt != d->targets || d->targets[1])) 397 + d->htgt = tt; 398 + 399 + if (t->nout == t->maxout) { 400 + if (t->maxout > 1) 401 + t->maxout--; 402 + t->lastwadj = jiffies; 403 + } 404 + 405 + ifp = getif(t, f->skb->dev); 406 + if (ifp && ++ifp->lost > (t->nframes << 1) 407 + && (ifp != t->ifs || t->ifs[1].nd)) { 408 + ejectif(t, ifp); 409 + ifp = NULL; 410 + } 411 + 412 + if (ata_scnt(skb_mac_header(f->skb)) > DEFAULTBCNT / 512 413 + && ifp && ++ifp->lostjumbo > (t->nframes << 1) 414 + && ifp->maxbcnt != DEFAULTBCNT) { 415 + printk(KERN_INFO 416 + "aoe: e%ld.%d: " 417 + "too many lost jumbo on " 418 + "%s:%012llx - " 419 + "falling back to %d frames.\n", 420 + d->aoemajor, d->aoeminor, 421 + ifp->nd->name, mac_addr(t->addr), 422 + DEFAULTBCNT); 423 + ifp->maxbcnt = 0; 424 + } 425 + resend(d, t, f); 426 + } 427 + 428 + /* window check */ 429 + if (t->nout == t->maxout 430 + && t->maxout < t->nframes 431 + && (jiffies - t->lastwadj)/HZ > 10) { 432 + t->maxout++; 433 + t->lastwadj = jiffies; 478 434 } 479 435 } 480 - if (d->flags & DEVFL_KICKME) { 436 + 437 + if (d->sendq_hd) { 438 + n = d->rttavg <<= 1; 439 + if (n > MAXTIMER) 440 + d->rttavg = MAXTIMER; 441 + } 442 + 443 + if (d->flags & DEVFL_KICKME || d->htgt) { 481 444 d->flags &= ~DEVFL_KICKME; 482 445 aoecmd_work(d); 483 446 } 484 447 485 448 sl = d->sendq_hd; 486 449 d->sendq_hd = d->sendq_tl = NULL; 487 - if (sl) { 488 - n = d->rttavg <<= 1; 489 - if (n > MAXTIMER) 490 - d->rttavg = MAXTIMER; 491 - } 492 450 493 451 d->timer.expires = jiffies + TIMERTICK; 494 452 add_timer(&d->timer); ··· 544 406 spin_unlock_irqrestore(&d->lock, flags); 545 407 546 408 aoenet_xmit(sl); 409 + } 410 + 411 + /* enters with d->lock held */ 412 + void 413 + aoecmd_work(struct aoedev *d) 414 + { 415 + struct buf *buf; 416 + loop: 417 + if (d->htgt && !sthtith(d)) 418 + return; 419 + if 
(d->inprocess == NULL) { 420 + if (list_empty(&d->bufq)) 421 + return; 422 + buf = container_of(d->bufq.next, struct buf, bufs); 423 + list_del(d->bufq.next); 424 + d->inprocess = buf; 425 + } 426 + if (aoecmd_ata_rw(d)) 427 + goto loop; 547 428 } 548 429 549 430 /* this function performs work that has been deferred until sleeping is OK ··· 597 440 } 598 441 599 442 static void 600 - ataid_complete(struct aoedev *d, unsigned char *id) 443 + ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id) 601 444 { 602 445 u64 ssize; 603 446 u16 n; ··· 633 476 634 477 if (d->ssize != ssize) 635 478 printk(KERN_INFO "aoe: %012llx e%lu.%lu v%04x has %llu sectors\n", 636 - (unsigned long long)mac_addr(d->addr), 479 + (unsigned long long)mac_addr(t->addr), 637 480 d->aoemajor, d->aoeminor, 638 481 d->fw_ver, (long long)ssize); 639 482 d->ssize = ssize; ··· 641 484 if (d->gd != NULL) { 642 485 d->gd->capacity = ssize; 643 486 d->flags |= DEVFL_NEWSIZE; 644 - } else { 645 - if (d->flags & DEVFL_GDALLOC) { 646 - printk(KERN_ERR "aoe: can't schedule work for e%lu.%lu, %s\n", 647 - d->aoemajor, d->aoeminor, 648 - "it's already on! 
This shouldn't happen.\n"); 649 - return; 650 - } 487 + } else 651 488 d->flags |= DEVFL_GDALLOC; 652 - } 653 489 schedule_work(&d->work); 654 490 } 655 491 ··· 669 519 d->rttavg += n >> 2; 670 520 } 671 521 522 + static struct aoetgt * 523 + gettgt(struct aoedev *d, char *addr) 524 + { 525 + struct aoetgt **t, **e; 526 + 527 + t = d->targets; 528 + e = t + NTARGETS; 529 + for (; t < e && *t; t++) 530 + if (memcmp((*t)->addr, addr, sizeof((*t)->addr)) == 0) 531 + return *t; 532 + return NULL; 533 + } 534 + 535 + static inline void 536 + diskstats(struct gendisk *disk, struct bio *bio, ulong duration) 537 + { 538 + unsigned long n_sect = bio->bi_size >> 9; 539 + const int rw = bio_data_dir(bio); 540 + 541 + disk_stat_inc(disk, ios[rw]); 542 + disk_stat_add(disk, ticks[rw], duration); 543 + disk_stat_add(disk, sectors[rw], n_sect); 544 + disk_stat_add(disk, io_ticks, duration); 545 + } 546 + 672 547 void 673 548 aoecmd_ata_rsp(struct sk_buff *skb) 674 549 { ··· 703 528 struct frame *f; 704 529 struct buf *buf; 705 530 struct sk_buff *sl; 531 + struct aoetgt *t; 532 + struct aoeif *ifp; 706 533 register long n; 707 534 ulong flags; 708 535 char ebuf[128]; ··· 724 547 spin_lock_irqsave(&d->lock, flags); 725 548 726 549 n = be32_to_cpu(get_unaligned(&hin->tag)); 727 - f = getframe(d, n); 550 + t = gettgt(d, hin->src); 551 + if (t == NULL) { 552 + printk(KERN_INFO "aoe: can't find target e%ld.%d:%012llx\n", 553 + d->aoemajor, d->aoeminor, 554 + (unsigned long long) mac_addr(hin->src)); 555 + spin_unlock_irqrestore(&d->lock, flags); 556 + return; 557 + } 558 + f = getframe(t, n); 728 559 if (f == NULL) { 729 560 calc_rttavg(d, -tsince(n)); 730 561 spin_unlock_irqrestore(&d->lock, flags); ··· 754 569 ahout = (struct aoe_atahdr *) (hout+1); 755 570 buf = f->buf; 756 571 757 - if (ahout->cmdstat == WIN_IDENTIFY) 758 - d->flags &= ~DEVFL_PAUSE; 759 572 if (ahin->cmdstat & 0xa9) { /* these bits cleared on success */ 760 573 printk(KERN_ERR 761 574 "aoe: ata error cmd=%2.2Xh 
stat=%2.2Xh from e%ld.%ld\n", ··· 762 579 if (buf) 763 580 buf->flags |= BUFFL_FAIL; 764 581 } else { 582 + if (d->htgt && t == *d->htgt) /* I'll help myself, thank you. */ 583 + d->htgt = NULL; 765 584 n = ahout->scnt << 9; 766 585 switch (ahout->cmdstat) { 767 586 case WIN_READ: 768 587 case WIN_READ_EXT: 769 588 if (skb->len - sizeof *hin - sizeof *ahin < n) { 770 589 printk(KERN_ERR 771 - "aoe: runt data size in read. skb->len=%d\n", 772 - skb->len); 590 + "aoe: %s. skb->len=%d need=%ld\n", 591 + "runt data size in read", skb->len, n); 773 592 /* fail frame f? just returning will rexmit. */ 774 593 spin_unlock_irqrestore(&d->lock, flags); 775 594 return; ··· 779 594 memcpy(f->bufaddr, ahin+1, n); 780 595 case WIN_WRITE: 781 596 case WIN_WRITE_EXT: 782 - if (f->bcnt -= n) { 783 - skb = f->skb; 784 - f->bufaddr += n; 785 - put_lba(ahout, f->lba += ahout->scnt); 786 - n = f->bcnt; 597 + ifp = getif(t, skb->dev); 598 + if (ifp) { 599 + ifp->lost = 0; 787 600 if (n > DEFAULTBCNT) 788 - n = DEFAULTBCNT; 789 - ahout->scnt = n >> 9; 790 - if (ahout->aflags & AOEAFL_WRITE) { 791 - skb_fill_page_desc(skb, 0, 792 - virt_to_page(f->bufaddr), 793 - offset_in_page(f->bufaddr), n); 794 - skb->len = sizeof *hout + sizeof *ahout + n; 795 - skb->data_len = n; 796 - } 797 - f->tag = newtag(d); 798 - hout->tag = cpu_to_be32(f->tag); 799 - skb->dev = d->ifp; 800 - skb = skb_clone(skb, GFP_ATOMIC); 801 - spin_unlock_irqrestore(&d->lock, flags); 802 - if (skb) 803 - aoenet_xmit(skb); 804 - return; 601 + ifp->lostjumbo = 0; 805 602 } 806 - if (n > DEFAULTBCNT) 807 - d->lostjumbo = 0; 603 + if (f->bcnt -= n) { 604 + f->lba += n >> 9; 605 + f->bufaddr += n; 606 + resend(d, t, f); 607 + goto xmit; 608 + } 808 609 break; 809 610 case WIN_IDENTIFY: 810 611 if (skb->len - sizeof *hin - sizeof *ahin < 512) { ··· 800 629 spin_unlock_irqrestore(&d->lock, flags); 801 630 return; 802 631 } 803 - ataid_complete(d, (char *) (ahin+1)); 632 + ataid_complete(d, t, (char *) (ahin+1)); 804 633 break; 
805 634 default: 806 635 printk(KERN_INFO ··· 811 640 } 812 641 } 813 642 814 - if (buf) { 815 - buf->nframesout -= 1; 816 - if (buf->nframesout == 0 && buf->resid == 0) { 817 - unsigned long duration = jiffies - buf->start_time; 818 - unsigned long n_sect = buf->bio->bi_size >> 9; 819 - struct gendisk *disk = d->gd; 820 - const int rw = bio_data_dir(buf->bio); 821 - 822 - disk_stat_inc(disk, ios[rw]); 823 - disk_stat_add(disk, ticks[rw], duration); 824 - disk_stat_add(disk, sectors[rw], n_sect); 825 - disk_stat_add(disk, io_ticks, duration); 826 - n = (buf->flags & BUFFL_FAIL) ? -EIO : 0; 827 - bio_endio(buf->bio, n); 828 - mempool_free(buf, d->bufpool); 829 - } 643 + if (buf && --buf->nframesout == 0 && buf->resid == 0) { 644 + diskstats(d->gd, buf->bio, jiffies - buf->stime); 645 + n = (buf->flags & BUFFL_FAIL) ? -EIO : 0; 646 + bio_endio(buf->bio, n); 647 + mempool_free(buf, d->bufpool); 830 648 } 831 649 832 650 f->buf = NULL; 833 651 f->tag = FREETAG; 652 + t->nout--; 834 653 835 654 aoecmd_work(d); 655 + xmit: 836 656 sl = d->sendq_hd; 837 657 d->sendq_hd = d->sendq_tl = NULL; 838 658 ··· 841 679 aoenet_xmit(sl); 842 680 } 843 681 844 - /* 845 - * Since we only call this in one place (and it only prepares one frame) 846 - * we just return the skb. Usually we'd chain it up to the aoedev sendq. 847 - */ 848 - static struct sk_buff * 682 + struct sk_buff * 849 683 aoecmd_ata_id(struct aoedev *d) 850 684 { 851 685 struct aoe_hdr *h; 852 686 struct aoe_atahdr *ah; 853 687 struct frame *f; 854 688 struct sk_buff *skb; 689 + struct aoetgt *t; 855 690 856 691 f = freeframe(d); 857 - if (f == NULL) { 858 - printk(KERN_ERR "aoe: can't get a frame. 
This shouldn't happen.\n"); 692 + if (f == NULL) 859 693 return NULL; 860 - } 694 + 695 + t = *d->tgt; 861 696 862 697 /* initialize the headers & frame */ 863 698 skb = f->skb; ··· 862 703 ah = (struct aoe_atahdr *) (h+1); 863 704 skb_put(skb, sizeof *h + sizeof *ah); 864 705 memset(h, 0, skb->len); 865 - f->tag = aoehdr_atainit(d, h); 706 + f->tag = aoehdr_atainit(d, t, h); 707 + t->nout++; 866 708 f->waited = 0; 867 709 868 710 /* set up ata header */ ··· 871 711 ah->cmdstat = WIN_IDENTIFY; 872 712 ah->lba3 = 0xa0; 873 713 874 - skb->dev = d->ifp; 714 + skb->dev = t->ifp->nd; 875 715 876 716 d->rttavg = MAXTIMER; 877 717 d->timer.function = rexmit_timer; ··· 879 719 return skb_clone(skb, GFP_ATOMIC); 880 720 } 881 721 722 + static struct aoetgt * 723 + addtgt(struct aoedev *d, char *addr, ulong nframes) 724 + { 725 + struct aoetgt *t, **tt, **te; 726 + struct frame *f, *e; 727 + 728 + tt = d->targets; 729 + te = tt + NTARGETS; 730 + for (; tt < te && *tt; tt++) 731 + ; 732 + 733 + if (tt == te) 734 + return NULL; 735 + 736 + t = kcalloc(1, sizeof *t, GFP_ATOMIC); 737 + f = kcalloc(nframes, sizeof *f, GFP_ATOMIC); 738 + if (!t || !f) 739 + goto bail; 740 + t->nframes = nframes; 741 + t->frames = f; 742 + e = f + nframes; 743 + for (; f < e; f++) { 744 + f->tag = FREETAG; 745 + f->skb = new_skb(ETH_ZLEN); 746 + if (!f->skb) 747 + break; 748 + } 749 + if (f != e) { 750 + while (f > t->frames) { 751 + f--; 752 + dev_kfree_skb(f->skb); 753 + } 754 + goto bail; 755 + } 756 + memcpy(t->addr, addr, sizeof t->addr); 757 + t->ifp = t->ifs; 758 + t->maxout = t->nframes; 759 + return *tt = t; 760 + bail: 761 + kfree(t); 762 + kfree(f); 763 + return NULL; 764 + } 765 + 882 766 void 883 767 aoecmd_cfg_rsp(struct sk_buff *skb) 884 768 { 885 769 struct aoedev *d; 886 770 struct aoe_hdr *h; 887 771 struct aoe_cfghdr *ch; 772 + struct aoetgt *t; 773 + struct aoeif *ifp; 888 774 ulong flags, sysminor, aoemajor; 889 775 struct sk_buff *sl; 890 776 enum { MAXFRAMES = 16 }; ··· 961 
755 if (n > MAXFRAMES) /* keep it reasonable */ 962 756 n = MAXFRAMES; 963 757 964 - d = aoedev_by_sysminor_m(sysminor, n); 758 + d = aoedev_by_sysminor_m(sysminor); 965 759 if (d == NULL) { 966 760 printk(KERN_INFO "aoe: device sysminor_m failure\n"); 967 761 return; ··· 969 763 970 764 spin_lock_irqsave(&d->lock, flags); 971 765 972 - /* permit device to migrate mac and network interface */ 973 - d->ifp = skb->dev; 974 - memcpy(d->addr, h->src, sizeof d->addr); 975 - if (!(d->flags & DEVFL_MAXBCNT)) { 976 - n = d->ifp->mtu; 766 + t = gettgt(d, h->src); 767 + if (!t) { 768 + t = addtgt(d, h->src, n); 769 + if (!t) { 770 + printk(KERN_INFO 771 + "aoe: device addtgt failure; " 772 + "too many targets?\n"); 773 + spin_unlock_irqrestore(&d->lock, flags); 774 + return; 775 + } 776 + } 777 + ifp = getif(t, skb->dev); 778 + if (!ifp) { 779 + ifp = addif(t, skb->dev); 780 + if (!ifp) { 781 + printk(KERN_INFO 782 + "aoe: device addif failure; " 783 + "too many interfaces?\n"); 784 + spin_unlock_irqrestore(&d->lock, flags); 785 + return; 786 + } 787 + } 788 + if (ifp->maxbcnt) { 789 + n = ifp->nd->mtu; 977 790 n -= sizeof (struct aoe_hdr) + sizeof (struct aoe_atahdr); 978 791 n /= 512; 979 792 if (n > ch->scnt) 980 793 n = ch->scnt; 981 794 n = n ? 
n * 512 : DEFAULTBCNT; 982 - if (n != d->maxbcnt) { 795 + if (n != ifp->maxbcnt) { 983 796 printk(KERN_INFO 984 - "aoe: e%ld.%ld: setting %d byte data frames on %s\n", 985 - d->aoemajor, d->aoeminor, n, d->ifp->name); 986 - d->maxbcnt = n; 797 + "aoe: e%ld.%d: setting %d%s%s:%012llx\n", 798 + d->aoemajor, d->aoeminor, n, 799 + " byte data frames on ", ifp->nd->name, 800 + (unsigned long long) mac_addr(t->addr)); 801 + ifp->maxbcnt = n; 987 802 } 988 803 } 989 804 990 805 /* don't change users' perspective */ 991 - if (d->nopen && !(d->flags & DEVFL_PAUSE)) { 806 + if (d->nopen) { 992 807 spin_unlock_irqrestore(&d->lock, flags); 993 808 return; 994 809 } 995 - d->flags |= DEVFL_PAUSE; /* force pause */ 996 - d->mintimer = MINTIMER; 997 810 d->fw_ver = be16_to_cpu(ch->fwver); 998 811 999 - /* check for already outstanding ataid */ 1000 - sl = aoedev_isbusy(d) == 0 ? aoecmd_ata_id(d) : NULL; 812 + sl = aoecmd_ata_id(d); 1001 813 1002 814 spin_unlock_irqrestore(&d->lock, flags); 1003 815 1004 816 aoenet_xmit(sl); 1005 817 } 1006 818 819 + void 820 + aoecmd_cleanslate(struct aoedev *d) 821 + { 822 + struct aoetgt **t, **te; 823 + struct aoeif *p, *e; 824 + 825 + d->mintimer = MINTIMER; 826 + 827 + t = d->targets; 828 + te = t + NTARGETS; 829 + for (; t < te && *t; t++) { 830 + (*t)->maxout = (*t)->nframes; 831 + p = (*t)->ifs; 832 + e = p + NAOEIFS; 833 + for (; p < e; p++) { 834 + p->lostjumbo = 0; 835 + p->lost = 0; 836 + p->maxbcnt = DEFAULTBCNT; 837 + } 838 + } 839 + }
+79 -89
drivers/block/aoe/aoedev.c
··· 15 15 int 16 16 aoedev_isbusy(struct aoedev *d) 17 17 { 18 + struct aoetgt **t, **te; 18 19 struct frame *f, *e; 19 20 20 - f = d->frames; 21 - e = f + d->nframes; 22 - do { 23 - if (f->tag != FREETAG) 24 - return 1; 25 - } while (++f < e); 26 - 21 + t = d->targets; 22 + te = t + NTARGETS; 23 + for (; t < te && *t; t++) { 24 + f = (*t)->frames; 25 + e = f + (*t)->nframes; 26 + for (; f < e; f++) 27 + if (f->tag != FREETAG) 28 + return 1; 29 + } 27 30 return 0; 28 31 } 29 32 ··· 58 55 add_timer(&d->timer); 59 56 } 60 57 61 - /* called with devlist lock held */ 62 - static struct aoedev * 63 - aoedev_newdev(ulong nframes) 64 - { 65 - struct aoedev *d; 66 - struct frame *f, *e; 67 - 68 - d = kzalloc(sizeof *d, GFP_ATOMIC); 69 - f = kcalloc(nframes, sizeof *f, GFP_ATOMIC); 70 - switch (!d || !f) { 71 - case 0: 72 - d->nframes = nframes; 73 - d->frames = f; 74 - e = f + nframes; 75 - for (; f<e; f++) { 76 - f->tag = FREETAG; 77 - f->skb = new_skb(ETH_ZLEN); 78 - if (!f->skb) 79 - break; 80 - } 81 - if (f == e) 82 - break; 83 - while (f > d->frames) { 84 - f--; 85 - dev_kfree_skb(f->skb); 86 - } 87 - default: 88 - if (f) 89 - kfree(f); 90 - if (d) 91 - kfree(d); 92 - return NULL; 93 - } 94 - INIT_WORK(&d->work, aoecmd_sleepwork); 95 - spin_lock_init(&d->lock); 96 - init_timer(&d->timer); 97 - d->timer.data = (ulong) d; 98 - d->timer.function = dummy_timer; 99 - d->timer.expires = jiffies + HZ; 100 - add_timer(&d->timer); 101 - d->bufpool = NULL; /* defer to aoeblk_gdalloc */ 102 - INIT_LIST_HEAD(&d->bufq); 103 - d->next = devlist; 104 - devlist = d; 105 - 106 - return d; 107 - } 108 - 109 58 void 110 59 aoedev_downdev(struct aoedev *d) 111 60 { 61 + struct aoetgt **t, **te; 112 62 struct frame *f, *e; 113 63 struct buf *buf; 114 64 struct bio *bio; 115 65 116 - f = d->frames; 117 - e = f + d->nframes; 118 - for (; f<e; f->tag = FREETAG, f->buf = NULL, f++) { 119 - if (f->tag == FREETAG || f->buf == NULL) 120 - continue; 121 - buf = f->buf; 122 - bio = buf->bio; 123 - 
if (--buf->nframesout == 0) { 124 - mempool_free(buf, d->bufpool); 125 - bio_endio(bio, -EIO); 66 + t = d->targets; 67 + te = t + NTARGETS; 68 + for (; t < te && *t; t++) { 69 + f = (*t)->frames; 70 + e = f + (*t)->nframes; 71 + for (; f < e; f->tag = FREETAG, f->buf = NULL, f++) { 72 + if (f->tag == FREETAG || f->buf == NULL) 73 + continue; 74 + buf = f->buf; 75 + bio = buf->bio; 76 + if (--buf->nframesout == 0 77 + && buf != d->inprocess) { 78 + mempool_free(buf, d->bufpool); 79 + bio_endio(bio, -EIO); 80 + } 126 81 } 127 - skb_shinfo(f->skb)->nr_frags = f->skb->data_len = 0; 82 + (*t)->maxout = (*t)->nframes; 83 + (*t)->nout = 0; 84 + } 85 + buf = d->inprocess; 86 + if (buf) { 87 + bio = buf->bio; 88 + mempool_free(buf, d->bufpool); 89 + bio_endio(bio, -EIO); 128 90 } 129 91 d->inprocess = NULL; 92 + d->htgt = NULL; 130 93 131 94 while (!list_empty(&d->bufq)) { 132 95 buf = container_of(d->bufq.next, struct buf, bufs); ··· 105 136 if (d->gd) 106 137 d->gd->capacity = 0; 107 138 108 - d->flags &= ~(DEVFL_UP | DEVFL_PAUSE); 139 + d->flags &= ~DEVFL_UP; 109 140 } 110 141 111 142 /* find it or malloc it */ 112 143 struct aoedev * 113 - aoedev_by_sysminor_m(ulong sysminor, ulong bufcnt) 144 + aoedev_by_sysminor_m(ulong sysminor) 114 145 { 115 146 struct aoedev *d; 116 147 ulong flags; ··· 120 151 for (d=devlist; d; d=d->next) 121 152 if (d->sysminor == sysminor) 122 153 break; 123 - 124 - if (d == NULL) { 125 - d = aoedev_newdev(bufcnt); 126 - if (d == NULL) { 127 - spin_unlock_irqrestore(&devlist_lock, flags); 128 - printk(KERN_INFO "aoe: aoedev_newdev failure.\n"); 129 - return NULL; 130 - } 131 - d->sysminor = sysminor; 132 - d->aoemajor = AOEMAJOR(sysminor); 133 - d->aoeminor = AOEMINOR(sysminor); 134 - } 135 - 154 + if (d) 155 + goto out; 156 + d = kcalloc(1, sizeof *d, GFP_ATOMIC); 157 + if (!d) 158 + goto out; 159 + INIT_WORK(&d->work, aoecmd_sleepwork); 160 + spin_lock_init(&d->lock); 161 + init_timer(&d->timer); 162 + d->timer.data = (ulong) d; 163 + 
d->timer.function = dummy_timer; 164 + d->timer.expires = jiffies + HZ; 165 + add_timer(&d->timer); 166 + d->bufpool = NULL; /* defer to aoeblk_gdalloc */ 167 + d->tgt = d->targets; 168 + INIT_LIST_HEAD(&d->bufq); 169 + d->sysminor = sysminor; 170 + d->aoemajor = AOEMAJOR(sysminor); 171 + d->aoeminor = AOEMINOR(sysminor); 172 + d->mintimer = MINTIMER; 173 + d->next = devlist; 174 + devlist = d; 175 + out: 136 176 spin_unlock_irqrestore(&devlist_lock, flags); 137 177 return d; 138 178 } 139 179 140 180 static void 141 - aoedev_freedev(struct aoedev *d) 181 + freetgt(struct aoetgt *t) 142 182 { 143 183 struct frame *f, *e; 184 + 185 + f = t->frames; 186 + e = f + t->nframes; 187 + for (; f < e; f++) { 188 + skb_shinfo(f->skb)->nr_frags = 0; 189 + dev_kfree_skb(f->skb); 190 + } 191 + kfree(t->frames); 192 + kfree(t); 193 + } 194 + 195 + static void 196 + aoedev_freedev(struct aoedev *d) 197 + { 198 + struct aoetgt **t, **e; 144 199 145 200 if (d->gd) { 146 201 aoedisk_rm_sysfs(d); 147 202 del_gendisk(d->gd); 148 203 put_disk(d->gd); 149 204 } 150 - f = d->frames; 151 - e = f + d->nframes; 152 - for (; f<e; f++) { 153 - skb_shinfo(f->skb)->nr_frags = 0; 154 - dev_kfree_skb(f->skb); 155 - } 156 - kfree(d->frames); 205 + t = d->targets; 206 + e = t + NTARGETS; 207 + for (; t < e && *t; t++) 208 + freetgt(*t); 157 209 if (d->bufpool) 158 210 mempool_destroy(d->bufpool); 159 211 kfree(d);
+6 -3
drivers/block/aoe/aoenet.c
··· 137 137 if (n > NECODES) 138 138 n = 0; 139 139 if (net_ratelimit()) 140 - printk(KERN_ERR "aoe: error packet from %d.%d; ecode=%d '%s'\n", 141 - be16_to_cpu(get_unaligned(&h->major)), h->minor, 142 - h->err, aoe_errlist[n]); 140 + printk(KERN_ERR 141 + "%s%d.%d@%s; ecode=%d '%s'\n", 142 + "aoe: error packet from ", 143 + be16_to_cpu(get_unaligned(&h->major)), 144 + h->minor, skb->dev->name, 145 + h->err, aoe_errlist[n]); 143 146 goto exit; 144 147 } 145 148