Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge from-linus to-akpm

Len Brown 09d92002 cb220c1a

+143 -110
+1 -1
arch/x86_64/kernel/smpboot.c
···
 {
 	if (notscsync || !cpu_has_tsc)
 		return;
-	sync_tsc(boot_cpu_id);
+	sync_tsc(0);
 }
 
 static __init int notscsync_setup(char *s)
+1 -1
drivers/acpi/motherboard.c
···
  */
 #define IS_RESERVED_ADDR(base, len) \
 	(((len) > 0) && ((base) > 0) && ((base) + (len) < IO_SPACE_LIMIT) \
-	&& ((base) + (len) > 0x1000))
+	&& ((base) + (len) > PCIBIOS_MIN_IO))
 /*
  * Clearing the flag (IORESOURCE_BUSY) allows drivers to use
  * the io ports if they really know they can use it, while
+8 -4
drivers/char/mem.c
···
 
 static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
 {
-	unsigned long long val;
+	unsigned long pfn;
+
+	/* Turn a kernel-virtual address into a physical page frame */
+	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;
+
 	/*
 	 * RED-PEN: on some architectures there is more mapped memory
 	 * than available in mem_map which pfn_valid checks
···
 	 *
 	 * RED-PEN: vmalloc is not supported right now.
 	 */
-	if (!pfn_valid(vma->vm_pgoff))
+	if (!pfn_valid(pfn))
 		return -EIO;
-	val = (u64)vma->vm_pgoff << PAGE_SHIFT;
-	vma->vm_pgoff = __pa(val) >> PAGE_SHIFT;
+
+	vma->vm_pgoff = pfn;
 	return mmap_mem(file, vma);
 }
 
+1
drivers/net/e1000/e1000_main.c
···
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	disable_irq(adapter->pdev->irq);
 	e1000_intr(adapter->pdev->irq, netdev, NULL);
+	e1000_clean_tx_irq(adapter);
 	enable_irq(adapter->pdev->irq);
 }
 #endif
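For context, the ->poll_controller path this hunk touches follows a common pattern: netconsole masks the NIC's interrupt line, runs the interrupt handler synchronously, then unmasks it. A minimal sketch of that pattern, with hypothetical mydrv_* names standing in for the driver-specific e1000 symbols:

	/* Hedged sketch of the generic netpoll controller pattern above;
	 * mydrv_* names are illustrative, not real e1000 symbols. */
	static void mydrv_netpoll(struct net_device *dev)
	{
		struct mydrv_adapter *adapter = netdev_priv(dev);

		disable_irq(adapter->pdev->irq);	   /* mask the NIC's IRQ */
		mydrv_intr(adapter->pdev->irq, dev, NULL); /* run the handler by hand */
		mydrv_clean_tx_ring(adapter);		   /* reap completed TX, as the fix adds */
		enable_irq(adapter->pdev->irq);
	}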
+13 -35
drivers/scsi/dc395x.c
···
  * cross a page boundy.
  */
 #define SEGMENTX_LEN	(sizeof(struct SGentry)*DC395x_MAX_SG_LISTENTRY)
-#define VIRTX_LEN	(sizeof(void *) * DC395x_MAX_SG_LISTENTRY)
+
 
 struct SGentry {
 	u32 address;		/* bus! address */
···
 	u8 sg_count;			/* No of HW sg entries for this request */
 	u8 sg_index;			/* Index of HW sg entry for this request */
 	u32 total_xfer_length;		/* Total number of bytes remaining to be transfered */
-	void **virt_map;
 	unsigned char *virt_addr;	/* Virtual address of current transfer position */
 
 	/*
···
 		reqlen, cmd->request_buffer, cmd->use_sg,
 		srb->sg_count);
 
+	srb->virt_addr = page_address(sl->page);
 	for (i = 0; i < srb->sg_count; i++) {
-		u32 seglen = (u32)sg_dma_len(sl + i);
-		sgp[i].address = (u32)sg_dma_address(sl + i);
+		u32 busaddr = (u32)sg_dma_address(&sl[i]);
+		u32 seglen = (u32)sl[i].length;
+		sgp[i].address = busaddr;
 		sgp[i].length = seglen;
 		srb->total_xfer_length += seglen;
-		srb->virt_map[i] = kmap(sl[i].page);
 	}
-	srb->virt_addr = srb->virt_map[0];
 	sgp += srb->sg_count - 1;
 
 	/*
···
 	int segment = cmd->use_sg;
 	u32 xferred = srb->total_xfer_length - left; /* bytes transfered */
 	struct SGentry *psge = srb->segment_x + srb->sg_index;
-	void **virt = srb->virt_map;
 
 	dprintkdbg(DBG_0,
 		"sg_update_list: Transfered %i of %i bytes, %i remain\n",
···
 
 	/* We have to walk the scatterlist to find it */
 	sg = (struct scatterlist *)cmd->request_buffer;
-	idx = 0;
 	while (segment--) {
 		unsigned long mask =
 			~((unsigned long)sg->length - 1) & PAGE_MASK;
 		if ((sg_dma_address(sg) & mask) == (psge->address & mask)) {
-			srb->virt_addr = virt[idx] + (psge->address & ~PAGE_MASK);
+			srb->virt_addr = (page_address(sg->page)
+					  + psge->address -
+					  (psge->address & PAGE_MASK));
 			return;
 		}
 		++sg;
-		++idx;
 	}
 
 	dprintkl(KERN_ERR, "sg_update_list: sg_to_virt failed\n");
···
 			DC395x_read32(acb, TRM_S1040_DMA_CXCNT));
 	}
 	/*
-	 * calculate all the residue data that not yet transfered
+	 * calculate all the residue data that not yet tranfered
 	 * SCSI transfer counter + left in SCSI FIFO data
 	 *
 	 * .....TRM_S1040_SCSI_COUNTER (24bits)
···
 	struct scsi_cmnd *cmd = srb->cmd;
 	enum dma_data_direction dir = cmd->sc_data_direction;
 	if (cmd->use_sg && dir != PCI_DMA_NONE) {
-		int i;
 		/* unmap DC395x SG list */
 		dprintkdbg(DBG_SG, "pci_unmap_srb: list=%08x(%05x)\n",
 			srb->sg_bus_addr, SEGMENTX_LEN);
···
 		dprintkdbg(DBG_SG, "pci_unmap_srb: segs=%i buffer=%p\n",
 			cmd->use_sg, cmd->request_buffer);
 		/* unmap the sg segments */
-		for (i = 0; i < srb->sg_count; i++)
-			kunmap(virt_to_page(srb->virt_map[i]));
 		pci_unmap_sg(acb->dev,
 			(struct scatterlist *)cmd->request_buffer,
 			cmd->use_sg, dir);
···
 
 	if (cmd->use_sg) {
 		struct scatterlist* sg = (struct scatterlist *)cmd->request_buffer;
-		ptr = (struct ScsiInqData *)(srb->virt_map[0] + sg->offset);
+		ptr = (struct ScsiInqData *)(page_address(sg->page) + sg->offset);
 	} else {
 		ptr = (struct ScsiInqData *)(cmd->request_buffer);
 	}
···
 	const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN;
 
 	for (i = 0; i < DC395x_MAX_SRB_CNT; i += srbs_per_page)
-		kfree(acb->srb_array[i].segment_x);
-
-	vfree(acb->srb_array[0].virt_map);
+		if (acb->srb_array[i].segment_x)
+			kfree(acb->srb_array[i].segment_x);
 }
 
···
 	int srb_idx = 0;
 	unsigned i = 0;
 	struct SGentry *ptr;
-	void **virt_array;
 
-	for (i = 0; i < DC395x_MAX_SRB_CNT; i++) {
+	for (i = 0; i < DC395x_MAX_SRB_CNT; i++)
 		acb->srb_array[i].segment_x = NULL;
-		acb->srb_array[i].virt_map = NULL;
-	}
 
 	dprintkdbg(DBG_1, "Allocate %i pages for SG tables\n", pages);
 	while (pages--) {
···
 				ptr + (i * DC395x_MAX_SG_LISTENTRY);
 		else
 			dprintkl(KERN_DEBUG, "No space for tmsrb SG table reserved?!\n");
-
-	virt_array = vmalloc((DC395x_MAX_SRB_CNT + 1) * DC395x_MAX_SG_LISTENTRY * sizeof(void*));
-
-	if (!virt_array) {
-		adapter_sg_tables_free(acb);
-		return 1;
-	}
-
-	for (i = 0; i < DC395x_MAX_SRB_CNT + 1; i++) {
-		acb->srb_array[i].virt_map = virt_array;
-		virt_array += DC395x_MAX_SG_LISTENTRY;
-	}
-
 	return 0;
 }
 
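The driver-wide change above drops the per-segment kmap() bookkeeping in favor of direct page_address() lookups, which is only valid when the scatterlist pages live in lowmem. A minimal sketch of the address computation the driver now relies on (the sg_to_virt name is hypothetical):

	/* Sketch: CPU-virtual address of an SG segment without kmap().
	 * Valid only while the scatterlist pages are in lowmem, where
	 * page_address() is never NULL -- the assumption this patch makes. */
	static unsigned char *sg_to_virt(struct scatterlist *sg)
	{
		return (unsigned char *)page_address(sg->page) + sg->offset;
	}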
+2 -2
drivers/video/fbmem.c
···
 int
 fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
 {
-	int err;
+	int err, flags = info->flags;
 
 	if (var->activate & FB_ACTIVATE_INV_MODE) {
 		struct fb_videomode mode1, mode2;
···
 	    !list_empty(&info->modelist))
 		err = fb_add_videomode(&mode, &info->modelist);
 
-	if (!err && info->flags & FBINFO_MISC_USEREVENT) {
+	if (!err && (flags & FBINFO_MISC_USEREVENT)) {
 		struct fb_event event;
 
 		info->flags &= ~FBINFO_MISC_USEREVENT;
+27 -23
drivers/video/intelfb/intelfbdrv.c
···
 		return -ENODEV;
 	}
 
-	/* Map the fb and MMIO regions */
-	dinfo->aperture.virtual = (u8 __iomem *)ioremap_nocache
-		(dinfo->aperture.physical, dinfo->aperture.size);
-	if (!dinfo->aperture.virtual) {
-		ERR_MSG("Cannot remap FB region.\n");
-		cleanup(dinfo);
-		return -ENODEV;
-	}
-	dinfo->mmio_base =
-		(u8 __iomem *)ioremap_nocache(dinfo->mmio_base_phys,
-					      INTEL_REG_SIZE);
-	if (!dinfo->mmio_base) {
-		ERR_MSG("Cannot remap MMIO region.\n");
-		cleanup(dinfo);
-		return -ENODEV;
-	}
-
 	/* Get the chipset info. */
 	dinfo->pci_chipset = pdev->device;
 
···
 		dinfo->accel = 0;
 	}
 
+	if (MB(voffset) < stolen_size)
+		offset = (stolen_size >> 12);
+	else
+		offset = ROUND_UP_TO_PAGE(MB(voffset))/GTT_PAGE_SIZE;
+
 	/* Framebuffer parameters - Use all the stolen memory if >= vram */
-	if (ROUND_UP_TO_PAGE(stolen_size) >= MB(vram)) {
+	if (ROUND_UP_TO_PAGE(stolen_size) >= ((offset << 12) + MB(vram))) {
 		dinfo->fb.size = ROUND_UP_TO_PAGE(stolen_size);
+		dinfo->fb.offset = 0;
 		dinfo->fbmem_gart = 0;
 	} else {
 		dinfo->fb.size = MB(vram);
···
 		return -ENODEV;
 	}
 
-	if (MB(voffset) < stolen_size)
-		offset = (stolen_size >> 12);
-	else
-		offset = ROUND_UP_TO_PAGE(MB(voffset))/GTT_PAGE_SIZE;
-
 	/* set the mem offsets - set them after the already used pages */
 	if (dinfo->accel) {
 		dinfo->ring.offset = offset + gtt_info.current_memory;
···
 		dinfo->fb.offset = offset +
 			+ gtt_info.current_memory + (dinfo->ring.size >> 12)
 			+ (dinfo->cursor.size >> 12);
+	}
+
+	/* Map the fb and MMIO regions */
+	/* ioremap only up to the end of used aperture */
+	dinfo->aperture.virtual = (u8 __iomem *)ioremap_nocache
+		(dinfo->aperture.physical, (dinfo->fb.offset << 12)
+		 + dinfo->fb.size);
+	if (!dinfo->aperture.virtual) {
+		ERR_MSG("Cannot remap FB region.\n");
+		cleanup(dinfo);
+		return -ENODEV;
+	}
+
+	dinfo->mmio_base =
+		(u8 __iomem *)ioremap_nocache(dinfo->mmio_base_phys,
+					      INTEL_REG_SIZE);
+	if (!dinfo->mmio_base) {
+		ERR_MSG("Cannot remap MMIO region.\n");
+		cleanup(dinfo);
+		return -ENODEV;
 	}
 
 	/* Allocate memories (which aren't stolen) */
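The net effect of the reordering above is that ioremap_nocache() now runs after dinfo->fb.offset is known, so only the used part of the aperture is mapped. A rough sketch of the mapping length (fb.offset counts 4 KiB GTT pages, hence the shift):

	/* length handed to ioremap_nocache(): aperture start through the
	 * end of the framebuffer, rather than the whole aperture */
	unsigned long map_len = (dinfo->fb.offset << 12) + dinfo->fb.size;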
+1 -1
drivers/w1/w1.c
···
 	 * Return 0 - device(s) present, 1 - no devices present.
 	 */
 	if (w1_reset_bus(dev)) {
-		dev_info(&dev->dev, "No devices present on the wire.\n");
+		dev_dbg(&dev->dev, "No devices present on the wire.\n");
 		break;
 	}
 
+6
fs/cifs/CHANGES
···
+Version 1.35
+------------
+Add writepage performance improvements. Fix path name conversions
+for long filenames on mounts which were done with "mapchars" mount option
+specified.
+
 Version 1.34
 ------------
 Fix error mapping of the TOO_MANY_LINKS (hardlinks) case.
+3
fs/cifs/cifssmb.c
···
 	if(name_len < PATH_MAX) {
 		memcpy(pSMB->ResumeFileName, psrch_inf->presume_name, name_len);
 		byte_count += name_len;
+		/* 14 byte parm len above enough for 2 byte null terminator */
+		pSMB->ResumeFileName[name_len] = 0;
+		pSMB->ResumeFileName[name_len+1] = 0;
 	} else {
 		rc = -EINVAL;
 		goto FNext2_err_exit;
+1
fs/cifs/misc.c
···
 		src_char = source[i];
 		switch (src_char) {
 		case 0:
+			target[j] = 0;
 			goto ctoUCS_out;
 		case ':':
 			target[j] = cpu_to_le16(UNI_COLON);
+1 -1
fs/inotify.c
···
 		return ERR_PTR(ret);
 	}
 
-	dev->last_wd = ret;
+	dev->last_wd = watch->wd;
 	watch->mask = mask;
 	atomic_set(&watch->count, 0);
 	INIT_LIST_HEAD(&watch->d_list);
+2 -1
fs/namei.c
···
 		error = vfs_rename_other(old_dir,old_dentry,new_dir,new_dentry);
 	if (!error) {
 		const char *new_name = old_dentry->d_name.name;
-		fsnotify_move(old_dir, new_dir, old_name, new_name, is_dir, new_dentry->d_inode);
+		fsnotify_move(old_dir, new_dir, old_name, new_name, is_dir,
+			      new_dentry->d_inode, old_dentry->d_inode);
 	}
 	fsnotify_oldname_free(old_name);
 
+3
fs/reiserfs/namei.c
···
 	 */
 	inode->i_uid = current->fsuid;
 	inode->i_mode = mode;
+	/* Make inode invalid - just in case we are going to drop it before
+	 * the initialization happens */
+	INODE_PKEY(inode)->k_objectid = 0;
 
 	if (dir->i_mode & S_ISGID) {
 		inode->i_gid = dir->i_gid;
+3 -1
include/asm-i386/pci.h
···
 #define pcibios_scan_all_fns(a, b)	0
 
 extern unsigned long pci_mem_start;
-#define PCIBIOS_MIN_IO		0x4000
+#define PCIBIOS_MIN_IO		0x1000
 #define PCIBIOS_MIN_MEM		(pci_mem_start)
+
+#define PCIBIOS_MIN_CARDBUS_IO	0x4000
 
 void pcibios_config_init(void);
 struct pci_bus * pcibios_scan_root(int bus);
+1 -1
include/asm-sh/unistd.h
···
 register long __sc7 __asm__ ("r7") = (long) arg4; \
 register long __sc0 __asm__ ("r0") = (long) arg5; \
 register long __sc1 __asm__ ("r1") = (long) arg6; \
-__asm__ __volatile__ ("trapa	#0x15" \
+__asm__ __volatile__ ("trapa	#0x16" \
 	: "=z" (__sc0) \
 	: "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6), "r" (__sc7), \
 	  "r" (__sc3), "r" (__sc1) \
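Assuming the historical SH syscall convention (the trap number encodes the argument count as 0x10 plus nargs), the one-character fix above makes the six-argument wrapper trap with the right number:

	/* trapa #0x10 + nargs: #0x10 = 0-arg syscall ... #0x16 = 6-arg syscall,
	 * so the _syscall6 wrapper must use trapa #0x16, not #0x15 (5 args) */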
+3 -1
include/asm-x86_64/pci.h
···
 extern int no_iommu, force_iommu;
 
 extern unsigned long pci_mem_start;
-#define PCIBIOS_MIN_IO		0x4000
+#define PCIBIOS_MIN_IO		0x1000
 #define PCIBIOS_MIN_MEM		(pci_mem_start)
+
+#define PCIBIOS_MIN_CARDBUS_IO	0x4000
 
 void pcibios_config_init(void);
 struct pci_bus * pcibios_scan_root(int bus);
+5 -1
include/linux/fsnotify.h
···
  */
 static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
 				 const char *old_name, const char *new_name,
-				 int isdir, struct inode *target)
+				 int isdir, struct inode *target, struct inode *source)
 {
 	u32 cookie = inotify_get_cookie();
 
···
 	if (target) {
 		inotify_inode_queue_event(target, IN_DELETE_SELF, 0, NULL);
 		inotify_inode_is_dead(target);
+	}
+
+	if (source) {
+		inotify_inode_queue_event(source, IN_MOVE_SELF, 0, NULL);
 	}
 }
 
+3 -1
include/linux/inotify.h
···
 #define IN_CREATE		0x00000100	/* Subfile was created */
 #define IN_DELETE		0x00000200	/* Subfile was deleted */
 #define IN_DELETE_SELF		0x00000400	/* Self was deleted */
+#define IN_MOVE_SELF		0x00000800	/* Self was moved */
 
 /* the following are legal events.  they are sent as needed to any watch */
 #define IN_UNMOUNT		0x00002000	/* Backing fs was unmounted */
···
  */
 #define IN_ALL_EVENTS	(IN_ACCESS | IN_MODIFY | IN_ATTRIB | IN_CLOSE_WRITE | \
 			 IN_CLOSE_NOWRITE | IN_OPEN | IN_MOVED_FROM | \
-			 IN_MOVED_TO | IN_DELETE | IN_CREATE | IN_DELETE_SELF)
+			 IN_MOVED_TO | IN_DELETE | IN_CREATE | IN_DELETE_SELF | \
+			 IN_MOVE_SELF)
 
 #ifdef __KERNEL__
 
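From userspace, the new IN_MOVE_SELF bit fires on the watched object itself when it is renamed. A hedged sketch, not part of this commit, assuming the inotify_init()/inotify_add_watch() syscall wrappers and <sys/inotify.h> are available (they postdate this patch) and using a hypothetical /tmp/watched path:

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/inotify.h>

	int main(void)
	{
		char buf[4096];
		int fd = inotify_init();

		if (fd < 0 || inotify_add_watch(fd, "/tmp/watched", IN_MOVE_SELF) < 0)
			return 1;

		/* read() blocks until the watched file itself is renamed */
		if (read(fd, buf, sizeof(buf)) >= (ssize_t)sizeof(struct inotify_event))
			puts("IN_MOVE_SELF: the watched file was moved");

		close(fd);
		return 0;
	}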
+14 -6
include/linux/netpoll.h
···
 
 #include <linux/netdevice.h>
 #include <linux/interrupt.h>
+#include <linux/rcupdate.h>
 #include <linux/list.h>
 
 struct netpoll;
···
 struct netpoll_info {
 	spinlock_t poll_lock;
 	int poll_owner;
+	int tries;
 	int rx_flags;
 	spinlock_t rx_lock;
 	struct netpoll *rx_np; /* netpoll that registered an rx_hook */
···
 	return ret;
 }
 
-static inline void netpoll_poll_lock(struct net_device *dev)
+static inline void *netpoll_poll_lock(struct net_device *dev)
 {
+	rcu_read_lock(); /* deal with race on ->npinfo */
 	if (dev->npinfo) {
 		spin_lock(&dev->npinfo->poll_lock);
 		dev->npinfo->poll_owner = smp_processor_id();
+		return dev->npinfo;
 	}
+	return NULL;
 }
 
-static inline void netpoll_poll_unlock(struct net_device *dev)
+static inline void netpoll_poll_unlock(void *have)
 {
-	if (dev->npinfo) {
-		dev->npinfo->poll_owner = -1;
-		spin_unlock(&dev->npinfo->poll_lock);
+	struct netpoll_info *npi = have;
+
+	if (npi) {
+		npi->poll_owner = -1;
+		spin_unlock(&npi->poll_lock);
 	}
+	rcu_read_unlock();
 }
 
 #else
 #define netpoll_rx(a) 0
-#define netpoll_poll_lock(a)
+#define netpoll_poll_lock(a) 0
 #define netpoll_poll_unlock(a)
 #endif
 
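With the new signature, callers must carry the opaque cookie from lock to unlock even when it is NULL; the net/core/dev.c hunk below is the in-tree caller, and in sketch form the pairing looks like this:

	/* sketch: hold the cookie across the poll, then hand it back */
	void *have = netpoll_poll_lock(dev);
	dev->poll(dev, &budget);
	netpoll_poll_unlock(have);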
+1 -1
include/linux/skbuff.h
···
 				nohdr:1;
 				/* 3 bits spare */
 	__u8			pkt_type;
-	__u16			protocol;
+	__be16			protocol;
 
 	void			(*destructor)(struct sk_buff *skb);
 #ifdef CONFIG_NETFILTER
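The __be16 annotation lets sparse endianness checking (roughly, a `make C=2 CF=-D__CHECK_ENDIAN__` build) flag host-order comparisons against skb->protocol. A short sketch, where handle_ipv4() is a hypothetical callee:

	/* correct: compare in network byte order; a bare ETH_P_IP
	 * comparison would now draw a sparse warning */
	if (skb->protocol == htons(ETH_P_IP))
		handle_ipv4(skb);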
+5 -4
net/core/dev.c
···
 	struct softnet_data *queue = &__get_cpu_var(softnet_data);
 	unsigned long start_time = jiffies;
 	int budget = netdev_budget;
-
+	void *have;
+
 	local_irq_disable();
 
 	while (!list_empty(&queue->poll_list)) {
···
 
 		dev = list_entry(queue->poll_list.next,
 				 struct net_device, poll_list);
-		netpoll_poll_lock(dev);
+		have = netpoll_poll_lock(dev);
 
 		if (dev->quota <= 0 || dev->poll(dev, &budget)) {
-			netpoll_poll_unlock(dev);
+			netpoll_poll_unlock(have);
 			local_irq_disable();
 			list_del(&dev->poll_list);
 			list_add_tail(&dev->poll_list, &queue->poll_list);
···
 			else
 				dev->quota = dev->weight;
 		} else {
-			netpoll_poll_unlock(dev);
+			netpoll_poll_unlock(have);
 			dev_put(dev);
 			local_irq_disable();
 		}
+38 -25
net/core/netpoll.c
···
 #define MAX_UDP_CHUNK 1460
 #define MAX_SKBS 32
 #define MAX_QUEUE_DEPTH (MAX_SKBS / 2)
+#define MAX_RETRIES 20000
 
 static DEFINE_SPINLOCK(skb_list_lock);
 static int nr_skbs;
···
 	int status;
 	struct netpoll_info *npinfo;
 
-repeat:
-	if(!np || !np->dev || !netif_running(np->dev)) {
+	if (!np || !np->dev || !netif_running(np->dev)) {
 		__kfree_skb(skb);
 		return;
 	}
 
-	/* avoid recursion */
 	npinfo = np->dev->npinfo;
+
+	/* avoid recursion */
 	if (npinfo->poll_owner == smp_processor_id() ||
 	    np->dev->xmit_lock_owner == smp_processor_id()) {
 		if (np->drop)
···
 		return;
 	}
 
-	spin_lock(&np->dev->xmit_lock);
-	np->dev->xmit_lock_owner = smp_processor_id();
+	do {
+		npinfo->tries--;
+		spin_lock(&np->dev->xmit_lock);
+		np->dev->xmit_lock_owner = smp_processor_id();
 
 		/*
 		 * network drivers do not expect to be called if the queue is
 		 * stopped.
 		 */
-	if (netif_queue_stopped(np->dev)) {
+		if (netif_queue_stopped(np->dev)) {
+			np->dev->xmit_lock_owner = -1;
+			spin_unlock(&np->dev->xmit_lock);
+			netpoll_poll(np);
+			udelay(50);
+			continue;
+		}
+
+		status = np->dev->hard_start_xmit(skb, np->dev);
 		np->dev->xmit_lock_owner = -1;
 		spin_unlock(&np->dev->xmit_lock);
 
-		netpoll_poll(np);
-		goto repeat;
-	}
+		/* success */
+		if(!status) {
+			npinfo->tries = MAX_RETRIES; /* reset */
+			return;
+		}
 
-	status = np->dev->hard_start_xmit(skb, np->dev);
-	np->dev->xmit_lock_owner = -1;
-	spin_unlock(&np->dev->xmit_lock);
-
-	/* transmit busy */
-	if(status) {
+		/* transmit busy */
 		netpoll_poll(np);
-		goto repeat;
-	}
+		udelay(50);
+	} while (npinfo->tries > 0);
 }
 
 void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
···
 	unsigned char *arp_ptr;
 	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
 	u32 sip, tip;
-	unsigned long flags;
 	struct sk_buff *send_skb;
 	struct netpoll *np = NULL;
 
-	spin_lock_irqsave(&npinfo->rx_lock, flags);
 	if (npinfo->rx_np && npinfo->rx_np->dev == skb->dev)
 		np = npinfo->rx_np;
-	spin_unlock_irqrestore(&npinfo->rx_lock, flags);
-
 	if (!np)
 		return;
 
···
 		if (!npinfo)
 			goto release;
 
+		npinfo->rx_flags = 0;
 		npinfo->rx_np = NULL;
 		npinfo->poll_lock = SPIN_LOCK_UNLOCKED;
 		npinfo->poll_owner = -1;
+		npinfo->tries = MAX_RETRIES;
 		npinfo->rx_lock = SPIN_LOCK_UNLOCKED;
 	} else
 		npinfo = ndev->npinfo;
···
 		npinfo->rx_np = np;
 		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 	}
+
+	/* fill up the skb queue */
+	refill_skbs();
+
 	/* last thing to do is link it to the net device structure */
 	ndev->npinfo = npinfo;
+
+	/* avoid racing with NAPI reading npinfo */
+	synchronize_rcu();
 
 	return 0;
 
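As a rough back-of-envelope bound (an estimate, not stated in the commit): each failed pass through the new loop costs at least the udelay(50), so the retry budget caps busy-waiting at about one second:

	/* MAX_RETRIES * 50 us = 20000 * 50 us = 1,000,000 us ~= 1 s
	 * of worst-case spinning before the send loop stops retrying */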