@@ -66,9 +66,16 @@
 #define EFX_USE_QWORD_IO 1
 #endif
 
+/* Hardware issue requires that only 64-bit naturally aligned writes
+ * are seen by hardware. Its not strictly necessary to restrict to
+ * x86_64 arch, but done for safety since unusual write combining behaviour
+ * can break PIO.
+ */
+#ifdef CONFIG_X86_64
 /* PIO is a win only if write-combining is possible */
 #ifdef ARCH_HAS_IOREMAP_WC
 #define EFX_USE_PIO 1
+#endif
 #endif
 
 #ifdef EFX_USE_QWORD_IO
drivers/net/ethernet/sfc/tx.c: 17 additions, 5 deletions
@@ -189,6 +189,18 @@
 	u8 buf[L1_CACHE_BYTES];
 };
 
+/* Copy in explicit 64-bit writes. */
+static void efx_memcpy_64(void __iomem *dest, void *src, size_t len)
+{
+	u64 *src64 = src;
+	u64 __iomem *dest64 = dest;
+	size_t l64 = len / 8;
+	size_t i;
+
+	for (i = 0; i < l64; i++)
+		writeq(src64[i], &dest64[i]);
+}
+
 /* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
  * Advances piobuf pointer. Leaves additional data in the copy buffer.
  */
@@ efx_memcpy_toaligned @@
 {
 	int block_len = len & ~(sizeof(copy_buf->buf) - 1);
 
-	memcpy_toio(*piobuf, data, block_len);
+	efx_memcpy_64(*piobuf, data, block_len);
 	*piobuf += block_len;
 	len -= block_len;
@@ efx_memcpy_toaligned @@
 	if (copy_buf->used < sizeof(copy_buf->buf))
 		return;
 
-	memcpy_toio(*piobuf, copy_buf->buf, sizeof(copy_buf->buf));
+	efx_memcpy_64(*piobuf, copy_buf->buf, sizeof(copy_buf->buf));
 	*piobuf += sizeof(copy_buf->buf);
 	data += copy_to_buf;
 	len -= copy_to_buf;
@@ efx_flush_copy_buffer @@
 {
 	/* if there's anything in it, write the whole buffer, including junk */
 	if (copy_buf->used)
-		memcpy_toio(piobuf, copy_buf->buf, sizeof(copy_buf->buf));
+		efx_memcpy_64(piobuf, copy_buf->buf, sizeof(copy_buf->buf));
 }
 
 /* Traverse skb structure and copy fragments in to PIO buffer.
@@ efx_enqueue_skb_pio @@
 	 */
 	BUILD_BUG_ON(L1_CACHE_BYTES >
 		     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
-	memcpy_toio(tx_queue->piobuf, skb->data,
-		    ALIGN(skb->len, L1_CACHE_BYTES));
+	efx_memcpy_64(tx_queue->piobuf, skb->data,
+		      ALIGN(skb->len, L1_CACHE_BYTES));
 	}
 
 	EFX_POPULATE_QWORD_5(buffer->option,
drivers/net/macvlan.c: 1 deletion
@@ -1204,7 +1204,6 @@
 		list_for_each_entry_safe(vlan, next, &port->vlans, list)
 			vlan->dev->rtnl_link_ops->dellink(vlan->dev, &list_kill);
 		unregister_netdevice_many(&list_kill);
-		list_del(&list_kill);
 		break;
 	case NETDEV_PRE_TYPE_CHANGE:
 		/* Forbid underlaying device to change its type. */
net/core/dev.c:
@@ -6634,6 +6634,9 @@
 /**
  * unregister_netdevice_many - unregister many devices
  * @head: list of devices
+ *
+ * Note: As most callers use a stack allocated list_head,
+ * we force a list_del() to make sure stack wont be corrupted later.
  */
 void unregister_netdevice_many(struct list_head *head)
 {
@@ unregister_netdevice_many @@
 		rollback_registered_many(head);
 		list_for_each_entry(dev, head, unreg_list)
 			net_set_todo(dev);
+		list_del(head);
 	}
 }
 EXPORT_SYMBOL(unregister_netdevice_many);
@@ default_device_exit_batch @@
 		}
 	}
 	unregister_netdevice_many(&dev_kill_list);
-	list_del(&dev_kill_list);
 	rtnl_unlock();
 }