Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: ti: netcp: restore get/set_pad_info() functionality

The commit 899077791403 ("netcp: try to reduce type confusion in
descriptors") introduces a regression in Kernel 4.5-rc1 and it breaks
get/set_pad_info() functionality.

The TI NETCP driver uses the pad0 and pad1 fields of knav_dma_desc to
store a DMA/MEM buffer pointer and a buffer size respectively. And in
both cases for Keystone 2 the pointer type size is 32 bit regardless of
whether LAPE is enabled or not, because CONFIG_ARCH_DMA_ADDR_T_64BIT
originally is not expected to be defined.

Unfortunately, the above commit changed the buffer-pointer save/restore
code (get/set_pad_info()) and added an intermediate conversion to u64,
which works incorrectly on 32-bit Keystone 2 and causes a TI NETCP
driver crash in the RX/TX path due to an "Unable to handle kernel NULL
pointer" exception. This issue was reported and discussed in [1].

Hence, fix it by partially reverting the above commit and restoring the
get/set_pad_info() functionality as it was before.

[1] https://www.mail-archive.com/netdev@vger.kernel.org/msg95361.html
Cc: Wingman Kwok <w-kwok2@ti.com>
Cc: Mugunthan V N <mugunthanvnm@ti.com>
CC: David Laight <David.Laight@ACULAB.COM>
CC: Arnd Bergmann <arnd@arndb.de>
Reported-by: Franklin S Cooper Jr <fcooper@ti.com>
Signed-off-by: Grygorii Strashko <grygorii.strashko@ti.com>
Signed-off-by: Murali Karicheri <m-karicheri2@ti.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Karicheri, Muralidharan and committed by
David S. Miller
9ecfe875 3301be32

+18 -41
+18 -41
drivers/net/ethernet/ti/netcp_core.c
··· 117 117 *ndesc = le32_to_cpu(desc->next_desc); 118 118 } 119 119 120 - static void get_pad_info(u32 *pad0, u32 *pad1, u32 *pad2, struct knav_dma_desc *desc) 120 + static void get_pad_info(u32 *pad0, u32 *pad1, struct knav_dma_desc *desc) 121 121 { 122 122 *pad0 = le32_to_cpu(desc->pad[0]); 123 123 *pad1 = le32_to_cpu(desc->pad[1]); 124 - *pad2 = le32_to_cpu(desc->pad[2]); 125 - } 126 - 127 - static void get_pad_ptr(void **padptr, struct knav_dma_desc *desc) 128 - { 129 - u64 pad64; 130 - 131 - pad64 = le32_to_cpu(desc->pad[0]) + 132 - ((u64)le32_to_cpu(desc->pad[1]) << 32); 133 - *padptr = (void *)(uintptr_t)pad64; 134 124 } 135 125 136 126 static void get_org_pkt_info(dma_addr_t *buff, u32 *buff_len, ··· 153 163 desc->packet_info = cpu_to_le32(pkt_info); 154 164 } 155 165 156 - static void set_pad_info(u32 pad0, u32 pad1, u32 pad2, struct knav_dma_desc *desc) 166 + static void set_pad_info(u32 pad0, u32 pad1, struct knav_dma_desc *desc) 157 167 { 158 168 desc->pad[0] = cpu_to_le32(pad0); 159 169 desc->pad[1] = cpu_to_le32(pad1); 160 - desc->pad[2] = cpu_to_le32(pad1); 161 170 } 162 171 163 172 static void set_org_pkt_info(dma_addr_t buff, u32 buff_len, ··· 570 581 dma_addr_t dma_desc, dma_buf; 571 582 unsigned int buf_len, dma_sz = sizeof(*ndesc); 572 583 void *buf_ptr; 573 - u32 pad[2]; 574 584 u32 tmp; 575 585 576 586 get_words(&dma_desc, 1, &desc->next_desc); ··· 581 593 break; 582 594 } 583 595 get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc); 584 - get_pad_ptr(&buf_ptr, ndesc); 596 + get_pad_info((u32 *)&buf_ptr, &buf_len, ndesc); 585 597 dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE); 586 598 __free_page(buf_ptr); 587 599 knav_pool_desc_put(netcp->rx_pool, desc); 588 600 } 589 - 590 - get_pad_info(&pad[0], &pad[1], &buf_len, desc); 591 - buf_ptr = (void *)(uintptr_t)(pad[0] + ((u64)pad[1] << 32)); 601 + get_pad_info((u32 *)&buf_ptr, &buf_len, desc); 592 602 593 603 if (buf_ptr) 594 604 netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr); ··· 
625 639 dma_addr_t dma_desc, dma_buff; 626 640 struct netcp_packet p_info; 627 641 struct sk_buff *skb; 628 - u32 pad[2]; 629 642 void *org_buf_ptr; 643 + u32 tmp; 630 644 631 645 dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz); 632 646 if (!dma_desc) ··· 639 653 } 640 654 641 655 get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc); 642 - get_pad_info(&pad[0], &pad[1], &org_buf_len, desc); 643 - org_buf_ptr = (void *)(uintptr_t)(pad[0] + ((u64)pad[1] << 32)); 656 + get_pad_info((u32 *)&org_buf_ptr, &org_buf_len, desc); 644 657 645 658 if (unlikely(!org_buf_ptr)) { 646 659 dev_err(netcp->ndev_dev, "NULL bufptr in desc\n"); ··· 664 679 /* Fill in the page fragment list */ 665 680 while (dma_desc) { 666 681 struct page *page; 667 - void *ptr; 668 682 669 683 ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz); 670 684 if (unlikely(!ndesc)) { ··· 672 688 } 673 689 674 690 get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc); 675 - get_pad_ptr(&ptr, ndesc); 676 - page = ptr; 691 + get_pad_info((u32 *)&page, &tmp, ndesc); 677 692 678 693 if (likely(dma_buff && buf_len && page)) { 679 694 dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE, ··· 750 767 unsigned int buf_len, dma_sz; 751 768 dma_addr_t dma; 752 769 void *buf_ptr; 770 + u32 tmp; 753 771 754 772 /* Allocate descriptor */ 755 773 while ((dma = knav_queue_pop(netcp->rx_fdq[fdq], &dma_sz))) { ··· 761 777 } 762 778 763 779 get_org_pkt_info(&dma, &buf_len, desc); 764 - get_pad_ptr(&buf_ptr, desc); 780 + get_pad_info((u32 *)&buf_ptr, &tmp, desc); 765 781 766 782 if (unlikely(!dma)) { 767 783 dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n"); ··· 813 829 struct page *page; 814 830 dma_addr_t dma; 815 831 void *bufptr; 816 - u32 pad[3]; 832 + u32 pad[2]; 817 833 818 834 /* Allocate descriptor */ 819 835 hwdesc = knav_pool_desc_get(netcp->rx_pool); ··· 830 846 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 831 847 832 848 bufptr = netdev_alloc_frag(primary_buf_len); 833 - pad[2] = primary_buf_len; 849 + 
pad[1] = primary_buf_len; 834 850 835 851 if (unlikely(!bufptr)) { 836 852 dev_warn_ratelimited(netcp->ndev_dev, ··· 842 858 if (unlikely(dma_mapping_error(netcp->dev, dma))) 843 859 goto fail; 844 860 845 - pad[0] = lower_32_bits((uintptr_t)bufptr); 846 - pad[1] = upper_32_bits((uintptr_t)bufptr); 847 - 861 + pad[0] = (u32)bufptr; 848 862 } else { 849 863 /* Allocate a secondary receive queue entry */ 850 864 page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD); ··· 852 870 } 853 871 buf_len = PAGE_SIZE; 854 872 dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_TO_DEVICE); 855 - pad[0] = lower_32_bits(dma); 856 - pad[1] = upper_32_bits(dma); 857 - pad[2] = 0; 873 + pad[0] = (u32)page; 874 + pad[1] = 0; 858 875 } 859 876 860 877 desc_info = KNAV_DMA_DESC_PS_INFO_IN_DESC; ··· 863 882 pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) << 864 883 KNAV_DMA_DESC_RETQ_SHIFT; 865 884 set_org_pkt_info(dma, buf_len, hwdesc); 866 - set_pad_info(pad[0], pad[1], pad[2], hwdesc); 885 + set_pad_info(pad[0], pad[1], hwdesc); 867 886 set_desc_info(desc_info, pkt_info, hwdesc); 868 887 869 888 /* Push to FDQs */ ··· 952 971 unsigned int budget) 953 972 { 954 973 struct knav_dma_desc *desc; 955 - void *ptr; 956 974 struct sk_buff *skb; 957 975 unsigned int dma_sz; 958 976 dma_addr_t dma; 959 977 int pkts = 0; 978 + u32 tmp; 960 979 961 980 while (budget--) { 962 981 dma = knav_queue_pop(netcp->tx_compl_q, &dma_sz); ··· 969 988 continue; 970 989 } 971 990 972 - get_pad_ptr(&ptr, desc); 973 - skb = ptr; 991 + get_pad_info((u32 *)&skb, &tmp, desc); 974 992 netcp_free_tx_desc_chain(netcp, desc, dma_sz); 975 993 if (!skb) { 976 994 dev_err(netcp->ndev_dev, "No skb in Tx desc\n"); ··· 1174 1194 } 1175 1195 1176 1196 set_words(&tmp, 1, &desc->packet_info); 1177 - tmp = lower_32_bits((uintptr_t)&skb); 1178 - set_words(&tmp, 1, &desc->pad[0]); 1179 - tmp = upper_32_bits((uintptr_t)&skb); 1180 - set_words(&tmp, 1, &desc->pad[1]); 1197 + set_words((u32 *)&skb, 1, &desc->pad[0]); 
1181 1198 1182 1199 if (tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO) { 1183 1200 tmp = tx_pipe->switch_to_port;