Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

soc: ti: knav_dma: rename pad in struct knav_dma_desc to sw_data

Rename the pad to sw_data as per the description of this field in the hardware
spec (refer to sprugr9 from www.ti.com). The latest version of the document is
at http://www.ti.com/lit/ug/sprugr9h/sprugr9h.pdf, and section 3.1
Host Packet Descriptor describes this field.

Define and use a constant for the size of the sw_data field, similar to
the other fields in the descriptor struct, and document the sw_data field
in the header. As sw_data is not touched by hardware, its type can be
changed to u32.

Rename the helpers to match with the updated dma desc field sw_data.

Cc: Wingman Kwok <w-kwok2@ti.com>
Cc: Mugunthan V N <mugunthanvnm@ti.com>
CC: Arnd Bergmann <arnd@arndb.de>
CC: Grygorii Strashko <grygorii.strashko@ti.com>
CC: David Laight <David.Laight@ACULAB.COM>
Signed-off-by: Murali Karicheri <m-karicheri2@ti.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Karicheri, Muralidharan and committed by
David S. Miller
b1cb86ae 9ecfe875

+24 -20
+21 -19
drivers/net/ethernet/ti/netcp_core.c
··· 117 117 *ndesc = le32_to_cpu(desc->next_desc); 118 118 } 119 119 120 - static void get_pad_info(u32 *pad0, u32 *pad1, struct knav_dma_desc *desc) 120 + static void get_sw_data(u32 *data0, u32 *data1, struct knav_dma_desc *desc) 121 121 { 122 - *pad0 = le32_to_cpu(desc->pad[0]); 123 - *pad1 = le32_to_cpu(desc->pad[1]); 122 + /* No Endian conversion needed as this data is untouched by hw */ 123 + *data0 = desc->sw_data[0]; 124 + *data1 = desc->sw_data[1]; 124 125 } 125 126 126 127 static void get_org_pkt_info(dma_addr_t *buff, u32 *buff_len, ··· 154 153 desc->packet_info = cpu_to_le32(pkt_info); 155 154 } 156 155 157 - static void set_pad_info(u32 pad0, u32 pad1, struct knav_dma_desc *desc) 156 + static void set_sw_data(u32 data0, u32 data1, struct knav_dma_desc *desc) 158 157 { 159 - desc->pad[0] = cpu_to_le32(pad0); 160 - desc->pad[1] = cpu_to_le32(pad1); 158 + /* No Endian conversion needed as this data is untouched by hw */ 159 + desc->sw_data[0] = data0; 160 + desc->sw_data[1] = data1; 161 161 } 162 162 163 163 static void set_org_pkt_info(dma_addr_t buff, u32 buff_len, ··· 583 581 break; 584 582 } 585 583 get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc); 586 - get_pad_info((u32 *)&buf_ptr, &buf_len, ndesc); 584 + get_sw_data((u32 *)&buf_ptr, &buf_len, ndesc); 587 585 dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE); 588 586 __free_page(buf_ptr); 589 587 knav_pool_desc_put(netcp->rx_pool, desc); 590 588 } 591 - get_pad_info((u32 *)&buf_ptr, &buf_len, desc); 589 + get_sw_data((u32 *)&buf_ptr, &buf_len, desc); 592 590 593 591 if (buf_ptr) 594 592 netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr); ··· 641 639 } 642 640 643 641 get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc); 644 - get_pad_info((u32 *)&org_buf_ptr, &org_buf_len, desc); 642 + get_sw_data((u32 *)&org_buf_ptr, &org_buf_len, desc); 645 643 646 644 if (unlikely(!org_buf_ptr)) { 647 645 dev_err(netcp->ndev_dev, "NULL bufptr in desc\n"); ··· 674 672 } 675 673 676 674 
get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc); 677 - get_pad_info((u32 *)&page, &tmp, ndesc); 675 + get_sw_data((u32 *)&page, &tmp, ndesc); 678 676 679 677 if (likely(dma_buff && buf_len && page)) { 680 678 dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE, ··· 763 761 } 764 762 765 763 get_org_pkt_info(&dma, &buf_len, desc); 766 - get_pad_info((u32 *)&buf_ptr, &tmp, desc); 764 + get_sw_data((u32 *)&buf_ptr, &tmp, desc); 767 765 768 766 if (unlikely(!dma)) { 769 767 dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n"); ··· 815 813 struct page *page; 816 814 dma_addr_t dma; 817 815 void *bufptr; 818 - u32 pad[2]; 816 + u32 sw_data[2]; 819 817 820 818 /* Allocate descriptor */ 821 819 hwdesc = knav_pool_desc_get(netcp->rx_pool); ··· 832 830 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 833 831 834 832 bufptr = netdev_alloc_frag(primary_buf_len); 835 - pad[1] = primary_buf_len; 833 + sw_data[1] = primary_buf_len; 836 834 837 835 if (unlikely(!bufptr)) { 838 836 dev_warn_ratelimited(netcp->ndev_dev, ··· 844 842 if (unlikely(dma_mapping_error(netcp->dev, dma))) 845 843 goto fail; 846 844 847 - pad[0] = (u32)bufptr; 845 + sw_data[0] = (u32)bufptr; 848 846 } else { 849 847 /* Allocate a secondary receive queue entry */ 850 848 page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD); ··· 854 852 } 855 853 buf_len = PAGE_SIZE; 856 854 dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_TO_DEVICE); 857 - pad[0] = (u32)page; 858 - pad[1] = 0; 855 + sw_data[0] = (u32)page; 856 + sw_data[1] = 0; 859 857 } 860 858 861 859 desc_info = KNAV_DMA_DESC_PS_INFO_IN_DESC; ··· 865 863 pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) << 866 864 KNAV_DMA_DESC_RETQ_SHIFT; 867 865 set_org_pkt_info(dma, buf_len, hwdesc); 868 - set_pad_info(pad[0], pad[1], hwdesc); 866 + set_sw_data(sw_data[0], sw_data[1], hwdesc); 869 867 set_desc_info(desc_info, pkt_info, hwdesc); 870 868 871 869 /* Push to FDQs */ ··· 971 969 continue; 972 970 } 973 971 974 - get_pad_info((u32 *)&skb, &tmp, 
desc); 972 + get_sw_data((u32 *)&skb, &tmp, desc); 975 973 netcp_free_tx_desc_chain(netcp, desc, dma_sz); 976 974 if (!skb) { 977 975 dev_err(netcp->ndev_dev, "No skb in Tx desc\n"); ··· 1176 1174 } 1177 1175 1178 1176 set_words(&tmp, 1, &desc->packet_info); 1179 - set_words((u32 *)&skb, 1, &desc->pad[0]); 1177 + set_sw_data((u32)skb, 0, desc); 1180 1178 1181 1179 if (tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO) { 1182 1180 tmp = tx_pipe->switch_to_port;
+3 -1
include/linux/soc/ti/knav_dma.h
··· 44 44 45 45 #define KNAV_DMA_NUM_EPIB_WORDS 4 46 46 #define KNAV_DMA_NUM_PS_WORDS 16 47 + #define KNAV_DMA_NUM_SW_DATA_WORDS 4 47 48 #define KNAV_DMA_FDQ_PER_CHAN 4 48 49 49 50 /* Tx channel scheduling priority */ ··· 143 142 * @orig_buff: buff pointer since 'buff' can be overwritten 144 143 * @epib: Extended packet info block 145 144 * @psdata: Protocol specific 145 + * @sw_data: Software private data not touched by h/w 146 146 */ 147 147 struct knav_dma_desc { 148 148 __le32 desc_info; ··· 156 154 __le32 orig_buff; 157 155 __le32 epib[KNAV_DMA_NUM_EPIB_WORDS]; 158 156 __le32 psdata[KNAV_DMA_NUM_PS_WORDS]; 159 - __le32 pad[4]; 157 + u32 sw_data[KNAV_DMA_NUM_SW_DATA_WORDS]; 160 158 } ____cacheline_aligned; 161 159 162 160 #if IS_ENABLED(CONFIG_KEYSTONE_NAVIGATOR_DMA)