qlge: Fix sparse warning regarding rx buffer queues.

Warnings:
drivers/net/qlge/qlge_main.c:909:17: warning: incorrect type in assignment (different base types)
drivers/net/qlge/qlge_main.c:909:17: expected unsigned int [unsigned] [usertype] addr_lo
drivers/net/qlge/qlge_main.c:909:17: got restricted unsigned int [usertype] <noident>
drivers/net/qlge/qlge_main.c:911:17: warning: incorrect type in assignment (different base types)
drivers/net/qlge/qlge_main.c:911:17: expected unsigned int [unsigned] [usertype] addr_hi
drivers/net/qlge/qlge_main.c:911:17: got restricted unsigned int [usertype] <noident>
drivers/net/qlge/qlge_main.c:974:17: warning: incorrect type in assignment (different base types)
drivers/net/qlge/qlge_main.c:974:17: expected unsigned int [unsigned] [usertype] addr_lo
drivers/net/qlge/qlge_main.c:974:17: got restricted unsigned int [usertype] <noident>
drivers/net/qlge/qlge_main.c:975:17: warning: incorrect type in assignment (different base types)
drivers/net/qlge/qlge_main.c:975:17: expected unsigned int [unsigned] [usertype] addr_hi
drivers/net/qlge/qlge_main.c:975:17: got restricted unsigned int [usertype] <noident>
drivers/net/qlge/qlge_main.c:2132:16: warning: incorrect type in assignment (different base types)
drivers/net/qlge/qlge_main.c:2132:16: expected unsigned int [unsigned] [usertype] addr_lo
drivers/net/qlge/qlge_main.c:2132:16: got restricted unsigned int [usertype] <noident>
drivers/net/qlge/qlge_main.c:2133:16: warning: incorrect type in assignment (different base types)
drivers/net/qlge/qlge_main.c:2133:16: expected unsigned int [unsigned] [usertype] addr_hi
drivers/net/qlge/qlge_main.c:2133:16: got restricted unsigned int [usertype] <noident>
drivers/net/qlge/qlge_main.c:2212:15: warning: incorrect type in assignment (different base types)
drivers/net/qlge/qlge_main.c:2212:15: expected unsigned int [unsigned] [usertype] addr_lo
drivers/net/qlge/qlge_main.c:2212:15: got restricted unsigned int [usertype] <noident>
drivers/net/qlge/qlge_main.c:2214:15: warning: incorrect type in assignment (different base types)
drivers/net/qlge/qlge_main.c:2214:15: expected unsigned int [unsigned] [usertype] addr_hi
drivers/net/qlge/qlge_main.c:2214:15: got restricted unsigned int [usertype] <noident>

Signed-off-by: Ron Mercer <ron.mercer@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Ron Mercer and committed by David S. Miller.
Commit: 2c9a0d41 (parent: fd2df4f7)

+15 -51
+1 -10
drivers/net/qlge/qlge.h
··· 818 }; 819 820 /* DATA STRUCTURES SHARED WITH HARDWARE. */ 821 - 822 - struct bq_element { 823 - u32 addr_lo; 824 - #define BQ_END 0x00000001 825 - #define BQ_CONT 0x00000002 826 - #define BQ_MASK 0x00000003 827 - u32 addr_hi; 828 - } __attribute((packed)); 829 - 830 struct tx_buf_desc { 831 __le64 addr; 832 __le32 len; ··· 1130 struct page *lbq_page; 1131 struct sk_buff *skb; 1132 } p; 1133 - struct bq_element *bq; 1134 int index; 1135 DECLARE_PCI_UNMAP_ADDR(mapaddr); 1136 DECLARE_PCI_UNMAP_LEN(maplen);
··· 818 }; 819 820 /* DATA STRUCTURES SHARED WITH HARDWARE. */ 821 struct tx_buf_desc { 822 __le64 addr; 823 __le32 len; ··· 1139 struct page *lbq_page; 1140 struct sk_buff *skb; 1141 } p; 1142 + __le64 *addr; 1143 int index; 1144 DECLARE_PCI_UNMAP_ADDR(mapaddr); 1145 DECLARE_PCI_UNMAP_LEN(maplen);
+14 -41
drivers/net/qlge/qlge_main.c
··· 874 { 875 int clean_idx = rx_ring->lbq_clean_idx; 876 struct bq_desc *lbq_desc; 877 - struct bq_element *bq; 878 u64 map; 879 int i; 880 ··· 883 "lbq: try cleaning clean_idx = %d.\n", 884 clean_idx); 885 lbq_desc = &rx_ring->lbq[clean_idx]; 886 - bq = lbq_desc->bq; 887 if (lbq_desc->p.lbq_page == NULL) { 888 QPRINTK(qdev, RX_STATUS, DEBUG, 889 "lbq: getting new page for index %d.\n", ··· 904 } 905 pci_unmap_addr_set(lbq_desc, mapaddr, map); 906 pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE); 907 - bq->addr_lo = /*lbq_desc->addr_lo = */ 908 - cpu_to_le32(map); 909 - bq->addr_hi = /*lbq_desc->addr_hi = */ 910 - cpu_to_le32(map >> 32); 911 } 912 clean_idx++; 913 if (clean_idx == rx_ring->lbq_len) ··· 929 { 930 int clean_idx = rx_ring->sbq_clean_idx; 931 struct bq_desc *sbq_desc; 932 - struct bq_element *bq; 933 u64 map; 934 int i; 935 ··· 938 QPRINTK(qdev, RX_STATUS, DEBUG, 939 "sbq: try cleaning clean_idx = %d.\n", 940 clean_idx); 941 - bq = sbq_desc->bq; 942 if (sbq_desc->p.skb == NULL) { 943 QPRINTK(qdev, RX_STATUS, DEBUG, 944 "sbq: getting new skb for index %d.\n", ··· 964 pci_unmap_addr_set(sbq_desc, mapaddr, map); 965 pci_unmap_len_set(sbq_desc, maplen, 966 rx_ring->sbq_buf_size / 2); 967 - bq->addr_lo = cpu_to_le32(map); 968 - bq->addr_hi = cpu_to_le32(map >> 32); 969 } 970 971 clean_idx++; ··· 1332 * eventually be in trouble. 1333 */ 1334 int size, offset, i = 0; 1335 - struct bq_element *bq, bq_array[8]; 1336 sbq_desc = ql_get_curr_sbuf(rx_ring); 1337 pci_unmap_single(qdev->pdev, 1338 pci_unmap_addr(sbq_desc, mapaddr), ··· 1358 } else { 1359 QPRINTK(qdev, RX_STATUS, DEBUG, 1360 "Headers in small, %d bytes of data in chain of large.\n", length); 1361 - bq = (struct bq_element *)sbq_desc->p.skb->data; 1362 } 1363 while (length > 0) { 1364 lbq_desc = ql_get_curr_lbuf(rx_ring); 1365 - if ((bq->addr_lo & ~BQ_MASK) != lbq_desc->bq->addr_lo) { 1366 - QPRINTK(qdev, RX_STATUS, ERR, 1367 - "Panic!!! 
bad large buffer address, expected 0x%.08x, got 0x%.08x.\n", 1368 - lbq_desc->bq->addr_lo, bq->addr_lo); 1369 - return NULL; 1370 - } 1371 pci_unmap_page(qdev->pdev, 1372 pci_unmap_addr(lbq_desc, 1373 mapaddr), ··· 2079 put_page(lbq_desc->p.lbq_page); 2080 lbq_desc->p.lbq_page = NULL; 2081 } 2082 - lbq_desc->bq->addr_lo = 0; 2083 - lbq_desc->bq->addr_hi = 0; 2084 } 2085 } 2086 ··· 2091 int i; 2092 struct bq_desc *lbq_desc; 2093 u64 map; 2094 - struct bq_element *bq = rx_ring->lbq_base; 2095 2096 for (i = 0; i < rx_ring->lbq_len; i++) { 2097 lbq_desc = &rx_ring->lbq[i]; 2098 memset(lbq_desc, 0, sizeof(lbq_desc)); 2099 - lbq_desc->bq = bq; 2100 lbq_desc->index = i; 2101 lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC); 2102 if (unlikely(!lbq_desc->p.lbq_page)) { ··· 2113 } 2114 pci_unmap_addr_set(lbq_desc, mapaddr, map); 2115 pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE); 2116 - bq->addr_lo = cpu_to_le32(map); 2117 - bq->addr_hi = cpu_to_le32(map >> 32); 2118 } 2119 bq++; 2120 } ··· 2142 dev_kfree_skb(sbq_desc->p.skb); 2143 sbq_desc->p.skb = NULL; 2144 } 2145 - if (sbq_desc->bq == NULL) { 2146 - QPRINTK(qdev, IFUP, ERR, "sbq_desc->bq %d is NULL.\n", 2147 - i); 2148 - return; 2149 - } 2150 - sbq_desc->bq->addr_lo = 0; 2151 - sbq_desc->bq->addr_hi = 0; 2152 } 2153 } 2154 ··· 2153 struct bq_desc *sbq_desc; 2154 struct sk_buff *skb; 2155 u64 map; 2156 - struct bq_element *bq = rx_ring->sbq_base; 2157 2158 for (i = 0; i < rx_ring->sbq_len; i++) { 2159 sbq_desc = &rx_ring->sbq[i]; 2160 memset(sbq_desc, 0, sizeof(sbq_desc)); 2161 sbq_desc->index = i; 2162 - sbq_desc->bq = bq; 2163 skb = netdev_alloc_skb(qdev->ndev, rx_ring->sbq_buf_size); 2164 if (unlikely(!skb)) { 2165 /* Better luck next round */ ··· 2185 } 2186 pci_unmap_addr_set(sbq_desc, mapaddr, map); 2187 pci_unmap_len_set(sbq_desc, maplen, rx_ring->sbq_buf_size / 2); 2188 - bq->addr_lo = /*sbq_desc->addr_lo = */ 2189 - cpu_to_le32(map); 2190 - bq->addr_hi = /*sbq_desc->addr_hi = */ 2191 - cpu_to_le32(map >> 32); 2192 
bq++; 2193 } 2194 return 0; ··· 3329 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb); 3330 rx_ring->lbq_len = NUM_LARGE_BUFFERS; 3331 rx_ring->lbq_size = 3332 - rx_ring->lbq_len * sizeof(struct bq_element); 3333 rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE; 3334 rx_ring->sbq_len = NUM_SMALL_BUFFERS; 3335 rx_ring->sbq_size = 3336 - rx_ring->sbq_len * sizeof(struct bq_element); 3337 rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2; 3338 rx_ring->type = DEFAULT_Q; 3339 } else if (i < qdev->rss_ring_first_cq_id) { ··· 3360 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb); 3361 rx_ring->lbq_len = NUM_LARGE_BUFFERS; 3362 rx_ring->lbq_size = 3363 - rx_ring->lbq_len * sizeof(struct bq_element); 3364 rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE; 3365 rx_ring->sbq_len = NUM_SMALL_BUFFERS; 3366 rx_ring->sbq_size = 3367 - rx_ring->sbq_len * sizeof(struct bq_element); 3368 rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2; 3369 rx_ring->type = RX_Q; 3370 }
··· 874 { 875 int clean_idx = rx_ring->lbq_clean_idx; 876 struct bq_desc *lbq_desc; 877 u64 map; 878 int i; 879 ··· 884 "lbq: try cleaning clean_idx = %d.\n", 885 clean_idx); 886 lbq_desc = &rx_ring->lbq[clean_idx]; 887 if (lbq_desc->p.lbq_page == NULL) { 888 QPRINTK(qdev, RX_STATUS, DEBUG, 889 "lbq: getting new page for index %d.\n", ··· 906 } 907 pci_unmap_addr_set(lbq_desc, mapaddr, map); 908 pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE); 909 + *lbq_desc->addr = cpu_to_le64(map); 910 } 911 clean_idx++; 912 if (clean_idx == rx_ring->lbq_len) ··· 934 { 935 int clean_idx = rx_ring->sbq_clean_idx; 936 struct bq_desc *sbq_desc; 937 u64 map; 938 int i; 939 ··· 944 QPRINTK(qdev, RX_STATUS, DEBUG, 945 "sbq: try cleaning clean_idx = %d.\n", 946 clean_idx); 947 if (sbq_desc->p.skb == NULL) { 948 QPRINTK(qdev, RX_STATUS, DEBUG, 949 "sbq: getting new skb for index %d.\n", ··· 971 pci_unmap_addr_set(sbq_desc, mapaddr, map); 972 pci_unmap_len_set(sbq_desc, maplen, 973 rx_ring->sbq_buf_size / 2); 974 + *sbq_desc->addr = cpu_to_le64(map); 975 } 976 977 clean_idx++; ··· 1340 * eventually be in trouble. 
1341 */ 1342 int size, offset, i = 0; 1343 + __le64 *bq, bq_array[8]; 1344 sbq_desc = ql_get_curr_sbuf(rx_ring); 1345 pci_unmap_single(qdev->pdev, 1346 pci_unmap_addr(sbq_desc, mapaddr), ··· 1366 } else { 1367 QPRINTK(qdev, RX_STATUS, DEBUG, 1368 "Headers in small, %d bytes of data in chain of large.\n", length); 1369 + bq = (__le64 *)sbq_desc->p.skb->data; 1370 } 1371 while (length > 0) { 1372 lbq_desc = ql_get_curr_lbuf(rx_ring); 1373 pci_unmap_page(qdev->pdev, 1374 pci_unmap_addr(lbq_desc, 1375 mapaddr), ··· 2093 put_page(lbq_desc->p.lbq_page); 2094 lbq_desc->p.lbq_page = NULL; 2095 } 2096 } 2097 } 2098 ··· 2107 int i; 2108 struct bq_desc *lbq_desc; 2109 u64 map; 2110 + __le64 *bq = rx_ring->lbq_base; 2111 2112 for (i = 0; i < rx_ring->lbq_len; i++) { 2113 lbq_desc = &rx_ring->lbq[i]; 2114 memset(lbq_desc, 0, sizeof(lbq_desc)); 2115 + lbq_desc->addr = bq; 2116 lbq_desc->index = i; 2117 lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC); 2118 if (unlikely(!lbq_desc->p.lbq_page)) { ··· 2129 } 2130 pci_unmap_addr_set(lbq_desc, mapaddr, map); 2131 pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE); 2132 + *lbq_desc->addr = cpu_to_le64(map); 2133 } 2134 bq++; 2135 } ··· 2159 dev_kfree_skb(sbq_desc->p.skb); 2160 sbq_desc->p.skb = NULL; 2161 } 2162 } 2163 } 2164 ··· 2177 struct bq_desc *sbq_desc; 2178 struct sk_buff *skb; 2179 u64 map; 2180 + __le64 *bq = rx_ring->sbq_base; 2181 2182 for (i = 0; i < rx_ring->sbq_len; i++) { 2183 sbq_desc = &rx_ring->sbq[i]; 2184 memset(sbq_desc, 0, sizeof(sbq_desc)); 2185 sbq_desc->index = i; 2186 + sbq_desc->addr = bq; 2187 skb = netdev_alloc_skb(qdev->ndev, rx_ring->sbq_buf_size); 2188 if (unlikely(!skb)) { 2189 /* Better luck next round */ ··· 2209 } 2210 pci_unmap_addr_set(sbq_desc, mapaddr, map); 2211 pci_unmap_len_set(sbq_desc, maplen, rx_ring->sbq_buf_size / 2); 2212 + *sbq_desc->addr = cpu_to_le64(map); 2213 bq++; 2214 } 2215 return 0; ··· 3356 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb); 3357 rx_ring->lbq_len = NUM_LARGE_BUFFERS; 
3358 rx_ring->lbq_size = 3359 + rx_ring->lbq_len * sizeof(__le64); 3360 rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE; 3361 rx_ring->sbq_len = NUM_SMALL_BUFFERS; 3362 rx_ring->sbq_size = 3363 + rx_ring->sbq_len * sizeof(__le64); 3364 rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2; 3365 rx_ring->type = DEFAULT_Q; 3366 } else if (i < qdev->rss_ring_first_cq_id) { ··· 3387 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb); 3388 rx_ring->lbq_len = NUM_LARGE_BUFFERS; 3389 rx_ring->lbq_size = 3390 + rx_ring->lbq_len * sizeof(__le64); 3391 rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE; 3392 rx_ring->sbq_len = NUM_SMALL_BUFFERS; 3393 rx_ring->sbq_size = 3394 + rx_ring->sbq_len * sizeof(__le64); 3395 rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2; 3396 rx_ring->type = RX_Q; 3397 }