mlx4_core: Clean up struct mlx4_buf

Now that struct mlx4_buf.u is a struct instead of a union (a result of
the vmap() changes), there's no point in keeping the .u wrapper at
all. So move .direct and .page_list directly into struct mlx4_buf and
get rid of a bunch of unnecessary ".u"s.

Signed-off-by: Roland Dreier <rolandd@cisco.com>
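
For reference, the layout after this patch, reconstructed from the
include/linux/mlx4/device.h hunk below. struct mlx4_buf_list is defined
just above struct mlx4_buf in that header and is untouched by this
patch; it pairs a kernel virtual address with its DMA address:

struct mlx4_buf_list {
	void		       *buf;
	dma_addr_t		map;
};

struct mlx4_buf {
	struct mlx4_buf_list	direct;
	struct mlx4_buf_list	*page_list;
	int			nbufs;
	int			npages;
	int			page_shift;
};

Callers simply drop the ".u": buf->u.direct.buf becomes buf->direct.buf,
buf->u.page_list[i].map becomes buf->page_list[i].map, and so on.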

3 files changed, 26 insertions(+), 28 deletions(-)

drivers/net/mlx4/alloc.c (+20 -20)
···
 		buf->nbufs        = 1;
 		buf->npages       = 1;
 		buf->page_shift   = get_order(size) + PAGE_SHIFT;
-		buf->u.direct.buf = dma_alloc_coherent(&dev->pdev->dev,
+		buf->direct.buf   = dma_alloc_coherent(&dev->pdev->dev,
 						       size, &t, GFP_KERNEL);
-		if (!buf->u.direct.buf)
+		if (!buf->direct.buf)
 			return -ENOMEM;

-		buf->u.direct.map = t;
+		buf->direct.map = t;

 		while (t & ((1 << buf->page_shift) - 1)) {
 			--buf->page_shift;
 			buf->npages *= 2;
 		}

-		memset(buf->u.direct.buf, 0, size);
+		memset(buf->direct.buf, 0, size);
 	} else {
 		int i;

 		buf->nbufs       = (size + PAGE_SIZE - 1) / PAGE_SIZE;
 		buf->npages      = buf->nbufs;
 		buf->page_shift  = PAGE_SHIFT;
-		buf->u.page_list = kzalloc(buf->nbufs * sizeof *buf->u.page_list,
+		buf->page_list   = kzalloc(buf->nbufs * sizeof *buf->page_list,
 					   GFP_KERNEL);
-		if (!buf->u.page_list)
+		if (!buf->page_list)
 			return -ENOMEM;

 		for (i = 0; i < buf->nbufs; ++i) {
-			buf->u.page_list[i].buf =
+			buf->page_list[i].buf =
 				dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
 						   &t, GFP_KERNEL);
-			if (!buf->u.page_list[i].buf)
+			if (!buf->page_list[i].buf)
 				goto err_free;

-			buf->u.page_list[i].map = t;
+			buf->page_list[i].map = t;

-			memset(buf->u.page_list[i].buf, 0, PAGE_SIZE);
+			memset(buf->page_list[i].buf, 0, PAGE_SIZE);
 		}

 		if (BITS_PER_LONG == 64) {
···
 			if (!pages)
 				goto err_free;
 			for (i = 0; i < buf->nbufs; ++i)
-				pages[i] = virt_to_page(buf->u.page_list[i].buf);
-			buf->u.direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
+				pages[i] = virt_to_page(buf->page_list[i].buf);
+			buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
 			kfree(pages);
-			if (!buf->u.direct.buf)
+			if (!buf->direct.buf)
 				goto err_free;
 		}
 	}
···
 	int i;

 	if (buf->nbufs == 1)
-		dma_free_coherent(&dev->pdev->dev, size, buf->u.direct.buf,
-				  buf->u.direct.map);
+		dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
+				  buf->direct.map);
 	else {
 		if (BITS_PER_LONG == 64)
-			vunmap(buf->u.direct.buf);
+			vunmap(buf->direct.buf);

 		for (i = 0; i < buf->nbufs; ++i)
-			if (buf->u.page_list[i].buf)
+			if (buf->page_list[i].buf)
 				dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
-						  buf->u.page_list[i].buf,
-						  buf->u.page_list[i].map);
-		kfree(buf->u.page_list);
+						  buf->page_list[i].buf,
+						  buf->page_list[i].map);
+		kfree(buf->page_list);
 	}
 }
 EXPORT_SYMBOL_GPL(mlx4_buf_free);
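
The while loop in the direct path above is the subtle part: page_shift
starts out covering the whole allocation as a single HW page and is
halved (with npages doubled) until the DMA address t is aligned to the
HW page size, since each HW page handed to the HCA must be naturally
aligned. A minimal userspace sketch of just that computation, with
made-up values (not kernel code):

#include <stdio.h>

int main(void)
{
	/* Hypothetical inputs: a 16 KB buffer whose DMA address came
	 * back only 4 KB aligned, on a system with 4 KB pages. */
	unsigned long t = 0x1000;
	int page_shift  = 14;	/* get_order(16384) + PAGE_SHIFT(12) */
	int npages      = 1;

	/* Same loop as mlx4_buf_alloc(): shrink the HW page size until
	 * t is aligned to it, doubling the HW page count each time. */
	while (t & ((1UL << page_shift) - 1)) {
		--page_shift;
		npages *= 2;
	}

	printf("page_shift=%d npages=%d\n", page_shift, npages);
	/* Prints: page_shift=12 npages=4 */
	return 0;
}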

drivers/net/mlx4/mr.c (+2 -2)
···

 	for (i = 0; i < buf->npages; ++i)
 		if (buf->nbufs == 1)
-			page_list[i] = buf->u.direct.map + (i << buf->page_shift);
+			page_list[i] = buf->direct.map + (i << buf->page_shift);
 		else
-			page_list[i] = buf->u.page_list[i].map;
+			page_list[i] = buf->page_list[i].map;

 	err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list);

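
This loop collects the DMA address of every HW page before handing the
array to mlx4_write_mtt(): a direct buffer is one coherent region
sliced at page_shift granularity, while a paged buffer already carries
one DMA address per page. A throwaway sketch of the direct-case
arithmetic, with an invented base address:

#include <stdio.h>

int main(void)
{
	unsigned long long map = 0x10000;	/* hypothetical buf->direct.map */
	int page_shift = 12, npages = 4;
	int i;

	/* Direct case: HW page i starts i << page_shift bytes in. */
	for (i = 0; i < npages; ++i)
		printf("page_list[%d] = 0x%llx\n", i,
		       map + ((unsigned long long)i << page_shift));
	/* Prints 0x10000, 0x11000, 0x12000, 0x13000.  The paged case
	 * would instead copy buf->page_list[i].map verbatim. */
	return 0;
}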

include/linux/mlx4/device.h (+4 -6)
···
 };

 struct mlx4_buf {
-	struct {
-		struct mlx4_buf_list	direct;
-		struct mlx4_buf_list   *page_list;
-	} u;
+	struct mlx4_buf_list	direct;
+	struct mlx4_buf_list	*page_list;
 	int			nbufs;
 	int			npages;
 	int			page_shift;
···
 static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset)
 {
 	if (BITS_PER_LONG == 64 || buf->nbufs == 1)
-		return buf->u.direct.buf + offset;
+		return buf->direct.buf + offset;
 	else
-		return buf->u.page_list[offset >> PAGE_SHIFT].buf +
+		return buf->page_list[offset >> PAGE_SHIFT].buf +
 			(offset & (PAGE_SIZE - 1));
 }
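
The test in mlx4_buf_offset() is worth spelling out: buf->direct.buf is
usable whenever the buffer is virtually contiguous, which is trivially
true for a single chunk and, on 64-bit, also for the multi-chunk case
because mlx4_buf_alloc() vmap()s the pages into one mapping (see the
alloc.c hunk above). Only on 32-bit with multiple chunks must the
offset be split into a page index and an intra-page offset. A
standalone sketch of that split, assuming 4 KB pages (not kernel code):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long offset = 0x2345;	/* arbitrary example offset */

	/* Same decomposition as mlx4_buf_offset()'s fallback path:
	 * high bits pick the page, low bits the byte within it. */
	printf("page %lu, offset 0x%lx within it\n",
	       offset >> PAGE_SHIFT, offset & (PAGE_SIZE - 1));
	/* Prints: page 2, offset 0x345 within it */
	return 0;
}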