/* at v2.6.24-rc2 */
/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/dma-mapping.h>

#include "mlx4.h"

u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
{
	u32 obj;

	spin_lock(&bitmap->lock);

	obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
	if (obj >= bitmap->max) {
		/* Wrapped around: bump the generation in the high bits
		 * (bounded by ->mask) and rescan from the start. */
		bitmap->top = (bitmap->top + bitmap->max) & bitmap->mask;
		obj = find_first_zero_bit(bitmap->table, bitmap->max);
	}

	if (obj < bitmap->max) {
		set_bit(obj, bitmap->table);
		/* ->max is a power of 2, so this wraps ->last cheaply. */
		bitmap->last = (obj + 1) & (bitmap->max - 1);
		/* Fold the generation bits into the returned handle. */
		obj |= bitmap->top;
	} else
		obj = -1;

	spin_unlock(&bitmap->lock);

	return obj;
}

void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj)
{
	obj &= bitmap->max - 1;		/* strip the generation bits */

	spin_lock(&bitmap->lock);
	clear_bit(obj, bitmap->table);
	/* Let the next search start at or before the freed bit. */
	bitmap->last = min(bitmap->last, obj);
	bitmap->top = (bitmap->top + bitmap->max) & bitmap->mask;
	spin_unlock(&bitmap->lock);
}

int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, u32 reserved)
{
	int i;

	/* num must be a power of 2 */
	if (num != roundup_pow_of_two(num))
		return -EINVAL;

	bitmap->last = 0;
	bitmap->top  = 0;
	bitmap->max  = num;
	bitmap->mask = mask;
	spin_lock_init(&bitmap->lock);
	bitmap->table = kzalloc(BITS_TO_LONGS(num) * sizeof (long), GFP_KERNEL);
	if (!bitmap->table)
		return -ENOMEM;

	/* The bottom 'reserved' objects stay permanently allocated. */
	for (i = 0; i < reserved; ++i)
		set_bit(i, bitmap->table);

	return 0;
}

void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap)
{
	kfree(bitmap->table);
}
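
/*
 * Usage sketch (illustrative, not part of the original file): how a
 * consumer might drive the bitmap allocator above.  The "demo_*" names
 * and the sizes are hypothetical -- in the real driver the QP/CQ/SRQ
 * tables call these with counts and reserved ranges read from firmware,
 * and a nonzero 'mask' when generation bits are wanted in the handles.
 */
static struct mlx4_bitmap demo_bitmap;

static int demo_init(void)
{
	/* 256 objects, no generation bits (mask = 0), bottom 8 reserved. */
	return mlx4_bitmap_init(&demo_bitmap, 256, 0, 8);
}

static int demo_get(u32 *obj)
{
	*obj = mlx4_bitmap_alloc(&demo_bitmap);
	if (*obj == -1)			/* bitmap exhausted */
		return -ENOMEM;
	return 0;
}

static void demo_put(u32 obj)
{
	mlx4_bitmap_free(&demo_bitmap, obj);
}

static void demo_exit(void)
{
	mlx4_bitmap_cleanup(&demo_bitmap);
}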

/*
 * Handling for queue buffers -- we allocate a bunch of memory and
 * register it in a memory region at HCA virtual address 0.  If the
 * requested size is > max_direct, we split the allocation into
 * multiple pages, so we don't require too much contiguous memory.
 */

int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
		   struct mlx4_buf *buf)
{
	dma_addr_t t;

	if (size <= max_direct) {
		buf->nbufs        = 1;
		buf->npages       = 1;
		buf->page_shift   = get_order(size) + PAGE_SHIFT;
		buf->u.direct.buf = dma_alloc_coherent(&dev->pdev->dev,
						       size, &t, GFP_KERNEL);
		if (!buf->u.direct.buf)
			return -ENOMEM;

		buf->u.direct.map = t;

		/* Shrink the reported page size until the DMA address is
		 * aligned to it, doubling the page count each step. */
		while (t & ((1 << buf->page_shift) - 1)) {
			--buf->page_shift;
			buf->npages *= 2;
		}

		memset(buf->u.direct.buf, 0, size);
	} else {
		int i;

		buf->nbufs       = (size + PAGE_SIZE - 1) / PAGE_SIZE;
		buf->npages      = buf->nbufs;
		buf->page_shift  = PAGE_SHIFT;
		buf->u.page_list = kzalloc(buf->nbufs * sizeof *buf->u.page_list,
					   GFP_KERNEL);
		if (!buf->u.page_list)
			return -ENOMEM;

		for (i = 0; i < buf->nbufs; ++i) {
			buf->u.page_list[i].buf =
				dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
						   &t, GFP_KERNEL);
			if (!buf->u.page_list[i].buf)
				goto err_free;

			buf->u.page_list[i].map = t;

			memset(buf->u.page_list[i].buf, 0, PAGE_SIZE);
		}
	}

	return 0;

err_free:
	mlx4_buf_free(dev, size, buf);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(mlx4_buf_alloc);

void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
{
	int i;

	if (buf->nbufs == 1)
		dma_free_coherent(&dev->pdev->dev, size, buf->u.direct.buf,
				  buf->u.direct.map);
	else {
		for (i = 0; i < buf->nbufs; ++i)
			dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
					  buf->u.page_list[i].buf,
					  buf->u.page_list[i].map);
		kfree(buf->u.page_list);
	}
}
EXPORT_SYMBOL_GPL(mlx4_buf_free);
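
/*
 * Usage sketch (illustrative, not part of the original file): allocating
 * a queue buffer and handling both layouts.  "demo_make_queue" and the
 * 2048-byte max_direct threshold are hypothetical; real callers take
 * max_direct from module parameters or device limits.
 */
static int demo_make_queue(struct mlx4_dev *dev, int size)
{
	struct mlx4_buf buf;
	int err;

	err = mlx4_buf_alloc(dev, size, 2048, &buf);
	if (err)
		return err;

	if (buf.nbufs == 1) {
		/* Direct layout: one coherent chunk at u.direct.buf,
		 * DMA address in u.direct.map. */
	} else {
		/* Indirect layout: one PAGE_SIZE chunk per entry of
		 * u.page_list[], each with its own DMA mapping;
		 * buf.page_shift gives the page size to program into
		 * the HCA's memory region. */
	}

	mlx4_buf_free(dev, size, &buf);
	return 0;
}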