Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.25-rc6, 392 lines, 9.7 kB
/*
 * Functions related to tagged command queuing
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

#include "blk.h"

/**
 * blk_queue_find_tag - find a request by its tag and queue
 * @q: The request queue for the device
 * @tag: The tag of the request
 *
 * Notes:
 *    Should be used when a device returns a tag and you want to match
 *    it with a request.
 *
 *    no locks need be held.
 **/
struct request *blk_queue_find_tag(struct request_queue *q, int tag)
{
	return blk_map_queue_find_tag(q->queue_tags, tag);
}
EXPORT_SYMBOL(blk_queue_find_tag);

/**
 * __blk_free_tags - release a given set of tag maintenance info
 * @bqt: the tag map to free
 *
 * Tries to free the specified @bqt@. Returns true if it was
 * actually freed and false if there are still references using it.
 */
static int __blk_free_tags(struct blk_queue_tag *bqt)
{
	int retval;

	retval = atomic_dec_and_test(&bqt->refcnt);
	if (retval) {
		BUG_ON(bqt->busy);

		kfree(bqt->tag_index);
		bqt->tag_index = NULL;

		kfree(bqt->tag_map);
		bqt->tag_map = NULL;

		kfree(bqt);
	}

	return retval;
}

/**
 * __blk_queue_free_tags - release tag maintenance info
 * @q: the request queue for the device
 *
 * Notes:
 *    blk_cleanup_queue() will take care of calling this function, if tagging
 *    has been used. So there's no need to call this directly.
 **/
void __blk_queue_free_tags(struct request_queue *q)
{
	struct blk_queue_tag *bqt = q->queue_tags;

	if (!bqt)
		return;

	__blk_free_tags(bqt);

	q->queue_tags = NULL;
	q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED);
}

/**
 * blk_free_tags - release a given set of tag maintenance info
 * @bqt: the tag map to free
 *
 * For an externally managed @bqt@, this frees the map. Callers of this
 * function must guarantee to have released all the queues that
 * might have been using this tag map.
 */
void blk_free_tags(struct blk_queue_tag *bqt)
{
	if (unlikely(!__blk_free_tags(bqt)))
		BUG();
}
EXPORT_SYMBOL(blk_free_tags);
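/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * driver's completion handler could use blk_queue_find_tag() to map a
 * tag reported by the hardware back to its request. The example_* names
 * below are made up for illustration; the block is kept under #if 0.
 */
#if 0
static void example_complete_by_tag(struct request_queue *q, int hw_tag)
{
	struct request *rq;

	/* no locks needed for the lookup itself, per the note above */
	rq = blk_queue_find_tag(q, hw_tag);
	if (unlikely(!rq)) {
		printk(KERN_ERR "example: spurious completion, tag %d\n",
		       hw_tag);
		return;
	}

	/* tag bookkeeping and final completion run under the queue lock */
	spin_lock_irq(q->queue_lock);
	blk_queue_end_tag(q, rq);
	end_that_request_last(rq, 1);
	spin_unlock_irq(q->queue_lock);
}
#endif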
/**
 * blk_queue_free_tags - release tag maintenance info
 * @q: the request queue for the device
 *
 * Notes:
 *    This is used to disable tagged queuing to a device, yet leave the
 *    queue in function.
 **/
void blk_queue_free_tags(struct request_queue *q)
{
	clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_free_tags);

static int
init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
{
	struct request **tag_index;
	unsigned long *tag_map;
	int nr_ulongs;

	if (q && depth > q->nr_requests * 2) {
		depth = q->nr_requests * 2;
		printk(KERN_ERR "%s: adjusted depth to %d\n",
		       __FUNCTION__, depth);
	}

	tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
	if (!tag_index)
		goto fail;

	nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
	tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
	if (!tag_map)
		goto fail;

	tags->real_max_depth = depth;
	tags->max_depth = depth;
	tags->tag_index = tag_index;
	tags->tag_map = tag_map;

	return 0;
fail:
	kfree(tag_index);
	return -ENOMEM;
}

static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
						   int depth)
{
	struct blk_queue_tag *tags;

	tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
	if (!tags)
		goto fail;

	if (init_tag_map(q, tags, depth))
		goto fail;

	tags->busy = 0;
	atomic_set(&tags->refcnt, 1);
	return tags;
fail:
	kfree(tags);
	return NULL;
}

/**
 * blk_init_tags - initialize the tag info for an external tag map
 * @depth: the maximum queue depth supported
 **/
struct blk_queue_tag *blk_init_tags(int depth)
{
	return __blk_queue_init_tags(NULL, depth);
}
EXPORT_SYMBOL(blk_init_tags);

/**
 * blk_queue_init_tags - initialize the queue tag info
 * @q: the request queue for the device
 * @depth: the maximum queue depth supported
 * @tags: the existing tag map to use, or NULL to allocate a new one
 **/
int blk_queue_init_tags(struct request_queue *q, int depth,
			struct blk_queue_tag *tags)
{
	int rc;

	BUG_ON(tags && q->queue_tags && tags != q->queue_tags);

	if (!tags && !q->queue_tags) {
		tags = __blk_queue_init_tags(q, depth);

		if (!tags)
			goto fail;
	} else if (q->queue_tags) {
		rc = blk_queue_resize_tags(q, depth);
		if (rc)
			return rc;
		set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
		return 0;
	} else
		atomic_inc(&tags->refcnt);

	/*
	 * assign it, all done
	 */
	q->queue_tags = tags;
	q->queue_flags |= (1 << QUEUE_FLAG_QUEUED);
	INIT_LIST_HEAD(&q->tag_busy_list);
	return 0;
fail:
	kfree(tags);
	return -ENOMEM;
}
EXPORT_SYMBOL(blk_queue_init_tags);

/**
 * blk_queue_resize_tags - change the queueing depth
 * @q: the request queue for the device
 * @new_depth: the new max command queueing depth
 *
 * Notes:
 *    Must be called with the queue lock held.
 **/
int blk_queue_resize_tags(struct request_queue *q, int new_depth)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	struct request **tag_index;
	unsigned long *tag_map;
	int max_depth, nr_ulongs;

	if (!bqt)
		return -ENXIO;

	/*
	 * If we already have a large enough real_max_depth, just
	 * adjust max_depth. *NOTE* as requests with tag value
	 * between new_depth and real_max_depth can be in-flight, the
	 * tag map can not be shrunk blindly here.
	 */
	if (new_depth <= bqt->real_max_depth) {
		bqt->max_depth = new_depth;
		return 0;
	}

	/*
	 * Currently cannot replace a shared tag map with a new
	 * one, so error out if this is the case
	 */
	if (atomic_read(&bqt->refcnt) != 1)
		return -EBUSY;

	/*
	 * save the old state info, so we can copy it back
	 */
	tag_index = bqt->tag_index;
	tag_map = bqt->tag_map;
	max_depth = bqt->real_max_depth;

	if (init_tag_map(q, bqt, new_depth))
		return -ENOMEM;

	memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
	nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
	memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));

	kfree(tag_index);
	kfree(tag_map);
	return 0;
}
EXPORT_SYMBOL(blk_queue_resize_tags);
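/*
 * Illustrative sketch (not part of the original file): how a host
 * driver might share one externally managed tag map between two
 * queues, in the style the SCSI midlayer uses. The depth of 64 and
 * the example_* name are made up; the block is kept under #if 0.
 */
#if 0
static int example_setup_shared_tags(struct request_queue *q1,
				     struct request_queue *q2)
{
	struct blk_queue_tag *bqt;

	bqt = blk_init_tags(64);		/* refcnt starts at 1 */
	if (!bqt)
		return -ENOMEM;

	/* each queue that adopts the map takes another reference */
	if (blk_queue_init_tags(q1, 64, bqt) ||
	    blk_queue_init_tags(q2, 64, bqt)) {
		/* error unwinding elided for brevity */
		return -ENOMEM;
	}

	/*
	 * On teardown, blk_cleanup_queue() drops the per-queue
	 * references; blk_free_tags(bqt) then releases the map itself
	 * once both queues are gone.
	 */
	return 0;
}
#endif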
/**
 * blk_queue_end_tag - end tag operations for a request
 * @q: the request queue for the device
 * @rq: the request that has completed
 *
 * Description:
 *    Typically called when end_that_request_first() returns 0, meaning
 *    all transfers have been done for a request. It's important to call
 *    this function before end_that_request_last(), as that will put the
 *    request back on the free list thus corrupting the internal tag list.
 *
 * Notes:
 *    queue lock must be held.
 **/
void blk_queue_end_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	int tag = rq->tag;

	BUG_ON(tag == -1);

	if (unlikely(tag >= bqt->real_max_depth))
		/*
		 * This can happen after tag depth has been reduced.
		 * FIXME: how about a warning or info message here?
		 */
		return;

	list_del_init(&rq->queuelist);
	rq->cmd_flags &= ~REQ_QUEUED;
	rq->tag = -1;

	if (unlikely(bqt->tag_index[tag] == NULL))
		printk(KERN_ERR "%s: tag %d is missing\n",
		       __FUNCTION__, tag);

	bqt->tag_index[tag] = NULL;

	if (unlikely(!test_bit(tag, bqt->tag_map))) {
		printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
		       __FUNCTION__, tag);
		return;
	}
	/*
	 * The tag_map bit acts as a lock for tag_index[bit], so we need
	 * unlock memory barrier semantics.
	 */
	clear_bit_unlock(tag, bqt->tag_map);
	bqt->busy--;
}
EXPORT_SYMBOL(blk_queue_end_tag);

/**
 * blk_queue_start_tag - find a free tag and assign it
 * @q: the request queue for the device
 * @rq: the block request that needs tagging
 *
 * Description:
 *    This can either be used as a stand-alone helper, or possibly be
 *    assigned as the queue &prep_rq_fn (in which case &struct request
 *    automagically gets a tag assigned). Note that this function
 *    assumes that any type of request can be queued! If this is not
 *    true for your device, you must check the request type before
 *    calling this function. The request will also be removed from
 *    the request queue, so it's the driver's responsibility to re-add
 *    it if it should need to be restarted for some reason.
 *
 * Notes:
 *    queue lock must be held.
 **/
int blk_queue_start_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	int tag;

	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
		printk(KERN_ERR
		       "%s: request %p for device [%s] already tagged %d",
		       __FUNCTION__, rq,
		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
		BUG();
	}

	/*
	 * Protect against shared tag maps, as we may not have exclusive
	 * access to the tag map.
	 */
	do {
		tag = find_first_zero_bit(bqt->tag_map, bqt->max_depth);
		if (tag >= bqt->max_depth)
			return 1;

	} while (test_and_set_bit_lock(tag, bqt->tag_map));
	/*
	 * We need lock ordering semantics given by test_and_set_bit_lock.
	 * See blk_queue_end_tag for details.
	 */

	rq->cmd_flags |= REQ_QUEUED;
	rq->tag = tag;
	bqt->tag_index[tag] = rq;
	blkdev_dequeue_request(rq);
	list_add(&rq->queuelist, &q->tag_busy_list);
	bqt->busy++;
	return 0;
}
EXPORT_SYMBOL(blk_queue_start_tag);
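/*
 * Illustrative sketch (not part of the original file): the usual
 * pairing of blk_queue_start_tag() in a driver's request_fn with
 * blk_queue_end_tag() in the completion path. example_issue_to_hw()
 * and the example_* function names are made up; kept under #if 0.
 */
#if 0
static void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	/* a request_fn is entered with the queue lock held */
	while ((rq = elv_next_request(q)) != NULL) {
		if (blk_queue_start_tag(q, rq))
			break;	/* all tags busy; retry on next completion */
		/* blk_queue_start_tag() already dequeued rq for us */
		example_issue_to_hw(rq);
	}
}

static void example_complete(struct request_queue *q, struct request *rq)
{
	/*
	 * Queue lock held, end_that_request_first() returned 0.
	 * blk_queue_end_tag() must come before end_that_request_last(),
	 * per the ordering documented above.
	 */
	blk_queue_end_tag(q, rq);
	end_that_request_last(rq, 1);
}
#endif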
/**
 * blk_queue_invalidate_tags - invalidate all pending tags
 * @q: the request queue for the device
 *
 * Description:
 *    Hardware conditions may dictate a need to stop all pending requests.
 *    In this case, we will safely clear the block side of the tag queue and
 *    re-add all requests to the request queue in the right order.
 *
 * Notes:
 *    queue lock must be held.
 **/
void blk_queue_invalidate_tags(struct request_queue *q)
{
	struct list_head *tmp, *n;

	list_for_each_safe(tmp, n, &q->tag_busy_list)
		blk_requeue_request(q, list_entry_rq(tmp));
}
EXPORT_SYMBOL(blk_queue_invalidate_tags);
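/*
 * Illustrative sketch (not part of the original file): error recovery
 * after a hypothetical controller reset. Every tagged request is put
 * back on the request queue to be reissued by the request_fn. The
 * example_* name is made up; kept under #if 0.
 */
#if 0
static void example_reset_recovery(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_queue_invalidate_tags(q);	/* re-queues every busy request */
	spin_unlock_irqrestore(q->queue_lock, flags);
}
#endif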