[PATCH] cfq build fix

drivers/block/cfq-iosched.c: In function 'cfq_put_queue':
drivers/block/cfq-iosched.c:303: sorry, unimplemented: inlining failed in call to 'cfq_pending_requests': function body not available
drivers/block/cfq-iosched.c:1080: sorry, unimplemented: called from here
drivers/block/cfq-iosched.c: In function '__cfq_may_queue':
drivers/block/cfq-iosched.c:1955: warning: the address of 'cfq_cfqq_must_alloc_slice', will always evaluate as 'true'
make[1]: *** [drivers/block/cfq-iosched.o] Error 1
make: *** [drivers/block/cfq-iosched.o] Error 2
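
Two things broke here. cfq_pending_requests() was only forward-declared as a static inline near the top of the file, with its body defined much further down; with inline forced to be always inlined (as the kernel build of the time did), gcc needs the body in hand when it reaches the first call and bails out with the "sorry, unimplemented" error. The warning in __cfq_may_queue() comes from writing !cfq_cfqq_must_alloc_slice without the (cfqq) argument: that tests the address of the helper rather than its return value, and a function's address is never NULL. The patch moves cfq_pending_requests(), cfq_schedule_dispatch() and cfq_queue_empty() above their first users and adds the missing (cfqq) calls. For illustration only, a reduced sketch (made-up names, not the driver code) that reproduces both diagnostics with a compiler of that era:

/*
 * Reduced sketch, not the driver code -- pending(), caller() and
 * may_queue() are made-up names.  It mirrors the two problems behind
 * the diagnostics quoted above.
 */

/* 1) A forced-inline helper that is forward-declared and only defined
 *    after its caller.  gcc of that era (as the kernel invoked it)
 *    expanded inline calls while parsing, so the missing body produced
 *    "sorry, unimplemented: inlining failed ... function body not
 *    available".  Newer compilers may accept this ordering, but defining
 *    the helper before its first user, which is what the patch does, is
 *    the portable fix. */
static inline __attribute__((always_inline)) int pending(void);

int caller(void)
{
        return pending();               /* body not seen yet */
}

static inline __attribute__((always_inline)) int pending(void)
{
        return 1;
}

/* 2) Testing the helper instead of calling it.  "!pending" negates the
 *    address of the function, which is never NULL, hence the "will
 *    always evaluate as 'true'" warning (exact wording varies by gcc
 *    version) -- the same mistake as "!cfq_cfqq_must_alloc_slice"
 *    without "(cfqq)". */
int may_queue(void)
{
        if (!pending)                   /* should be !pending() */
                return 0;

        return 1;
}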

Cc: Jeff Garzik <jgarzik@pobox.com>
Cc: Jens Axboe <axboe@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Andrew Morton and committed by Linus Torvalds (99f95e52, 6e5a3275)

+24 -25
drivers/block/cfq-iosched.c
@@ -300,7 +300,6 @@
 static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short);
 static void cfq_dispatch_sort(request_queue_t *, struct cfq_rq *);
 static void cfq_put_cfqd(struct cfq_data *cfqd);
-static inline int cfq_pending_requests(struct cfq_data *cfqd);
 
 #define process_sync(tsk) ((tsk)->flags & PF_SYNCWRITE)
 
@@ -345,6 +344,28 @@
         }
 
         return NULL;
+}
+
+static inline int cfq_pending_requests(struct cfq_data *cfqd)
+{
+        return !list_empty(&cfqd->queue->queue_head) || cfqd->busy_queues;
+}
+
+/*
+ * scheduler run of queue, if there are requests pending and no one in the
+ * driver that will restart queueing
+ */
+static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
+{
+        if (!cfqd->rq_in_driver && cfq_pending_requests(cfqd))
+                kblockd_schedule_work(&cfqd->unplug_work);
+}
+
+static int cfq_queue_empty(request_queue_t *q)
+{
+        struct cfq_data *cfqd = q->elevator->elevator_data;
+
+        return !cfq_pending_requests(cfqd);
 }
 
 /*
@@ -1090,16 +1111,6 @@
         WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
 
         return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
-}
-
-/*
- * scheduler run of queue, if there are requests pending and no one in the
- * driver that will restart queueing
- */
-static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
-{
-        if (!cfqd->rq_in_driver && cfq_pending_requests(cfqd))
-                kblockd_schedule_work(&cfqd->unplug_work);
 }
 
 /*
@@ -1857,18 +1868,6 @@
         }
 }
 
-static inline int cfq_pending_requests(struct cfq_data *cfqd)
-{
-        return !list_empty(&cfqd->queue->queue_head) || cfqd->busy_queues;
-}
-
-static int cfq_queue_empty(request_queue_t *q)
-{
-        struct cfq_data *cfqd = q->elevator->elevator_data;
-
-        return !cfq_pending_requests(cfqd);
-}
-
 static void cfq_completed_request(request_queue_t *q, struct request *rq)
 {
         struct cfq_rq *crq = RQ_DATA(rq);
@@ -1951,7 +1950,7 @@
 {
 #if 1
         if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
-            !cfq_cfqq_must_alloc_slice) {
+            !cfq_cfqq_must_alloc_slice(cfqq)) {
                 cfq_mark_cfqq_must_alloc_slice(cfqq);
                 return ELV_MQUEUE_MUST;
         }
@@ -1968,7 +1967,7 @@
          * only allow 1 ELV_MQUEUE_MUST per slice, otherwise we
          * can quickly flood the queue with writes from a single task
          */
-        if (rw == READ || !cfq_cfqq_must_alloc_slice) {
+        if (rw == READ || !cfq_cfqq_must_alloc_slice(cfqq)) {
                 cfq_mark_cfqq_must_alloc_slice(cfqq);
                 return ELV_MQUEUE_MUST;
         }