blk-rq-qos: Remove unlikely() hints from QoS checks

The unlikely() annotations on the QUEUE_FLAG_QOS_ENABLED checks are
counterproductive: writeback throttling (WBT) is commonly enabled by
default because CONFIG_BLK_WBT_MQ defaults to 'y', so the QoS-enabled
path is the common case, not the exception.
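
For context, unlikely() is a compile-time layout hint, not a runtime
check. A minimal sketch of the pattern, simplified from
include/linux/compiler.h (with branch profiling disabled):

/*
 * The hint tells the compiler to optimize for the condition being
 * false: the guarded code is laid out as a cold, out-of-line block,
 * and the fall-through path assumes the flag is clear.
 */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

With WBT active, the QUEUE_FLAG_QOS_ENABLED test is true on nearly
every I/O, so that cold-path layout is exactly backwards.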

Branch profiling on Meta servers, which run with WBT enabled, confirms
a 100% misprediction rate on these checks: the condition annotated as
unlikely is in fact true on essentially every I/O.

Remove the unlikely() annotations so the compiler no longer lays the
QoS path out as cold code and the CPU's branch predictor can follow
the actual behavior, potentially improving I/O path performance.
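
Condensed to one of the helpers touched below, the before/after
pattern (see the full diff for all call sites):

/* Before: the QoS work is hinted as the cold path. */
if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
    q->rq_qos)
	__rq_qos_cleanup(q->rq_qos, bio);

/* After: no static hint; the call stays inline and the CPU's dynamic
 * predictor settles on whatever the workload actually does. */
if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
	__rq_qos_cleanup(q->rq_qos, bio);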

Signed-off-by: Breno Leitao <leitao@debian.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Changed files:
 block/blk-rq-qos.h | +9 -16
--- a/block/blk-rq-qos.h
+++ b/block/blk-rq-qos.h
···
 
 static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
 {
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos)
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
 		__rq_qos_cleanup(q->rq_qos, bio);
 }
 
 static inline void rq_qos_done(struct request_queue *q, struct request *rq)
 {
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos && !blk_rq_is_passthrough(rq))
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) &&
+	    q->rq_qos && !blk_rq_is_passthrough(rq))
 		__rq_qos_done(q->rq_qos, rq);
 }
 
 static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
 {
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos)
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
 		__rq_qos_issue(q->rq_qos, rq);
 }
 
 static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
 {
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos)
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
 		__rq_qos_requeue(q->rq_qos, rq);
 }
 
···
 static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
 {
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos) {
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos) {
 		bio_set_flag(bio, BIO_QOS_THROTTLED);
 		__rq_qos_throttle(q->rq_qos, bio);
 	}
···
 static inline void rq_qos_track(struct request_queue *q, struct request *rq,
 				struct bio *bio)
 {
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos)
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
 		__rq_qos_track(q->rq_qos, rq, bio);
 }
 
 static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
 				struct bio *bio)
 {
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos) {
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos) {
 		bio_set_flag(bio, BIO_QOS_MERGED);
 		__rq_qos_merge(q->rq_qos, rq, bio);
 	}
···
 
 static inline void rq_qos_queue_depth_changed(struct request_queue *q)
 {
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos)
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
 		__rq_qos_queue_depth_changed(q->rq_qos);
 }
 