Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

md/raid6: let async recovery function support different page offset

Currently, the asynchronous raid6 recovery calculation functions require a
common offset for all pages. But we expect them to support different page
offsets after introducing shared stripe pages. Do that by simply adding
the page offset wherever each page address is referenced. Then, replace the
old interfaces with the new ones in raid6 and raid6test.

Signed-off-by: Yufen Yu <yuyufen@huawei.com>
Signed-off-by: Song Liu <songliubraving@fb.com>

authored by

Yufen Yu and committed by
Song Liu
4f86ff55 d69454bc

+128 -49
+120 -43
crypto/async_tx/async_raid6_recov.c
··· 15 15 #include <linux/dmaengine.h> 16 16 17 17 static struct dma_async_tx_descriptor * 18 - async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef, 19 - size_t len, struct async_submit_ctl *submit) 18 + async_sum_product(struct page *dest, unsigned int d_off, 19 + struct page **srcs, unsigned int *src_offs, unsigned char *coef, 20 + size_t len, struct async_submit_ctl *submit) 20 21 { 21 22 struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, 22 23 &dest, 1, srcs, 2, len); ··· 38 37 39 38 if (submit->flags & ASYNC_TX_FENCE) 40 39 dma_flags |= DMA_PREP_FENCE; 41 - unmap->addr[0] = dma_map_page(dev, srcs[0], 0, len, DMA_TO_DEVICE); 42 - unmap->addr[1] = dma_map_page(dev, srcs[1], 0, len, DMA_TO_DEVICE); 40 + unmap->addr[0] = dma_map_page(dev, srcs[0], src_offs[0], 41 + len, DMA_TO_DEVICE); 42 + unmap->addr[1] = dma_map_page(dev, srcs[1], src_offs[1], 43 + len, DMA_TO_DEVICE); 43 44 unmap->to_cnt = 2; 44 45 45 - unmap->addr[2] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); 46 + unmap->addr[2] = dma_map_page(dev, dest, d_off, 47 + len, DMA_BIDIRECTIONAL); 46 48 unmap->bidi_cnt = 1; 47 49 /* engine only looks at Q, but expects it to follow P */ 48 50 pq[1] = unmap->addr[2]; ··· 70 66 async_tx_quiesce(&submit->depend_tx); 71 67 amul = raid6_gfmul[coef[0]]; 72 68 bmul = raid6_gfmul[coef[1]]; 73 - a = page_address(srcs[0]); 74 - b = page_address(srcs[1]); 75 - c = page_address(dest); 69 + a = page_address(srcs[0]) + src_offs[0]; 70 + b = page_address(srcs[1]) + src_offs[1]; 71 + c = page_address(dest) + d_off; 76 72 77 73 while (len--) { 78 74 ax = amul[*a++]; ··· 84 80 } 85 81 86 82 static struct dma_async_tx_descriptor * 87 - async_mult(struct page *dest, struct page *src, u8 coef, size_t len, 88 - struct async_submit_ctl *submit) 83 + async_mult(struct page *dest, unsigned int d_off, struct page *src, 84 + unsigned int s_off, u8 coef, size_t len, 85 + struct async_submit_ctl *submit) 89 86 { 90 87 struct dma_chan *chan = 
async_tx_find_channel(submit, DMA_PQ, 91 88 &dest, 1, &src, 1, len); ··· 106 101 107 102 if (submit->flags & ASYNC_TX_FENCE) 108 103 dma_flags |= DMA_PREP_FENCE; 109 - unmap->addr[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE); 104 + unmap->addr[0] = dma_map_page(dev, src, s_off, 105 + len, DMA_TO_DEVICE); 110 106 unmap->to_cnt++; 111 - unmap->addr[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); 107 + unmap->addr[1] = dma_map_page(dev, dest, d_off, 108 + len, DMA_BIDIRECTIONAL); 112 109 dma_dest[1] = unmap->addr[1]; 113 110 unmap->bidi_cnt++; 114 111 unmap->len = len; ··· 140 133 */ 141 134 async_tx_quiesce(&submit->depend_tx); 142 135 qmul = raid6_gfmul[coef]; 143 - d = page_address(dest); 144 - s = page_address(src); 136 + d = page_address(dest) + d_off; 137 + s = page_address(src) + s_off; 145 138 146 139 while (len--) 147 140 *d++ = qmul[*s++]; ··· 151 144 152 145 static struct dma_async_tx_descriptor * 153 146 __2data_recov_4(int disks, size_t bytes, int faila, int failb, 154 - struct page **blocks, struct async_submit_ctl *submit) 147 + struct page **blocks, unsigned int *offs, 148 + struct async_submit_ctl *submit) 155 149 { 156 150 struct dma_async_tx_descriptor *tx = NULL; 157 151 struct page *p, *q, *a, *b; 152 + unsigned int p_off, q_off, a_off, b_off; 158 153 struct page *srcs[2]; 154 + unsigned int src_offs[2]; 159 155 unsigned char coef[2]; 160 156 enum async_tx_flags flags = submit->flags; 161 157 dma_async_tx_callback cb_fn = submit->cb_fn; ··· 166 156 void *scribble = submit->scribble; 167 157 168 158 p = blocks[disks-2]; 159 + p_off = offs[disks-2]; 169 160 q = blocks[disks-1]; 161 + q_off = offs[disks-1]; 170 162 171 163 a = blocks[faila]; 164 + a_off = offs[faila]; 172 165 b = blocks[failb]; 166 + b_off = offs[failb]; 173 167 174 168 /* in the 4 disk case P + Pxy == P and Q + Qxy == Q */ 175 169 /* Dx = A*(P+Pxy) + B*(Q+Qxy) */ 176 170 srcs[0] = p; 171 + src_offs[0] = p_off; 177 172 srcs[1] = q; 173 + src_offs[1] = q_off; 178 174 
coef[0] = raid6_gfexi[failb-faila]; 179 175 coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]; 180 176 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); 181 - tx = async_sum_product(b, srcs, coef, bytes, submit); 177 + tx = async_sum_product(b, b_off, srcs, src_offs, coef, bytes, submit); 182 178 183 179 /* Dy = P+Pxy+Dx */ 184 180 srcs[0] = p; 181 + src_offs[0] = p_off; 185 182 srcs[1] = b; 183 + src_offs[1] = b_off; 186 184 init_async_submit(submit, flags | ASYNC_TX_XOR_ZERO_DST, tx, cb_fn, 187 185 cb_param, scribble); 188 - tx = async_xor(a, srcs, 0, 2, bytes, submit); 186 + tx = async_xor_offs(a, a_off, srcs, src_offs, 2, bytes, submit); 189 187 190 188 return tx; 191 189 ··· 201 183 202 184 static struct dma_async_tx_descriptor * 203 185 __2data_recov_5(int disks, size_t bytes, int faila, int failb, 204 - struct page **blocks, struct async_submit_ctl *submit) 186 + struct page **blocks, unsigned int *offs, 187 + struct async_submit_ctl *submit) 205 188 { 206 189 struct dma_async_tx_descriptor *tx = NULL; 207 190 struct page *p, *q, *g, *dp, *dq; 191 + unsigned int p_off, q_off, g_off, dp_off, dq_off; 208 192 struct page *srcs[2]; 193 + unsigned int src_offs[2]; 209 194 unsigned char coef[2]; 210 195 enum async_tx_flags flags = submit->flags; 211 196 dma_async_tx_callback cb_fn = submit->cb_fn; ··· 229 208 BUG_ON(good_srcs > 1); 230 209 231 210 p = blocks[disks-2]; 211 + p_off = offs[disks-2]; 232 212 q = blocks[disks-1]; 213 + q_off = offs[disks-1]; 233 214 g = blocks[good]; 215 + g_off = offs[good]; 234 216 235 217 /* Compute syndrome with zero for the missing data pages 236 218 * Use the dead data pages as temporary storage for delta p and 237 219 * delta q 238 220 */ 239 221 dp = blocks[faila]; 222 + dp_off = offs[faila]; 240 223 dq = blocks[failb]; 224 + dq_off = offs[failb]; 241 225 242 226 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); 243 - tx = async_memcpy(dp, g, 0, 0, bytes, submit); 227 + tx = 
async_memcpy(dp, g, dp_off, g_off, bytes, submit); 244 228 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); 245 - tx = async_mult(dq, g, raid6_gfexp[good], bytes, submit); 229 + tx = async_mult(dq, dq_off, g, g_off, 230 + raid6_gfexp[good], bytes, submit); 246 231 247 232 /* compute P + Pxy */ 248 233 srcs[0] = dp; 234 + src_offs[0] = dp_off; 249 235 srcs[1] = p; 236 + src_offs[1] = p_off; 250 237 init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, 251 238 NULL, NULL, scribble); 252 - tx = async_xor(dp, srcs, 0, 2, bytes, submit); 239 + tx = async_xor_offs(dp, dp_off, srcs, src_offs, 2, bytes, submit); 253 240 254 241 /* compute Q + Qxy */ 255 242 srcs[0] = dq; 243 + src_offs[0] = dq_off; 256 244 srcs[1] = q; 245 + src_offs[1] = q_off; 257 246 init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, 258 247 NULL, NULL, scribble); 259 - tx = async_xor(dq, srcs, 0, 2, bytes, submit); 248 + tx = async_xor_offs(dq, dq_off, srcs, src_offs, 2, bytes, submit); 260 249 261 250 /* Dx = A*(P+Pxy) + B*(Q+Qxy) */ 262 251 srcs[0] = dp; 252 + src_offs[0] = dp_off; 263 253 srcs[1] = dq; 254 + src_offs[1] = dq_off; 264 255 coef[0] = raid6_gfexi[failb-faila]; 265 256 coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]; 266 257 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); 267 - tx = async_sum_product(dq, srcs, coef, bytes, submit); 258 + tx = async_sum_product(dq, dq_off, srcs, src_offs, coef, bytes, submit); 268 259 269 260 /* Dy = P+Pxy+Dx */ 270 261 srcs[0] = dp; 262 + src_offs[0] = dp_off; 271 263 srcs[1] = dq; 264 + src_offs[1] = dq_off; 272 265 init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn, 273 266 cb_param, scribble); 274 - tx = async_xor(dp, srcs, 0, 2, bytes, submit); 267 + tx = async_xor_offs(dp, dp_off, srcs, src_offs, 2, bytes, submit); 275 268 276 269 return tx; 277 270 } 278 271 279 272 static struct dma_async_tx_descriptor * 280 273 __2data_recov_n(int disks, size_t 
bytes, int faila, int failb, 281 - struct page **blocks, struct async_submit_ctl *submit) 274 + struct page **blocks, unsigned int *offs, 275 + struct async_submit_ctl *submit) 282 276 { 283 277 struct dma_async_tx_descriptor *tx = NULL; 284 278 struct page *p, *q, *dp, *dq; 279 + unsigned int p_off, q_off, dp_off, dq_off; 285 280 struct page *srcs[2]; 281 + unsigned int src_offs[2]; 286 282 unsigned char coef[2]; 287 283 enum async_tx_flags flags = submit->flags; 288 284 dma_async_tx_callback cb_fn = submit->cb_fn; ··· 307 269 void *scribble = submit->scribble; 308 270 309 271 p = blocks[disks-2]; 272 + p_off = offs[disks-2]; 310 273 q = blocks[disks-1]; 274 + q_off = offs[disks-1]; 311 275 312 276 /* Compute syndrome with zero for the missing data pages 313 277 * Use the dead data pages as temporary storage for 314 278 * delta p and delta q 315 279 */ 316 280 dp = blocks[faila]; 281 + dp_off = offs[faila]; 317 282 blocks[faila] = NULL; 318 283 blocks[disks-2] = dp; 284 + offs[disks-2] = dp_off; 319 285 dq = blocks[failb]; 286 + dq_off = offs[failb]; 320 287 blocks[failb] = NULL; 321 288 blocks[disks-1] = dq; 289 + offs[disks-1] = dq_off; 322 290 323 291 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); 324 - tx = async_gen_syndrome(blocks, 0, disks, bytes, submit); 292 + tx = async_gen_syndrome(blocks, offs, disks, bytes, submit); 325 293 326 294 /* Restore pointer table */ 327 295 blocks[faila] = dp; 296 + offs[faila] = dp_off; 328 297 blocks[failb] = dq; 298 + offs[failb] = dq_off; 329 299 blocks[disks-2] = p; 300 + offs[disks-2] = p_off; 330 301 blocks[disks-1] = q; 302 + offs[disks-1] = q_off; 331 303 332 304 /* compute P + Pxy */ 333 305 srcs[0] = dp; 306 + src_offs[0] = dp_off; 334 307 srcs[1] = p; 308 + src_offs[1] = p_off; 335 309 init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, 336 310 NULL, NULL, scribble); 337 - tx = async_xor(dp, srcs, 0, 2, bytes, submit); 311 + tx = async_xor_offs(dp, dp_off, srcs, src_offs, 2, 
bytes, submit); 338 312 339 313 /* compute Q + Qxy */ 340 314 srcs[0] = dq; 315 + src_offs[0] = dq_off; 341 316 srcs[1] = q; 317 + src_offs[1] = q_off; 342 318 init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, 343 319 NULL, NULL, scribble); 344 - tx = async_xor(dq, srcs, 0, 2, bytes, submit); 320 + tx = async_xor_offs(dq, dq_off, srcs, src_offs, 2, bytes, submit); 345 321 346 322 /* Dx = A*(P+Pxy) + B*(Q+Qxy) */ 347 323 srcs[0] = dp; 324 + src_offs[0] = dp_off; 348 325 srcs[1] = dq; 326 + src_offs[1] = dq_off; 349 327 coef[0] = raid6_gfexi[failb-faila]; 350 328 coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]; 351 329 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); 352 - tx = async_sum_product(dq, srcs, coef, bytes, submit); 330 + tx = async_sum_product(dq, dq_off, srcs, src_offs, coef, bytes, submit); 353 331 354 332 /* Dy = P+Pxy+Dx */ 355 333 srcs[0] = dp; 334 + src_offs[0] = dp_off; 356 335 srcs[1] = dq; 336 + src_offs[1] = dq_off; 357 337 init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn, 358 338 cb_param, scribble); 359 - tx = async_xor(dp, srcs, 0, 2, bytes, submit); 339 + tx = async_xor_offs(dp, dp_off, srcs, src_offs, 2, bytes, submit); 360 340 361 341 return tx; 362 342 } ··· 386 330 * @faila: first failed drive index 387 331 * @failb: second failed drive index 388 332 * @blocks: array of source pointers where the last two entries are p and q 333 + * @offs: array of offset for pages in blocks 389 334 * @submit: submission/completion modifiers 390 335 */ 391 336 struct dma_async_tx_descriptor * 392 337 async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb, 393 - struct page **blocks, struct async_submit_ctl *submit) 338 + struct page **blocks, unsigned int *offs, 339 + struct async_submit_ctl *submit) 394 340 { 395 341 void *scribble = submit->scribble; 396 342 int non_zero_srcs, i; ··· 416 358 if (blocks[i] == NULL) 417 359 ptrs[i] = (void *) raid6_empty_zero_page; 418 360 
else 419 - ptrs[i] = page_address(blocks[i]); 361 + ptrs[i] = page_address(blocks[i]) + offs[i]; 420 362 421 363 raid6_2data_recov(disks, bytes, faila, failb, ptrs); 422 364 ··· 441 383 * explicitly handle the special case of a 4 disk array with 442 384 * both data disks missing. 443 385 */ 444 - return __2data_recov_4(disks, bytes, faila, failb, blocks, submit); 386 + return __2data_recov_4(disks, bytes, faila, failb, 387 + blocks, offs, submit); 445 388 case 3: 446 389 /* dma devices do not uniformly understand a single 447 390 * source pq operation (in contrast to the synchronous 448 391 * case), so explicitly handle the special case of a 5 disk 449 392 * array with 2 of 3 data disks missing. 450 393 */ 451 - return __2data_recov_5(disks, bytes, faila, failb, blocks, submit); 394 + return __2data_recov_5(disks, bytes, faila, failb, 395 + blocks, offs, submit); 452 396 default: 453 - return __2data_recov_n(disks, bytes, faila, failb, blocks, submit); 397 + return __2data_recov_n(disks, bytes, faila, failb, 398 + blocks, offs, submit); 454 399 } 455 400 } 456 401 EXPORT_SYMBOL_GPL(async_raid6_2data_recov); ··· 464 403 * @bytes: block size 465 404 * @faila: failed drive index 466 405 * @blocks: array of source pointers where the last two entries are p and q 406 + * @offs: array of offset for pages in blocks 467 407 * @submit: submission/completion modifiers 468 408 */ 469 409 struct dma_async_tx_descriptor * 470 410 async_raid6_datap_recov(int disks, size_t bytes, int faila, 471 - struct page **blocks, struct async_submit_ctl *submit) 411 + struct page **blocks, unsigned int *offs, 412 + struct async_submit_ctl *submit) 472 413 { 473 414 struct dma_async_tx_descriptor *tx = NULL; 474 415 struct page *p, *q, *dq; 416 + unsigned int p_off, q_off, dq_off; 475 417 u8 coef; 476 418 enum async_tx_flags flags = submit->flags; 477 419 dma_async_tx_callback cb_fn = submit->cb_fn; ··· 482 418 void *scribble = submit->scribble; 483 419 int good_srcs, good, i; 484 420 struct 
page *srcs[2]; 421 + unsigned int src_offs[2]; 485 422 486 423 pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes); 487 424 ··· 499 434 if (blocks[i] == NULL) 500 435 ptrs[i] = (void*)raid6_empty_zero_page; 501 436 else 502 - ptrs[i] = page_address(blocks[i]); 437 + ptrs[i] = page_address(blocks[i]) + offs[i]; 503 438 504 439 raid6_datap_recov(disks, bytes, faila, ptrs); 505 440 ··· 523 458 BUG_ON(good_srcs == 0); 524 459 525 460 p = blocks[disks-2]; 461 + p_off = offs[disks-2]; 526 462 q = blocks[disks-1]; 463 + q_off = offs[disks-1]; 527 464 528 465 /* Compute syndrome with zero for the missing data page 529 466 * Use the dead data page as temporary storage for delta q 530 467 */ 531 468 dq = blocks[faila]; 469 + dq_off = offs[faila]; 532 470 blocks[faila] = NULL; 533 471 blocks[disks-1] = dq; 472 + offs[disks-1] = dq_off; 534 473 535 474 /* in the 4-disk case we only need to perform a single source 536 475 * multiplication with the one good data block. 537 476 */ 538 477 if (good_srcs == 1) { 539 478 struct page *g = blocks[good]; 479 + unsigned int g_off = offs[good]; 540 480 541 481 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, 542 482 scribble); 543 - tx = async_memcpy(p, g, 0, 0, bytes, submit); 483 + tx = async_memcpy(p, g, p_off, g_off, bytes, submit); 544 484 545 485 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, 546 486 scribble); 547 - tx = async_mult(dq, g, raid6_gfexp[good], bytes, submit); 487 + tx = async_mult(dq, dq_off, g, g_off, 488 + raid6_gfexp[good], bytes, submit); 548 489 } else { 549 490 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, 550 491 scribble); 551 - tx = async_gen_syndrome(blocks, 0, disks, bytes, submit); 492 + tx = async_gen_syndrome(blocks, offs, disks, bytes, submit); 552 493 } 553 494 554 495 /* Restore pointer table */ 555 496 blocks[faila] = dq; 497 + offs[faila] = dq_off; 556 498 blocks[disks-1] = q; 499 + offs[disks-1] = q_off; 557 500 558 501 /* calculate g^{-faila} */ 559 502 
coef = raid6_gfinv[raid6_gfexp[faila]]; 560 503 561 504 srcs[0] = dq; 505 + src_offs[0] = dq_off; 562 506 srcs[1] = q; 507 + src_offs[1] = q_off; 563 508 init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, 564 509 NULL, NULL, scribble); 565 - tx = async_xor(dq, srcs, 0, 2, bytes, submit); 510 + tx = async_xor_offs(dq, dq_off, srcs, src_offs, 2, bytes, submit); 566 511 567 512 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); 568 - tx = async_mult(dq, dq, coef, bytes, submit); 513 + tx = async_mult(dq, dq_off, dq, dq_off, coef, bytes, submit); 569 514 570 515 srcs[0] = p; 516 + src_offs[0] = p_off; 571 517 srcs[1] = dq; 518 + src_offs[1] = dq_off; 572 519 init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn, 573 520 cb_param, scribble); 574 - tx = async_xor(p, srcs, 0, 2, bytes, submit); 521 + tx = async_xor_offs(p, p_off, srcs, src_offs, 2, bytes, submit); 575 522 576 523 return tx; 577 524 }
+2 -2
crypto/async_tx/raid6test.c
··· 101 101 /* data+P failure. */ 102 102 init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv); 103 103 tx = async_raid6_datap_recov(disks, bytes, 104 - faila, ptrs, &submit); 104 + faila, ptrs, offs, &submit); 105 105 } else { 106 106 /* data+data failure. */ 107 107 init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv); 108 108 tx = async_raid6_2data_recov(disks, bytes, 109 - faila, failb, ptrs, &submit); 109 + faila, failb, ptrs, offs, &submit); 110 110 } 111 111 } 112 112 init_completion(&cmp);
+2 -2
drivers/md/raid5.c
··· 1685 1685 return async_raid6_datap_recov(syndrome_disks+2, 1686 1686 RAID5_STRIPE_SIZE(sh->raid_conf), 1687 1687 faila, 1688 - blocks, &submit); 1688 + blocks, offs, &submit); 1689 1689 } else { 1690 1690 /* We're missing D+D. */ 1691 1691 return async_raid6_2data_recov(syndrome_disks+2, 1692 1692 RAID5_STRIPE_SIZE(sh->raid_conf), 1693 1693 faila, failb, 1694 - blocks, &submit); 1694 + blocks, offs, &submit); 1695 1695 } 1696 1696 } 1697 1697 }
+4 -2
include/linux/async_tx.h
··· 196 196 197 197 struct dma_async_tx_descriptor * 198 198 async_raid6_2data_recov(int src_num, size_t bytes, int faila, int failb, 199 - struct page **ptrs, struct async_submit_ctl *submit); 199 + struct page **ptrs, unsigned int *offs, 200 + struct async_submit_ctl *submit); 200 201 201 202 struct dma_async_tx_descriptor * 202 203 async_raid6_datap_recov(int src_num, size_t bytes, int faila, 203 - struct page **ptrs, struct async_submit_ctl *submit); 204 + struct page **ptrs, unsigned int *offs, 205 + struct async_submit_ctl *submit); 204 206 205 207 void async_tx_quiesce(struct dma_async_tx_descriptor **tx); 206 208 #endif /* _ASYNC_TX_H_ */