Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

firewire: Generalize the iso transmit descriptor buffer logic.

The descriptor circular buffer logic used for iso transmission is
useful for async transmit too, so pull the shareable logic out into
a few standalone functions.

Signed-off-by: Kristian Høgsberg <krh@redhat.com>
Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>

authored by

Kristian Høgsberg and committed by
Stefan Richter
30200739 9aad8125

+216 -140
+216 -140
drivers/firewire/fw-ohci.c
··· 75 75 struct tasklet_struct tasklet; 76 76 }; 77 77 78 + struct context; 79 + 80 + typedef int (*descriptor_callback_t)(struct context *ctx, 81 + struct descriptor *d, 82 + struct descriptor *last); 83 + struct context { 84 + struct fw_ohci *ohci; 85 + u32 regs; 86 + 87 + struct descriptor *buffer; 88 + dma_addr_t buffer_bus; 89 + size_t buffer_size; 90 + struct descriptor *head_descriptor; 91 + struct descriptor *tail_descriptor; 92 + struct descriptor *tail_descriptor_last; 93 + struct descriptor *prev_descriptor; 94 + 95 + descriptor_callback_t callback; 96 + 97 + struct tasklet_struct tasklet; 98 + }; 99 + 100 + 101 + 78 102 struct at_context { 79 103 struct fw_ohci *ohci; 80 104 dma_addr_t descriptor_bus; ··· 127 103 128 104 struct iso_context { 129 105 struct fw_iso_context base; 130 - struct tasklet_struct tasklet; 131 - u32 regs; 132 - 133 - struct descriptor *buffer; 134 - dma_addr_t buffer_bus; 135 - struct descriptor *head_descriptor; 136 - struct descriptor *tail_descriptor; 137 - struct descriptor *tail_descriptor_last; 138 - struct descriptor *prev_descriptor; 106 + struct context context; 139 107 }; 140 108 141 109 #define CONFIG_ROM_SIZE 1024 ··· 409 393 flush_writes(ctx->ohci); 410 394 411 395 return 0; 396 + } 397 + 398 + static void context_tasklet(unsigned long data) 399 + { 400 + struct context *ctx = (struct context *) data; 401 + struct fw_ohci *ohci = ctx->ohci; 402 + struct descriptor *d, *last; 403 + u32 address; 404 + int z; 405 + 406 + dma_sync_single_for_cpu(ohci->card.device, ctx->buffer_bus, 407 + ctx->buffer_size, DMA_TO_DEVICE); 408 + 409 + d = ctx->tail_descriptor; 410 + last = ctx->tail_descriptor_last; 411 + 412 + while (last->branch_address != 0) { 413 + address = le32_to_cpu(last->branch_address); 414 + z = address & 0xf; 415 + d = ctx->buffer + (address - ctx->buffer_bus) / sizeof *d; 416 + last = (z == 2) ? 
d : d + z - 1; 417 + 418 + if (!ctx->callback(ctx, d, last)) 419 + break; 420 + 421 + ctx->tail_descriptor = d; 422 + ctx->tail_descriptor_last = last; 423 + } 424 + } 425 + 426 + static int 427 + context_init(struct context *ctx, struct fw_ohci *ohci, 428 + size_t buffer_size, u32 regs, 429 + descriptor_callback_t callback) 430 + { 431 + ctx->ohci = ohci; 432 + ctx->regs = regs; 433 + ctx->buffer_size = buffer_size; 434 + ctx->buffer = kmalloc(buffer_size, GFP_KERNEL); 435 + if (ctx->buffer == NULL) 436 + return -ENOMEM; 437 + 438 + tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx); 439 + ctx->callback = callback; 440 + 441 + ctx->buffer_bus = 442 + dma_map_single(ohci->card.device, ctx->buffer, 443 + buffer_size, DMA_TO_DEVICE); 444 + if (dma_mapping_error(ctx->buffer_bus)) { 445 + kfree(ctx->buffer); 446 + return -ENOMEM; 447 + } 448 + 449 + ctx->head_descriptor = ctx->buffer; 450 + ctx->prev_descriptor = ctx->buffer; 451 + ctx->tail_descriptor = ctx->buffer; 452 + ctx->tail_descriptor_last = ctx->buffer; 453 + 454 + /* We put a dummy descriptor in the buffer that has a NULL 455 + * branch address and looks like it's been sent. That way we 456 + * have a descriptor to append DMA programs to. Also, the 457 + * ring buffer invariant is that it always has at least one 458 + * element so that head == tail means buffer full. 
*/ 459 + 460 + memset(ctx->head_descriptor, 0, sizeof *ctx->head_descriptor); 461 + ctx->head_descriptor->control = cpu_to_le16(descriptor_output_last); 462 + ctx->head_descriptor->transfer_status = cpu_to_le16(0x8011); 463 + ctx->head_descriptor++; 464 + 465 + return 0; 466 + } 467 + 468 + static void 469 + context_release(struct context *ctx) 470 + { 471 + struct fw_card *card = &ctx->ohci->card; 472 + 473 + dma_unmap_single(card->device, ctx->buffer_bus, 474 + ctx->buffer_size, DMA_TO_DEVICE); 475 + kfree(ctx->buffer); 476 + } 477 + 478 + static struct descriptor * 479 + context_get_descriptors(struct context *ctx, int z, dma_addr_t *d_bus) 480 + { 481 + struct descriptor *d, *tail, *end; 482 + 483 + d = ctx->head_descriptor; 484 + tail = ctx->tail_descriptor; 485 + end = ctx->buffer + ctx->buffer_size / sizeof(struct descriptor); 486 + 487 + if (d + z <= tail) { 488 + goto has_space; 489 + } else if (d > tail && d + z <= end) { 490 + goto has_space; 491 + } else if (d > tail && ctx->buffer + z <= tail) { 492 + d = ctx->buffer; 493 + goto has_space; 494 + } 495 + 496 + return NULL; 497 + 498 + has_space: 499 + memset(d, 0, z * sizeof *d); 500 + *d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof *d; 501 + 502 + return d; 503 + } 504 + 505 + static void context_run(struct context *ctx, u32 cycle_match) 506 + { 507 + struct fw_ohci *ohci = ctx->ohci; 508 + 509 + reg_write(ohci, command_ptr(ctx->regs), 510 + le32_to_cpu(ctx->tail_descriptor_last->branch_address)); 511 + reg_write(ohci, control_clear(ctx->regs), ~0); 512 + reg_write(ohci, control_set(ctx->regs), CONTEXT_RUN | cycle_match); 513 + flush_writes(ohci); 514 + } 515 + 516 + static void context_append(struct context *ctx, 517 + struct descriptor *d, int z, int extra) 518 + { 519 + dma_addr_t d_bus; 520 + 521 + d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof *d; 522 + 523 + ctx->head_descriptor = d + z + extra; 524 + ctx->prev_descriptor->branch_address = cpu_to_le32(d_bus | z); 525 + 
ctx->prev_descriptor = z == 2 ? d : d + z - 1; 526 + 527 + dma_sync_single_for_device(ctx->ohci->card.device, ctx->buffer_bus, 528 + ctx->buffer_size, DMA_TO_DEVICE); 529 + 530 + reg_write(ctx->ohci, control_set(ctx->regs), CONTEXT_WAKE); 531 + flush_writes(ctx->ohci); 532 + } 533 + 534 + static void context_stop(struct context *ctx) 535 + { 536 + u32 reg; 537 + 538 + reg_write(ctx->ohci, control_clear(ctx->regs), CONTEXT_RUN); 539 + 540 + reg = reg_read(ctx->ohci, control_set(ctx->regs)); 541 + if (reg & CONTEXT_ACTIVE) 542 + fw_notify("Tried to stop context, but it is still active " 543 + "(0x%08x).\n", reg); 412 544 } 413 545 414 546 static void ··· 1016 852 1017 853 while (iso_event) { 1018 854 i = ffs(iso_event) - 1; 1019 - tasklet_schedule(&ohci->ir_context_list[i].tasklet); 855 + tasklet_schedule(&ohci->ir_context_list[i].context.tasklet); 1020 856 iso_event &= ~(1 << i); 1021 857 } 1022 858 ··· 1025 861 1026 862 while (iso_event) { 1027 863 i = ffs(iso_event) - 1; 1028 - tasklet_schedule(&ohci->it_context_list[i].tasklet); 864 + tasklet_schedule(&ohci->it_context_list[i].context.tasklet); 1029 865 iso_event &= ~(1 << i); 1030 866 } 1031 867 ··· 1249 1085 1250 1086 #define ISO_BUFFER_SIZE (64 * 1024) 1251 1087 1252 - static void flush_iso_context(struct iso_context *ctx) 1088 + static int handle_it_packet(struct context *context, 1089 + struct descriptor *d, 1090 + struct descriptor *last) 1253 1091 { 1254 - struct fw_ohci *ohci = fw_ohci(ctx->base.card); 1255 - struct descriptor *d, *last; 1256 - u32 address; 1257 - int z; 1092 + struct iso_context *ctx = 1093 + container_of(context, struct iso_context, context); 1094 + 1095 + if (last->transfer_status == 0) 1096 + /* This descriptor isn't done yet, stop iteration. 
*/ 1097 + return 0; 1258 1098 1259 - dma_sync_single_for_cpu(ohci->card.device, ctx->buffer_bus, 1260 - ISO_BUFFER_SIZE, DMA_TO_DEVICE); 1099 + if (le16_to_cpu(last->control) & descriptor_irq_always) 1100 + ctx->base.callback(&ctx->base, 1101 + 0, le16_to_cpu(last->res_count), 1102 + ctx->base.callback_data); 1261 1103 1262 - d = ctx->tail_descriptor; 1263 - last = ctx->tail_descriptor_last; 1264 - 1265 - while (last->branch_address != 0 && last->transfer_status != 0) { 1266 - address = le32_to_cpu(last->branch_address); 1267 - z = address & 0xf; 1268 - d = ctx->buffer + (address - ctx->buffer_bus) / sizeof *d; 1269 - 1270 - if (z == 2) 1271 - last = d; 1272 - else 1273 - last = d + z - 1; 1274 - 1275 - if (le16_to_cpu(last->control) & descriptor_irq_always) 1276 - ctx->base.callback(&ctx->base, 1277 - 0, le16_to_cpu(last->res_count), 1278 - ctx->base.callback_data); 1279 - } 1280 - 1281 - ctx->tail_descriptor = d; 1282 - ctx->tail_descriptor_last = last; 1104 + return 1; 1283 1105 } 1284 1106 1285 - static void it_context_tasklet(unsigned long data) 1286 - { 1287 - struct iso_context *ctx = (struct iso_context *)data; 1288 - 1289 - flush_iso_context(ctx); 1290 - } 1291 - 1292 - static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card, 1293 - int type) 1107 + static struct fw_iso_context * 1108 + ohci_allocate_iso_context(struct fw_card *card, int type) 1294 1109 { 1295 1110 struct fw_ohci *ohci = fw_ohci(card); 1296 1111 struct iso_context *ctx, *list; 1297 - void (*tasklet) (unsigned long data); 1112 + descriptor_callback_t callback; 1298 1113 u32 *mask; 1299 1114 unsigned long flags; 1300 - int index; 1115 + int index, retval; 1301 1116 1302 1117 if (type == FW_ISO_CONTEXT_TRANSMIT) { 1303 1118 mask = &ohci->it_context_mask; 1304 1119 list = ohci->it_context_list; 1305 - tasklet = it_context_tasklet; 1120 + callback = handle_it_packet; 1306 1121 } else { 1307 - mask = &ohci->ir_context_mask; 1308 - list = ohci->ir_context_list; 1309 - tasklet 
= ir_context_tasklet; 1122 + return ERR_PTR(-EINVAL); 1310 1123 } 1311 1124 1312 1125 spin_lock_irqsave(&ohci->lock, flags); ··· 1297 1156 1298 1157 ctx = &list[index]; 1299 1158 memset(ctx, 0, sizeof *ctx); 1300 - tasklet_init(&ctx->tasklet, tasklet, (unsigned long)ctx); 1301 - 1302 - ctx->buffer = kmalloc(ISO_BUFFER_SIZE, GFP_KERNEL); 1303 - if (ctx->buffer == NULL) 1304 - goto buffer_alloc_failed; 1305 - 1306 - ctx->buffer_bus = 1307 - dma_map_single(card->device, ctx->buffer, 1308 - ISO_BUFFER_SIZE, DMA_TO_DEVICE); 1309 - if (dma_mapping_error(ctx->buffer_bus)) 1310 - goto buffer_map_failed; 1311 - 1312 - ctx->head_descriptor = ctx->buffer; 1313 - ctx->prev_descriptor = ctx->buffer; 1314 - ctx->tail_descriptor = ctx->buffer; 1315 - ctx->tail_descriptor_last = ctx->buffer; 1316 - 1317 - /* We put a dummy descriptor in the buffer that has a NULL 1318 - * branch address and looks like it's been sent. That way we 1319 - * have a descriptor to append DMA programs to. Also, the 1320 - * ring buffer invariant is that it always has at least one 1321 - * element so that head == tail means buffer full. 
*/ 1322 - 1323 - memset(ctx->head_descriptor, 0, sizeof *ctx->head_descriptor); 1324 - ctx->head_descriptor->control = cpu_to_le16(descriptor_output_last); 1325 - ctx->head_descriptor->transfer_status = cpu_to_le16(0x8011); 1326 - ctx->head_descriptor++; 1159 + retval = context_init(&ctx->context, ohci, ISO_BUFFER_SIZE, 1160 + OHCI1394_IsoXmitContextBase(index), callback); 1161 + if (retval < 0) { 1162 + spin_lock_irqsave(&ohci->lock, flags); 1163 + *mask |= 1 << index; 1164 + spin_unlock_irqrestore(&ohci->lock, flags); 1165 + return ERR_PTR(retval); 1166 + } 1327 1167 1328 1168 return &ctx->base; 1329 - 1330 - buffer_map_failed: 1331 - kfree(ctx->buffer); 1332 - buffer_alloc_failed: 1333 - spin_lock_irqsave(&ohci->lock, flags); 1334 - *mask |= 1 << index; 1335 - spin_unlock_irqrestore(&ohci->lock, flags); 1336 - 1337 - return ERR_PTR(-ENOMEM); 1338 1169 } 1339 1170 1340 1171 static int ohci_send_iso(struct fw_iso_context *base, s32 cycle) 1341 1172 { 1342 - struct iso_context *ctx = (struct iso_context *)base; 1343 - struct fw_ohci *ohci = fw_ohci(ctx->base.card); 1173 + struct iso_context *ctx = container_of(base, struct iso_context, base); 1174 + struct fw_ohci *ohci = ctx->context.ohci; 1344 1175 u32 cycle_match = 0; 1345 1176 int index; 1346 1177 ··· 1322 1209 (cycle & 0x7fff) << 16; 1323 1210 1324 1211 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index); 1325 - reg_write(ohci, OHCI1394_IsoXmitCommandPtr(index), 1326 - le32_to_cpu(ctx->tail_descriptor_last->branch_address)); 1327 - reg_write(ohci, OHCI1394_IsoXmitContextControlClear(index), ~0); 1328 - reg_write(ohci, OHCI1394_IsoXmitContextControlSet(index), 1329 - CONTEXT_RUN | cycle_match); 1330 - flush_writes(ohci); 1212 + context_run(&ctx->context, cycle_match); 1331 1213 1332 1214 return 0; 1333 1215 } ··· 1330 1222 static void ohci_free_iso_context(struct fw_iso_context *base) 1331 1223 { 1332 1224 struct fw_ohci *ohci = fw_ohci(base->card); 1333 - struct iso_context *ctx = (struct iso_context 
*)base; 1225 + struct iso_context *ctx = container_of(base, struct iso_context, base); 1334 1226 unsigned long flags; 1335 1227 int index; 1336 - 1337 - flush_iso_context(ctx); 1338 1228 1339 1229 spin_lock_irqsave(&ohci->lock, flags); 1340 1230 ··· 1349 1243 } 1350 1244 flush_writes(ohci); 1351 1245 1352 - dma_unmap_single(ohci->card.device, ctx->buffer_bus, 1353 - ISO_BUFFER_SIZE, DMA_TO_DEVICE); 1246 + context_release(&ctx->context); 1354 1247 1355 1248 spin_unlock_irqrestore(&ohci->lock, flags); 1356 1249 } ··· 1360 1255 struct fw_iso_buffer *buffer, 1361 1256 unsigned long payload) 1362 1257 { 1363 - struct iso_context *ctx = (struct iso_context *)base; 1364 - struct fw_ohci *ohci = fw_ohci(ctx->base.card); 1365 - struct descriptor *d, *end, *last, *tail, *pd; 1258 + struct iso_context *ctx = container_of(base, struct iso_context, base); 1259 + struct descriptor *d, *last, *pd; 1366 1260 struct fw_iso_packet *p; 1367 1261 __le32 *header; 1368 1262 dma_addr_t d_bus, page_bus; 1369 1263 u32 z, header_z, payload_z, irq; 1370 1264 u32 payload_index, payload_end_index, next_page_index; 1371 - int index, page, end_page, i, length, offset; 1265 + int page, end_page, i, length, offset; 1372 1266 1373 1267 /* FIXME: Cycle lost behavior should be configurable: lose 1374 1268 * packet, retransmit or terminate.. */ 1375 1269 1376 1270 p = packet; 1377 1271 payload_index = payload; 1378 - d = ctx->head_descriptor; 1379 - tail = ctx->tail_descriptor; 1380 - end = ctx->buffer + ISO_BUFFER_SIZE / sizeof(struct descriptor); 1381 1272 1382 1273 if (p->skip) 1383 1274 z = 1; ··· 1394 1293 /* Get header size in number of descriptors. 
*/ 1395 1294 header_z = DIV_ROUND_UP(p->header_length, sizeof *d); 1396 1295 1397 - if (d + z + header_z <= tail) { 1398 - goto has_space; 1399 - } else if (d > tail && d + z + header_z <= end) { 1400 - goto has_space; 1401 - } else if (d > tail && ctx->buffer + z + header_z <= tail) { 1402 - d = ctx->buffer; 1403 - goto has_space; 1404 - } 1405 - 1406 - /* No space in buffer */ 1407 - return -1; 1408 - 1409 - has_space: 1410 - memset(d, 0, (z + header_z) * sizeof *d); 1411 - d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof *d; 1296 + d = context_get_descriptors(&ctx->context, z + header_z, &d_bus); 1297 + if (d == NULL) 1298 + return -ENOMEM; 1412 1299 1413 1300 if (!p->skip) { 1414 1301 d[0].control = cpu_to_le16(descriptor_key_immediate); ··· 1435 1346 payload_index += length; 1436 1347 } 1437 1348 1438 - if (z == 2) 1439 - last = d; 1440 - else 1441 - last = d + z - 1; 1442 - 1443 1349 if (p->interrupt) 1444 1350 irq = descriptor_irq_always; 1445 1351 else 1446 1352 irq = descriptor_no_irq; 1447 1353 1354 + last = z == 2 ? d : d + z - 1; 1448 1355 last->control |= cpu_to_le16(descriptor_output_last | 1449 1356 descriptor_status | 1450 1357 descriptor_branch_always | 1451 1358 irq); 1452 1359 1453 - dma_sync_single_for_device(ohci->card.device, ctx->buffer_bus, 1454 - ISO_BUFFER_SIZE, DMA_TO_DEVICE); 1455 - 1456 - ctx->head_descriptor = d + z + header_z; 1457 - ctx->prev_descriptor->branch_address = cpu_to_le32(d_bus | z); 1458 - ctx->prev_descriptor = last; 1459 - 1460 - index = ctx - ohci->it_context_list; 1461 - reg_write(ohci, OHCI1394_IsoXmitContextControlSet(index), CONTEXT_WAKE); 1462 - flush_writes(ohci); 1360 + context_append(&ctx->context, d, z, header_z); 1463 1361 1464 1362 return 0; 1465 1363 }