Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

firewire: Uppercase most macro names.

Signed-off-by: Kristian Hoegsberg <krh@redhat.com>
Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>

Authored by Kristian Høgsberg and committed by Stefan Richter.
a77754a7 a98e2719

+228 -229
+20 -20
drivers/firewire/fw-card.c
··· 44 44 static LIST_HEAD(descriptor_list); 45 45 static int descriptor_count; 46 46 47 - #define bib_crc(v) ((v) << 0) 48 - #define bib_crc_length(v) ((v) << 16) 49 - #define bib_info_length(v) ((v) << 24) 47 + #define BIB_CRC(v) ((v) << 0) 48 + #define BIB_CRC_LENGTH(v) ((v) << 16) 49 + #define BIB_INFO_LENGTH(v) ((v) << 24) 50 50 51 - #define bib_link_speed(v) ((v) << 0) 52 - #define bib_generation(v) ((v) << 4) 53 - #define bib_max_rom(v) ((v) << 8) 54 - #define bib_max_receive(v) ((v) << 12) 55 - #define bib_cyc_clk_acc(v) ((v) << 16) 56 - #define bib_pmc ((1) << 27) 57 - #define bib_bmc ((1) << 28) 58 - #define bib_isc ((1) << 29) 59 - #define bib_cmc ((1) << 30) 60 - #define bib_imc ((1) << 31) 51 + #define BIB_LINK_SPEED(v) ((v) << 0) 52 + #define BIB_GENERATION(v) ((v) << 4) 53 + #define BIB_MAX_ROM(v) ((v) << 8) 54 + #define BIB_MAX_RECEIVE(v) ((v) << 12) 55 + #define BIB_CYC_CLK_ACC(v) ((v) << 16) 56 + #define BIB_PMC ((1) << 27) 57 + #define BIB_BMC ((1) << 28) 58 + #define BIB_ISC ((1) << 29) 59 + #define BIB_CMC ((1) << 30) 60 + #define BIB_IMC ((1) << 31) 61 61 62 62 static u32 * 63 63 generate_config_rom(struct fw_card *card, size_t *config_rom_length) ··· 76 76 */ 77 77 78 78 memset(config_rom, 0, sizeof config_rom); 79 - config_rom[0] = bib_crc_length(4) | bib_info_length(4) | bib_crc(0); 79 + config_rom[0] = BIB_CRC_LENGTH(4) | BIB_INFO_LENGTH(4) | BIB_CRC(0); 80 80 config_rom[1] = 0x31333934; 81 81 82 82 config_rom[2] = 83 - bib_link_speed(card->link_speed) | 84 - bib_generation(card->config_rom_generation++ % 14 + 2) | 85 - bib_max_rom(2) | 86 - bib_max_receive(card->max_receive) | 87 - bib_bmc | bib_isc | bib_cmc | bib_imc; 83 + BIB_LINK_SPEED(card->link_speed) | 84 + BIB_GENERATION(card->config_rom_generation++ % 14 + 2) | 85 + BIB_MAX_ROM(2) | 86 + BIB_MAX_RECEIVE(card->max_receive) | 87 + BIB_BMC | BIB_ISC | BIB_CMC | BIB_IMC; 88 88 config_rom[3] = card->guid >> 32; 89 89 config_rom[4] = card->guid; 90 90 ··· 318 318 */ 319 319 
spin_unlock_irqrestore(&card->lock, flags); 320 320 return; 321 - } else if (root->config_rom[2] & bib_cmc) { 321 + } else if (root->config_rom[2] & BIB_CMC) { 322 322 /* 323 323 * FIXME: I suppose we should set the cmstr bit in the 324 324 * STATE_CLEAR register of this node, as described in
+74 -74
drivers/firewire/fw-ohci.c
··· 33 33 #include "fw-transaction.h" 34 34 #include "fw-ohci.h" 35 35 36 - #define descriptor_output_more 0 37 - #define descriptor_output_last (1 << 12) 38 - #define descriptor_input_more (2 << 12) 39 - #define descriptor_input_last (3 << 12) 40 - #define descriptor_status (1 << 11) 41 - #define descriptor_key_immediate (2 << 8) 42 - #define descriptor_ping (1 << 7) 43 - #define descriptor_yy (1 << 6) 44 - #define descriptor_no_irq (0 << 4) 45 - #define descriptor_irq_error (1 << 4) 46 - #define descriptor_irq_always (3 << 4) 47 - #define descriptor_branch_always (3 << 2) 48 - #define descriptor_wait (3 << 0) 36 + #define DESCRIPTOR_OUTPUT_MORE 0 37 + #define DESCRIPTOR_OUTPUT_LAST (1 << 12) 38 + #define DESCRIPTOR_INPUT_MORE (2 << 12) 39 + #define DESCRIPTOR_INPUT_LAST (3 << 12) 40 + #define DESCRIPTOR_STATUS (1 << 11) 41 + #define DESCRIPTOR_KEY_IMMEDIATE (2 << 8) 42 + #define DESCRIPTOR_PING (1 << 7) 43 + #define DESCRIPTOR_YY (1 << 6) 44 + #define DESCRIPTOR_NO_IRQ (0 << 4) 45 + #define DESCRIPTOR_IRQ_ERROR (1 << 4) 46 + #define DESCRIPTOR_IRQ_ALWAYS (3 << 4) 47 + #define DESCRIPTOR_BRANCH_ALWAYS (3 << 2) 48 + #define DESCRIPTOR_WAIT (3 << 0) 49 49 50 50 struct descriptor { 51 51 __le16 req_count; ··· 70 70 __le32 reserved1; 71 71 } __attribute__((aligned(16))); 72 72 73 - #define control_set(regs) (regs) 74 - #define control_clear(regs) ((regs) + 4) 75 - #define command_ptr(regs) ((regs) + 12) 76 - #define context_match(regs) ((regs) + 16) 73 + #define CONTROL_SET(regs) (regs) 74 + #define CONTROL_CLEAR(regs) ((regs) + 4) 75 + #define COMMAND_PTR(regs) ((regs) + 12) 76 + #define CONTEXT_MATCH(regs) ((regs) + 16) 77 77 78 78 struct ar_buffer { 79 79 struct descriptor descriptor; ··· 112 112 struct tasklet_struct tasklet; 113 113 }; 114 114 115 - #define it_header_sy(v) ((v) << 0) 116 - #define it_header_tcode(v) ((v) << 4) 117 - #define it_header_channel(v) ((v) << 8) 118 - #define it_header_tag(v) ((v) << 14) 119 - #define it_header_speed(v) ((v) << 16) 120 
- #define it_header_data_length(v) ((v) << 16) 115 + #define IT_HEADER_SY(v) ((v) << 0) 116 + #define IT_HEADER_TCODE(v) ((v) << 4) 117 + #define IT_HEADER_CHANNEL(v) ((v) << 8) 118 + #define IT_HEADER_TAG(v) ((v) << 14) 119 + #define IT_HEADER_SPEED(v) ((v) << 16) 120 + #define IT_HEADER_DATA_LENGTH(v) ((v) << 16) 121 121 122 122 struct iso_context { 123 123 struct fw_iso_context base; ··· 256 256 } 257 257 258 258 memset(&ab->descriptor, 0, sizeof ab->descriptor); 259 - ab->descriptor.control = cpu_to_le16(descriptor_input_more | 260 - descriptor_status | 261 - descriptor_branch_always); 259 + ab->descriptor.control = cpu_to_le16(DESCRIPTOR_INPUT_MORE | 260 + DESCRIPTOR_STATUS | 261 + DESCRIPTOR_BRANCH_ALWAYS); 262 262 offset = offsetof(struct ar_buffer, data); 263 263 ab->descriptor.req_count = cpu_to_le16(PAGE_SIZE - offset); 264 264 ab->descriptor.data_address = cpu_to_le32(ab_bus + offset); ··· 271 271 ctx->last_buffer->next = ab; 272 272 ctx->last_buffer = ab; 273 273 274 - reg_write(ctx->ohci, control_set(ctx->regs), CONTEXT_WAKE); 274 + reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); 275 275 flush_writes(ctx->ohci); 276 276 277 277 return 0; ··· 416 416 ctx->current_buffer = ab.next; 417 417 ctx->pointer = ctx->current_buffer->data; 418 418 419 - reg_write(ctx->ohci, command_ptr(ctx->regs), ab.descriptor.branch_address); 420 - reg_write(ctx->ohci, control_set(ctx->regs), CONTEXT_RUN); 419 + reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ab.descriptor.branch_address); 420 + reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN); 421 421 flush_writes(ctx->ohci); 422 422 423 423 return 0; ··· 488 488 */ 489 489 490 490 memset(ctx->head_descriptor, 0, sizeof *ctx->head_descriptor); 491 - ctx->head_descriptor->control = cpu_to_le16(descriptor_output_last); 491 + ctx->head_descriptor->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST); 492 492 ctx->head_descriptor->transfer_status = cpu_to_le16(0x8011); 493 493 ctx->head_descriptor++; 494 494 ··· 536 536 
{ 537 537 struct fw_ohci *ohci = ctx->ohci; 538 538 539 - reg_write(ohci, command_ptr(ctx->regs), 539 + reg_write(ohci, COMMAND_PTR(ctx->regs), 540 540 le32_to_cpu(ctx->tail_descriptor_last->branch_address)); 541 - reg_write(ohci, control_clear(ctx->regs), ~0); 542 - reg_write(ohci, control_set(ctx->regs), CONTEXT_RUN | extra); 541 + reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0); 542 + reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra); 543 543 flush_writes(ohci); 544 544 } 545 545 ··· 557 557 dma_sync_single_for_device(ctx->ohci->card.device, ctx->buffer_bus, 558 558 ctx->buffer_size, DMA_TO_DEVICE); 559 559 560 - reg_write(ctx->ohci, control_set(ctx->regs), CONTEXT_WAKE); 560 + reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); 561 561 flush_writes(ctx->ohci); 562 562 } 563 563 ··· 566 566 u32 reg; 567 567 int i; 568 568 569 - reg_write(ctx->ohci, control_clear(ctx->regs), CONTEXT_RUN); 569 + reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN); 570 570 flush_writes(ctx->ohci); 571 571 572 572 for (i = 0; i < 10; i++) { 573 - reg = reg_read(ctx->ohci, control_set(ctx->regs)); 573 + reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs)); 574 574 if ((reg & CONTEXT_ACTIVE) == 0) 575 575 break; 576 576 ··· 605 605 return -1; 606 606 } 607 607 608 - d[0].control = cpu_to_le16(descriptor_key_immediate); 608 + d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE); 609 609 d[0].res_count = cpu_to_le16(packet->timestamp); 610 610 611 611 /* ··· 660 660 z = 2; 661 661 } 662 662 663 - last->control |= cpu_to_le16(descriptor_output_last | 664 - descriptor_irq_always | 665 - descriptor_branch_always); 663 + last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST | 664 + DESCRIPTOR_IRQ_ALWAYS | 665 + DESCRIPTOR_BRANCH_ALWAYS); 666 666 667 667 /* FIXME: Document how the locking works. */ 668 668 if (ohci->generation != packet->generation) { ··· 673 673 context_append(ctx, d, z, 4 - z); 674 674 675 675 /* If the context isn't already running, start it up. 
*/ 676 - reg = reg_read(ctx->ohci, control_set(ctx->regs)); 676 + reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs)); 677 677 if ((reg & CONTEXT_RUN) == 0) 678 678 context_run(ctx, 0); 679 679 ··· 750 750 return 1; 751 751 } 752 752 753 - #define header_get_destination(q) (((q) >> 16) & 0xffff) 754 - #define header_get_tcode(q) (((q) >> 4) & 0x0f) 755 - #define header_get_offset_high(q) (((q) >> 0) & 0xffff) 756 - #define header_get_data_length(q) (((q) >> 16) & 0xffff) 757 - #define header_get_extended_tcode(q) (((q) >> 0) & 0xffff) 753 + #define HEADER_GET_DESTINATION(q) (((q) >> 16) & 0xffff) 754 + #define HEADER_GET_TCODE(q) (((q) >> 4) & 0x0f) 755 + #define HEADER_GET_OFFSET_HIGH(q) (((q) >> 0) & 0xffff) 756 + #define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff) 757 + #define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff) 758 758 759 759 static void 760 760 handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr) ··· 762 762 struct fw_packet response; 763 763 int tcode, length, i; 764 764 765 - tcode = header_get_tcode(packet->header[0]); 765 + tcode = HEADER_GET_TCODE(packet->header[0]); 766 766 if (TCODE_IS_BLOCK_PACKET(tcode)) 767 - length = header_get_data_length(packet->header[3]); 767 + length = HEADER_GET_DATA_LENGTH(packet->header[3]); 768 768 else 769 769 length = 4; 770 770 ··· 791 791 __be32 *payload, lock_old; 792 792 u32 lock_arg, lock_data; 793 793 794 - tcode = header_get_tcode(packet->header[0]); 795 - length = header_get_data_length(packet->header[3]); 794 + tcode = HEADER_GET_TCODE(packet->header[0]); 795 + length = HEADER_GET_DATA_LENGTH(packet->header[3]); 796 796 payload = packet->payload; 797 - ext_tcode = header_get_extended_tcode(packet->header[3]); 797 + ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]); 798 798 799 799 if (tcode == TCODE_LOCK_REQUEST && 800 800 ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) { ··· 838 838 839 839 offset = 840 840 ((unsigned long long) 841 - 
header_get_offset_high(packet->header[1]) << 32) | 841 + HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) | 842 842 packet->header[2]; 843 843 csr = offset - CSR_REGISTER_BASE; 844 844 ··· 874 874 875 875 spin_lock_irqsave(&ctx->ohci->lock, flags); 876 876 877 - if (header_get_destination(packet->header[0]) == ctx->ohci->node_id && 877 + if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id && 878 878 ctx->ohci->generation == packet->generation) { 879 879 spin_unlock_irqrestore(&ctx->ohci->lock, flags); 880 880 handle_local_request(ctx, packet); ··· 1306 1306 1307 1307 ctx->header_length = i; 1308 1308 1309 - if (le16_to_cpu(db->control) & descriptor_irq_always) { 1309 + if (le16_to_cpu(db->control) & DESCRIPTOR_IRQ_ALWAYS) { 1310 1310 ir_header = (__le32 *) (db + 1); 1311 1311 ctx->base.callback(&ctx->base, 1312 1312 le32_to_cpu(ir_header[0]) & 0xffff, ··· 1329 1329 /* This descriptor isn't done yet, stop iteration. */ 1330 1330 return 0; 1331 1331 1332 - if (le16_to_cpu(last->control) & descriptor_irq_always) 1332 + if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) 1333 1333 ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count), 1334 1334 0, NULL, ctx->base.callback_data); 1335 1335 ··· 1428 1428 1429 1429 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index); 1430 1430 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index); 1431 - reg_write(ohci, context_match(ctx->context.regs), match); 1431 + reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match); 1432 1432 context_run(&ctx->context, control); 1433 1433 } 1434 1434 ··· 1525 1525 return -ENOMEM; 1526 1526 1527 1527 if (!p->skip) { 1528 - d[0].control = cpu_to_le16(descriptor_key_immediate); 1528 + d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE); 1529 1529 d[0].req_count = cpu_to_le16(8); 1530 1530 1531 1531 header = (__le32 *) &d[1]; 1532 - header[0] = cpu_to_le32(it_header_sy(p->sy) | 1533 - it_header_tag(p->tag) | 1534 - it_header_tcode(TCODE_STREAM_DATA) | 1535 - 
it_header_channel(ctx->base.channel) | 1536 - it_header_speed(ctx->base.speed)); 1532 + header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) | 1533 + IT_HEADER_TAG(p->tag) | 1534 + IT_HEADER_TCODE(TCODE_STREAM_DATA) | 1535 + IT_HEADER_CHANNEL(ctx->base.channel) | 1536 + IT_HEADER_SPEED(ctx->base.speed)); 1537 1537 header[1] = 1538 - cpu_to_le32(it_header_data_length(p->header_length + 1538 + cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length + 1539 1539 p->payload_length)); 1540 1540 } 1541 1541 ··· 1562 1562 } 1563 1563 1564 1564 if (p->interrupt) 1565 - irq = descriptor_irq_always; 1565 + irq = DESCRIPTOR_IRQ_ALWAYS; 1566 1566 else 1567 - irq = descriptor_no_irq; 1567 + irq = DESCRIPTOR_NO_IRQ; 1568 1568 1569 1569 last = z == 2 ? d : d + z - 1; 1570 - last->control |= cpu_to_le16(descriptor_output_last | 1571 - descriptor_status | 1572 - descriptor_branch_always | 1570 + last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST | 1571 + DESCRIPTOR_STATUS | 1572 + DESCRIPTOR_BRANCH_ALWAYS | 1573 1573 irq); 1574 1574 1575 1575 context_append(&ctx->context, d, z, header_z); ··· 1602 1602 return -ENOMEM; 1603 1603 1604 1604 db = (struct db_descriptor *) d; 1605 - db->control = cpu_to_le16(descriptor_status | 1606 - descriptor_branch_always | 1607 - descriptor_wait); 1605 + db->control = cpu_to_le16(DESCRIPTOR_STATUS | 1606 + DESCRIPTOR_BRANCH_ALWAYS | 1607 + DESCRIPTOR_WAIT); 1608 1608 db->first_size = cpu_to_le16(ctx->base.header_size + 4); 1609 1609 context_append(&ctx->context, d, 2, 0); 1610 1610 } ··· 1634 1634 return -ENOMEM; 1635 1635 1636 1636 db = (struct db_descriptor *) d; 1637 - db->control = cpu_to_le16(descriptor_status | 1638 - descriptor_branch_always); 1637 + db->control = cpu_to_le16(DESCRIPTOR_STATUS | 1638 + DESCRIPTOR_BRANCH_ALWAYS); 1639 1639 db->first_size = cpu_to_le16(ctx->base.header_size + 4); 1640 1640 db->first_req_count = cpu_to_le16(header_size); 1641 1641 db->first_res_count = db->first_req_count; ··· 1652 1652 db->second_buffer = 
cpu_to_le32(page_bus + offset); 1653 1653 1654 1654 if (p->interrupt && length == rest) 1655 - db->control |= cpu_to_le16(descriptor_irq_always); 1655 + db->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS); 1656 1656 1657 1657 context_append(&ctx->context, d, z, header_z); 1658 1658 offset = (offset + length) & ~PAGE_MASK;
+53 -54
drivers/firewire/fw-sbp2.c
··· 123 123 #define SBP2_STATUS_ILLEGAL_REQUEST 0x2 124 124 #define SBP2_STATUS_VENDOR_DEPENDENT 0x3 125 125 126 - #define status_get_orb_high(v) ((v).status & 0xffff) 127 - #define status_get_sbp_status(v) (((v).status >> 16) & 0xff) 128 - #define status_get_len(v) (((v).status >> 24) & 0x07) 129 - #define status_get_dead(v) (((v).status >> 27) & 0x01) 130 - #define status_get_response(v) (((v).status >> 28) & 0x03) 131 - #define status_get_source(v) (((v).status >> 30) & 0x03) 132 - #define status_get_orb_low(v) ((v).orb_low) 133 - #define status_get_data(v) ((v).data) 126 + #define STATUS_GET_ORB_HIGH(v) ((v).status & 0xffff) 127 + #define STATUS_GET_SBP_STATUS(v) (((v).status >> 16) & 0xff) 128 + #define STATUS_GET_LEN(v) (((v).status >> 24) & 0x07) 129 + #define STATUS_GET_DEAD(v) (((v).status >> 27) & 0x01) 130 + #define STATUS_GET_RESPONSE(v) (((v).status >> 28) & 0x03) 131 + #define STATUS_GET_SOURCE(v) (((v).status >> 30) & 0x03) 132 + #define STATUS_GET_ORB_LOW(v) ((v).orb_low) 133 + #define STATUS_GET_DATA(v) ((v).data) 134 134 135 135 struct sbp2_status { 136 136 u32 status; ··· 152 152 struct list_head link; 153 153 }; 154 154 155 - #define management_orb_lun(v) ((v)) 156 - #define management_orb_function(v) ((v) << 16) 157 - #define management_orb_reconnect(v) ((v) << 20) 158 - #define management_orb_exclusive ((1) << 28) 159 - #define management_orb_request_format(v) ((v) << 29) 160 - #define management_orb_notify ((1) << 31) 155 + #define MANAGEMENT_ORB_LUN(v) ((v)) 156 + #define MANAGEMENT_ORB_FUNCTION(v) ((v) << 16) 157 + #define MANAGEMENT_ORB_RECONNECT(v) ((v) << 20) 158 + #define MANAGEMENT_ORB_EXCLUSIVE ((1) << 28) 159 + #define MANAGEMENT_ORB_REQUEST_FORMAT(v) ((v) << 29) 160 + #define MANAGEMENT_ORB_NOTIFY ((1) << 31) 161 161 162 - #define management_orb_response_length(v) ((v)) 163 - #define management_orb_password_length(v) ((v) << 16) 162 + #define MANAGEMENT_ORB_RESPONSE_LENGTH(v) ((v)) 163 + #define MANAGEMENT_ORB_PASSWORD_LENGTH(v) 
((v) << 16) 164 164 165 165 struct sbp2_management_orb { 166 166 struct sbp2_orb base; ··· 177 177 struct sbp2_status status; 178 178 }; 179 179 180 - #define login_response_get_login_id(v) ((v).misc & 0xffff) 181 - #define login_response_get_length(v) (((v).misc >> 16) & 0xffff) 180 + #define LOGIN_RESPONSE_GET_LOGIN_ID(v) ((v).misc & 0xffff) 181 + #define LOGIN_RESPONSE_GET_LENGTH(v) (((v).misc >> 16) & 0xffff) 182 182 183 183 struct sbp2_login_response { 184 184 u32 misc; 185 185 struct sbp2_pointer command_block_agent; 186 186 u32 reconnect_hold; 187 187 }; 188 - 189 - #define command_orb_data_size(v) ((v)) 190 - #define command_orb_page_size(v) ((v) << 16) 191 - #define command_orb_page_table_present ((1) << 19) 192 - #define command_orb_max_payload(v) ((v) << 20) 193 - #define command_orb_speed(v) ((v) << 24) 194 - #define command_orb_direction(v) ((v) << 27) 195 - #define command_orb_request_format(v) ((v) << 29) 196 - #define command_orb_notify ((1) << 31) 188 + #define COMMAND_ORB_DATA_SIZE(v) ((v)) 189 + #define COMMAND_ORB_PAGE_SIZE(v) ((v) << 16) 190 + #define COMMAND_ORB_PAGE_TABLE_PRESENT ((1) << 19) 191 + #define COMMAND_ORB_MAX_PAYLOAD(v) ((v) << 20) 192 + #define COMMAND_ORB_SPEED(v) ((v) << 24) 193 + #define COMMAND_ORB_DIRECTION(v) ((v) << 27) 194 + #define COMMAND_ORB_REQUEST_FORMAT(v) ((v) << 29) 195 + #define COMMAND_ORB_NOTIFY ((1) << 31) 197 196 198 197 struct sbp2_command_orb { 199 198 struct sbp2_orb base; ··· 289 290 fw_memcpy_from_be32(&status, payload, header_size); 290 291 if (length > header_size) 291 292 memcpy(status.data, payload + 8, length - header_size); 292 - if (status_get_source(status) == 2 || status_get_source(status) == 3) { 293 + if (STATUS_GET_SOURCE(status) == 2 || STATUS_GET_SOURCE(status) == 3) { 293 294 fw_notify("non-orb related status write, not handled\n"); 294 295 fw_send_response(card, request, RCODE_COMPLETE); 295 296 return; ··· 298 299 /* Lookup the orb corresponding to this status write. 
*/ 299 300 spin_lock_irqsave(&card->lock, flags); 300 301 list_for_each_entry(orb, &sd->orb_list, link) { 301 - if (status_get_orb_high(status) == 0 && 302 - status_get_orb_low(status) == orb->request_bus && 302 + if (STATUS_GET_ORB_HIGH(status) == 0 && 303 + STATUS_GET_ORB_LOW(status) == orb->request_bus && 303 304 orb->rcode == RCODE_COMPLETE) { 304 305 list_del(&orb->link); 305 306 break; ··· 424 425 orb->request.response.low = orb->response_bus; 425 426 426 427 orb->request.misc = 427 - management_orb_notify | 428 - management_orb_function(function) | 429 - management_orb_lun(lun); 428 + MANAGEMENT_ORB_NOTIFY | 429 + MANAGEMENT_ORB_FUNCTION(function) | 430 + MANAGEMENT_ORB_LUN(lun); 430 431 orb->request.length = 431 - management_orb_response_length(sizeof orb->response); 432 + MANAGEMENT_ORB_RESPONSE_LENGTH(sizeof orb->response); 432 433 433 434 orb->request.status_fifo.high = sd->address_handler.offset >> 32; 434 435 orb->request.status_fifo.low = sd->address_handler.offset; ··· 440 441 */ 441 442 if (function == SBP2_LOGIN_REQUEST) { 442 443 orb->request.misc |= 443 - management_orb_exclusive | 444 - management_orb_reconnect(0); 444 + MANAGEMENT_ORB_EXCLUSIVE | 445 + MANAGEMENT_ORB_RECONNECT(0); 445 446 } 446 447 447 448 fw_memcpy_to_be32(&orb->request, &orb->request, sizeof orb->request); ··· 468 469 goto out; 469 470 } 470 471 471 - if (status_get_response(orb->status) != 0 || 472 - status_get_sbp_status(orb->status) != 0) { 472 + if (STATUS_GET_RESPONSE(orb->status) != 0 || 473 + STATUS_GET_SBP_STATUS(orb->status) != 0) { 473 474 fw_error("error status: %d:%d\n", 474 - status_get_response(orb->status), 475 - status_get_sbp_status(orb->status)); 475 + STATUS_GET_RESPONSE(orb->status), 476 + STATUS_GET_SBP_STATUS(orb->status)); 476 477 goto out; 477 478 } 478 479 ··· 576 577 sd->command_block_agent_address = 577 578 ((u64) (response.command_block_agent.high & 0xffff) << 32) | 578 579 response.command_block_agent.low; 579 - sd->login_id = 
login_response_get_login_id(response); 580 + sd->login_id = LOGIN_RESPONSE_GET_LOGIN_ID(response); 580 581 581 582 fw_notify("logged in to sbp2 unit %s (%d retries)\n", 582 583 unit->device.bus_id, sd->retries); ··· 827 828 int result; 828 829 829 830 if (status != NULL) { 830 - if (status_get_dead(*status)) 831 + if (STATUS_GET_DEAD(*status)) 831 832 sbp2_agent_reset(unit); 832 833 833 - switch (status_get_response(*status)) { 834 + switch (STATUS_GET_RESPONSE(*status)) { 834 835 case SBP2_STATUS_REQUEST_COMPLETE: 835 836 result = DID_OK << 16; 836 837 break; ··· 844 845 break; 845 846 } 846 847 847 - if (result == DID_OK << 16 && status_get_len(*status) > 1) 848 - result = sbp2_status_to_sense_data(status_get_data(*status), 848 + if (result == DID_OK << 16 && STATUS_GET_LEN(*status) > 1) 849 + result = sbp2_status_to_sense_data(STATUS_GET_DATA(*status), 849 850 orb->cmd->sense_buffer); 850 851 } else { 851 852 /* ··· 905 906 orb->request.data_descriptor.high = sd->address_high; 906 907 orb->request.data_descriptor.low = sg_dma_address(sg); 907 908 orb->request.misc |= 908 - command_orb_data_size(sg_dma_len(sg)); 909 + COMMAND_ORB_DATA_SIZE(sg_dma_len(sg)); 909 910 return; 910 911 } 911 912 ··· 942 943 orb->request.data_descriptor.high = sd->address_high; 943 944 orb->request.data_descriptor.low = orb->page_table_bus; 944 945 orb->request.misc |= 945 - command_orb_page_table_present | 946 - command_orb_data_size(j); 946 + COMMAND_ORB_PAGE_TABLE_PRESENT | 947 + COMMAND_ORB_DATA_SIZE(j); 947 948 948 949 fw_memcpy_to_be32(orb->page_table, orb->page_table, size); 949 950 } ··· 968 969 orb->request.data_descriptor.high = sd->address_high; 969 970 orb->request.data_descriptor.low = orb->request_buffer_bus; 970 971 orb->request.misc |= 971 - command_orb_data_size(orb->cmd->request_bufflen); 972 + COMMAND_ORB_DATA_SIZE(orb->cmd->request_bufflen); 972 973 } 973 974 974 975 /* SCSI stack integration */ ··· 1016 1017 * if we set this to max_speed + 7, we get the right value. 
1017 1018 */ 1018 1019 orb->request.misc = 1019 - command_orb_max_payload(device->node->max_speed + 7) | 1020 - command_orb_speed(device->node->max_speed) | 1021 - command_orb_notify; 1020 + COMMAND_ORB_MAX_PAYLOAD(device->node->max_speed + 7) | 1021 + COMMAND_ORB_SPEED(device->node->max_speed) | 1022 + COMMAND_ORB_NOTIFY; 1022 1023 1023 1024 if (cmd->sc_data_direction == DMA_FROM_DEVICE) 1024 1025 orb->request.misc |= 1025 - command_orb_direction(SBP2_DIRECTION_FROM_MEDIA); 1026 + COMMAND_ORB_DIRECTION(SBP2_DIRECTION_FROM_MEDIA); 1026 1027 else if (cmd->sc_data_direction == DMA_TO_DEVICE) 1027 1028 orb->request.misc |= 1028 - command_orb_direction(SBP2_DIRECTION_TO_MEDIA); 1029 + COMMAND_ORB_DIRECTION(SBP2_DIRECTION_TO_MEDIA); 1029 1030 1030 1031 if (cmd->use_sg) { 1031 1032 sbp2_command_orb_map_scatterlist(orb);
+20 -20
drivers/firewire/fw-topology.c
··· 24 24 #include "fw-transaction.h" 25 25 #include "fw-topology.h" 26 26 27 - #define self_id_phy_id(q) (((q) >> 24) & 0x3f) 28 - #define self_id_extended(q) (((q) >> 23) & 0x01) 29 - #define self_id_link_on(q) (((q) >> 22) & 0x01) 30 - #define self_id_gap_count(q) (((q) >> 16) & 0x3f) 31 - #define self_id_phy_speed(q) (((q) >> 14) & 0x03) 32 - #define self_id_contender(q) (((q) >> 11) & 0x01) 33 - #define self_id_phy_initiator(q) (((q) >> 1) & 0x01) 34 - #define self_id_more_packets(q) (((q) >> 0) & 0x01) 27 + #define SELF_ID_PHY_ID(q) (((q) >> 24) & 0x3f) 28 + #define SELF_ID_EXTENDED(q) (((q) >> 23) & 0x01) 29 + #define SELF_ID_LINK_ON(q) (((q) >> 22) & 0x01) 30 + #define SELF_ID_GAP_COUNT(q) (((q) >> 16) & 0x3f) 31 + #define SELF_ID_PHY_SPEED(q) (((q) >> 14) & 0x03) 32 + #define SELF_ID_CONTENDER(q) (((q) >> 11) & 0x01) 33 + #define SELF_ID_PHY_INITIATOR(q) (((q) >> 1) & 0x01) 34 + #define SELF_ID_MORE_PACKETS(q) (((q) >> 0) & 0x01) 35 35 36 - #define self_id_ext_sequence(q) (((q) >> 20) & 0x07) 36 + #define SELF_ID_EXT_SEQUENCE(q) (((q) >> 20) & 0x07) 37 37 38 38 static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count) 39 39 { ··· 61 61 62 62 shift -= 2; 63 63 if (shift == 0) { 64 - if (!self_id_more_packets(q)) 64 + if (!SELF_ID_MORE_PACKETS(q)) 65 65 return sid + 1; 66 66 67 67 shift = 16; ··· 75 75 * packets increase as expected. 
76 76 */ 77 77 78 - if (!self_id_extended(q) || 79 - seq != self_id_ext_sequence(q)) 78 + if (!SELF_ID_EXTENDED(q) || 79 + seq != SELF_ID_EXT_SEQUENCE(q)) 80 80 return NULL; 81 81 82 82 seq++; ··· 103 103 return NULL; 104 104 105 105 node->color = color; 106 - node->node_id = LOCAL_BUS | self_id_phy_id(sid); 107 - node->link_on = self_id_link_on(sid); 108 - node->phy_speed = self_id_phy_speed(sid); 106 + node->node_id = LOCAL_BUS | SELF_ID_PHY_ID(sid); 107 + node->link_on = SELF_ID_LINK_ON(sid); 108 + node->phy_speed = SELF_ID_PHY_SPEED(sid); 109 109 node->port_count = port_count; 110 110 111 111 atomic_set(&node->ref_count, 1); ··· 181 181 end = sid + self_id_count; 182 182 phy_id = 0; 183 183 irm_node = NULL; 184 - gap_count = self_id_gap_count(*sid); 184 + gap_count = SELF_ID_GAP_COUNT(*sid); 185 185 topology_type = 0; 186 186 187 187 while (sid < end) { ··· 193 193 } 194 194 195 195 q = *sid; 196 - if (phy_id != self_id_phy_id(q)) { 196 + if (phy_id != SELF_ID_PHY_ID(q)) { 197 197 fw_error("PHY ID mismatch in self ID: %d != %d.\n", 198 - phy_id, self_id_phy_id(q)); 198 + phy_id, SELF_ID_PHY_ID(q)); 199 199 return NULL; 200 200 } 201 201 ··· 221 221 if (phy_id == (card->node_id & 0x3f)) 222 222 local_node = node; 223 223 224 - if (self_id_contender(q)) 224 + if (SELF_ID_CONTENDER(q)) 225 225 irm_node = node; 226 226 227 227 if (node->phy_speed == SCODE_BETA) ··· 283 283 * setting, we fall back to 63 which will force a gap 284 284 * count reconfiguration and a reset. 285 285 */ 286 - if (self_id_gap_count(q) != gap_count) 286 + if (SELF_ID_GAP_COUNT(q) != gap_count) 287 287 gap_count = 63; 288 288 289 289 update_hop_count(node);
+61 -61
drivers/firewire/fw-transaction.c
··· 34 34 #include "fw-topology.h" 35 35 #include "fw-device.h" 36 36 37 - #define header_pri(pri) ((pri) << 0) 38 - #define header_tcode(tcode) ((tcode) << 4) 39 - #define header_retry(retry) ((retry) << 8) 40 - #define header_tlabel(tlabel) ((tlabel) << 10) 41 - #define header_destination(destination) ((destination) << 16) 42 - #define header_source(source) ((source) << 16) 43 - #define header_rcode(rcode) ((rcode) << 12) 44 - #define header_offset_high(offset_high) ((offset_high) << 0) 45 - #define header_data_length(length) ((length) << 16) 46 - #define header_extended_tcode(tcode) ((tcode) << 0) 37 + #define HEADER_PRI(pri) ((pri) << 0) 38 + #define HEADER_TCODE(tcode) ((tcode) << 4) 39 + #define HEADER_RETRY(retry) ((retry) << 8) 40 + #define HEADER_TLABEL(tlabel) ((tlabel) << 10) 41 + #define HEADER_DESTINATION(destination) ((destination) << 16) 42 + #define HEADER_SOURCE(source) ((source) << 16) 43 + #define HEADER_RCODE(rcode) ((rcode) << 12) 44 + #define HEADER_OFFSET_HIGH(offset_high) ((offset_high) << 0) 45 + #define HEADER_DATA_LENGTH(length) ((length) << 16) 46 + #define HEADER_EXTENDED_TCODE(tcode) ((tcode) << 0) 47 47 48 - #define header_get_tcode(q) (((q) >> 4) & 0x0f) 49 - #define header_get_tlabel(q) (((q) >> 10) & 0x3f) 50 - #define header_get_rcode(q) (((q) >> 12) & 0x0f) 51 - #define header_get_destination(q) (((q) >> 16) & 0xffff) 52 - #define header_get_source(q) (((q) >> 16) & 0xffff) 53 - #define header_get_offset_high(q) (((q) >> 0) & 0xffff) 54 - #define header_get_data_length(q) (((q) >> 16) & 0xffff) 55 - #define header_get_extended_tcode(q) (((q) >> 0) & 0xffff) 48 + #define HEADER_GET_TCODE(q) (((q) >> 4) & 0x0f) 49 + #define HEADER_GET_TLABEL(q) (((q) >> 10) & 0x3f) 50 + #define HEADER_GET_RCODE(q) (((q) >> 12) & 0x0f) 51 + #define HEADER_GET_DESTINATION(q) (((q) >> 16) & 0xffff) 52 + #define HEADER_GET_SOURCE(q) (((q) >> 16) & 0xffff) 53 + #define HEADER_GET_OFFSET_HIGH(q) (((q) >> 0) & 0xffff) 54 + #define 
HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff) 55 + #define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff) 56 56 57 - #define phy_config_gap_count(gap_count) (((gap_count) << 16) | (1 << 22)) 58 - #define phy_config_root_id(node_id) ((((node_id) & 0x3f) << 24) | (1 << 23)) 59 - #define phy_identifier(id) ((id) << 30) 57 + #define PHY_CONFIG_GAP_COUNT(gap_count) (((gap_count) << 16) | (1 << 22)) 58 + #define PHY_CONFIG_ROOT_ID(node_id) ((((node_id) & 0x3f) << 24) | (1 << 23)) 59 + #define PHY_IDENTIFIER(id) ((id) << 30) 60 60 61 61 static int 62 62 close_transaction(struct fw_transaction *transaction, ··· 159 159 ext_tcode = 0; 160 160 161 161 packet->header[0] = 162 - header_retry(RETRY_X) | 163 - header_tlabel(tlabel) | 164 - header_tcode(tcode) | 165 - header_destination(node_id); 162 + HEADER_RETRY(RETRY_X) | 163 + HEADER_TLABEL(tlabel) | 164 + HEADER_TCODE(tcode) | 165 + HEADER_DESTINATION(node_id); 166 166 packet->header[1] = 167 - header_offset_high(offset >> 32) | header_source(source_id); 167 + HEADER_OFFSET_HIGH(offset >> 32) | HEADER_SOURCE(source_id); 168 168 packet->header[2] = 169 169 offset; 170 170 ··· 178 178 case TCODE_LOCK_REQUEST: 179 179 case TCODE_WRITE_BLOCK_REQUEST: 180 180 packet->header[3] = 181 - header_data_length(length) | 182 - header_extended_tcode(ext_tcode); 181 + HEADER_DATA_LENGTH(length) | 182 + HEADER_EXTENDED_TCODE(ext_tcode); 183 183 packet->header_length = 16; 184 184 packet->payload = payload; 185 185 packet->payload_length = length; ··· 192 192 193 193 case TCODE_READ_BLOCK_REQUEST: 194 194 packet->header[3] = 195 - header_data_length(length) | 196 - header_extended_tcode(ext_tcode); 195 + HEADER_DATA_LENGTH(length) | 196 + HEADER_EXTENDED_TCODE(ext_tcode); 197 197 packet->header_length = 16; 198 198 packet->payload_length = 0; 199 199 break; ··· 325 325 { 326 326 u32 q; 327 327 328 - q = phy_identifier(PHY_PACKET_CONFIG) | 329 - phy_config_root_id(node_id) | 330 - phy_config_gap_count(gap_count); 328 + q = 
PHY_IDENTIFIER(PHY_PACKET_CONFIG) | 329 + PHY_CONFIG_ROOT_ID(node_id) | 330 + PHY_CONFIG_GAP_COUNT(gap_count); 331 331 332 332 send_phy_packet(card, q, generation); 333 333 } ··· 485 485 { 486 486 int tcode, tlabel, extended_tcode, source, destination; 487 487 488 - tcode = header_get_tcode(request_header[0]); 489 - tlabel = header_get_tlabel(request_header[0]); 490 - source = header_get_destination(request_header[0]); 491 - destination = header_get_source(request_header[1]); 492 - extended_tcode = header_get_extended_tcode(request_header[3]); 488 + tcode = HEADER_GET_TCODE(request_header[0]); 489 + tlabel = HEADER_GET_TLABEL(request_header[0]); 490 + source = HEADER_GET_DESTINATION(request_header[0]); 491 + destination = HEADER_GET_SOURCE(request_header[1]); 492 + extended_tcode = HEADER_GET_EXTENDED_TCODE(request_header[3]); 493 493 494 494 response->header[0] = 495 - header_retry(RETRY_1) | 496 - header_tlabel(tlabel) | 497 - header_destination(destination); 495 + HEADER_RETRY(RETRY_1) | 496 + HEADER_TLABEL(tlabel) | 497 + HEADER_DESTINATION(destination); 498 498 response->header[1] = 499 - header_source(source) | 500 - header_rcode(rcode); 499 + HEADER_SOURCE(source) | 500 + HEADER_RCODE(rcode); 501 501 response->header[2] = 0; 502 502 503 503 switch (tcode) { 504 504 case TCODE_WRITE_QUADLET_REQUEST: 505 505 case TCODE_WRITE_BLOCK_REQUEST: 506 - response->header[0] |= header_tcode(TCODE_WRITE_RESPONSE); 506 + response->header[0] |= HEADER_TCODE(TCODE_WRITE_RESPONSE); 507 507 response->header_length = 12; 508 508 response->payload_length = 0; 509 509 break; 510 510 511 511 case TCODE_READ_QUADLET_REQUEST: 512 512 response->header[0] |= 513 - header_tcode(TCODE_READ_QUADLET_RESPONSE); 513 + HEADER_TCODE(TCODE_READ_QUADLET_RESPONSE); 514 514 if (payload != NULL) 515 515 response->header[3] = *(u32 *)payload; 516 516 else ··· 521 521 522 522 case TCODE_READ_BLOCK_REQUEST: 523 523 case TCODE_LOCK_REQUEST: 524 - response->header[0] |= header_tcode(tcode + 2); 524 + 
response->header[0] |= HEADER_TCODE(tcode + 2); 525 525 response->header[3] = 526 - header_data_length(length) | 527 - header_extended_tcode(extended_tcode); 526 + HEADER_DATA_LENGTH(length) | 527 + HEADER_EXTENDED_TCODE(extended_tcode); 528 528 response->header_length = 16; 529 529 response->payload = payload; 530 530 response->payload_length = length; ··· 544 544 u32 *data, length; 545 545 int request_tcode, t; 546 546 547 - request_tcode = header_get_tcode(p->header[0]); 547 + request_tcode = HEADER_GET_TCODE(p->header[0]); 548 548 switch (request_tcode) { 549 549 case TCODE_WRITE_QUADLET_REQUEST: 550 550 data = &p->header[3]; ··· 554 554 case TCODE_WRITE_BLOCK_REQUEST: 555 555 case TCODE_LOCK_REQUEST: 556 556 data = p->payload; 557 - length = header_get_data_length(p->header[3]); 557 + length = HEADER_GET_DATA_LENGTH(p->header[3]); 558 558 break; 559 559 560 560 case TCODE_READ_QUADLET_REQUEST: ··· 564 564 565 565 case TCODE_READ_BLOCK_REQUEST: 566 566 data = NULL; 567 - length = header_get_data_length(p->header[3]); 567 + length = HEADER_GET_DATA_LENGTH(p->header[3]); 568 568 break; 569 569 570 570 default: ··· 644 644 645 645 offset = 646 646 ((unsigned long long) 647 - header_get_offset_high(p->header[1]) << 32) | p->header[2]; 648 - tcode = header_get_tcode(p->header[0]); 649 - destination = header_get_destination(p->header[0]); 650 - source = header_get_source(p->header[0]); 647 + HEADER_GET_OFFSET_HIGH(p->header[1]) << 32) | p->header[2]; 648 + tcode = HEADER_GET_TCODE(p->header[0]); 649 + destination = HEADER_GET_DESTINATION(p->header[0]); 650 + source = HEADER_GET_SOURCE(p->header[0]); 651 651 652 652 spin_lock_irqsave(&address_handler_lock, flags); 653 653 handler = lookup_enclosing_address_handler(&address_handler_list, ··· 682 682 size_t data_length; 683 683 int tcode, tlabel, destination, source, rcode; 684 684 685 - tcode = header_get_tcode(p->header[0]); 686 - tlabel = header_get_tlabel(p->header[0]); 687 - destination = 
header_get_destination(p->header[0]); 688 - source = header_get_source(p->header[1]); 689 - rcode = header_get_rcode(p->header[1]); 685 + tcode = HEADER_GET_TCODE(p->header[0]); 686 + tlabel = HEADER_GET_TLABEL(p->header[0]); 687 + destination = HEADER_GET_DESTINATION(p->header[0]); 688 + source = HEADER_GET_SOURCE(p->header[1]); 689 + rcode = HEADER_GET_RCODE(p->header[1]); 690 690 691 691 spin_lock_irqsave(&card->lock, flags); 692 692 list_for_each_entry(t, &card->transaction_list, link) { ··· 723 723 case TCODE_READ_BLOCK_RESPONSE: 724 724 case TCODE_LOCK_RESPONSE: 725 725 data = p->payload; 726 - data_length = header_get_data_length(p->header[3]); 726 + data_length = HEADER_GET_DATA_LENGTH(p->header[3]); 727 727 break; 728 728 729 729 default: