Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dlm: use __le types for rcom messages

This patch changes the dlm rcom structure to use __le types directly;
the structure is cast at the appropriate dlm message buffer positions.

The main goal achieved here is to remove sparse warnings about
host-to-little-endian byte order conversion and vice versa. Ignoring
those sparse issues and always doing the conversion in the out/in
helper functions tends to leave it unclear in which byte order a
variable is currently being handled.

Signed-off-by: Alexander Aring <aahringo@redhat.com>
Signed-off-by: David Teigland <teigland@redhat.com>

authored by

Alexander Aring and committed by
David Teigland
2f9dbeda 3428785a

+52 -80
+5 -5
fs/dlm/dlm_internal.h
··· 451 451 452 452 struct dlm_rcom { 453 453 struct dlm_header rc_header; 454 - uint32_t rc_type; /* DLM_RCOM_ */ 455 - int rc_result; /* multi-purpose */ 456 - uint64_t rc_id; /* match reply with request */ 457 - uint64_t rc_seq; /* sender's ls_recover_seq */ 458 - uint64_t rc_seq_reply; /* remote ls_recover_seq */ 454 + __le32 rc_type; /* DLM_RCOM_ */ 455 + __le32 rc_result; /* multi-purpose */ 456 + __le64 rc_id; /* match reply with request */ 457 + __le64 rc_seq; /* sender's ls_recover_seq */ 458 + __le64 rc_seq_reply; /* remote ls_recover_seq */ 459 459 char rc_buf[]; 460 460 }; 461 461
+1 -2
fs/dlm/lock.c
··· 5062 5062 type = p->message.m_type; 5063 5063 break; 5064 5064 case DLM_RCOM: 5065 - dlm_rcom_in(&p->rcom); 5066 - type = p->rcom.rc_type; 5065 + type = le32_to_cpu(p->rcom.rc_type); 5067 5066 break; 5068 5067 default: 5069 5068 log_print("invalid h_cmd %d from %u", hd->h_cmd, nodeid);
+2 -7
fs/dlm/member.c
··· 120 120 121 121 ro0 = (struct rcom_slot *)(rc->rc_buf + sizeof(struct rcom_config)); 122 122 123 - for (i = 0, ro = ro0; i < num_slots; i++, ro++) { 124 - ro->ro_nodeid = le32_to_cpu(ro->ro_nodeid); 125 - ro->ro_slot = le16_to_cpu(ro->ro_slot); 126 - } 127 - 128 123 log_slots(ls, gen, num_slots, ro0, NULL, 0); 129 124 130 125 list_for_each_entry(memb, &ls->ls_nodes, list) { 131 126 for (i = 0, ro = ro0; i < num_slots; i++, ro++) { 132 - if (ro->ro_nodeid != memb->nodeid) 127 + if (le32_to_cpu(ro->ro_nodeid) != memb->nodeid) 133 128 continue; 134 - memb->slot = ro->ro_slot; 129 + memb->slot = le16_to_cpu(ro->ro_slot); 135 130 memb->slot_prev = memb->slot; 136 131 break; 137 132 }
+39 -41
fs/dlm/rcom.c
··· 40 40 rc->rc_header.h_length = cpu_to_le16(mb_len); 41 41 rc->rc_header.h_cmd = DLM_RCOM; 42 42 43 - rc->rc_type = type; 43 + rc->rc_type = cpu_to_le32(type); 44 44 45 45 spin_lock(&ls->ls_recover_lock); 46 - rc->rc_seq = ls->ls_recover_seq; 46 + rc->rc_seq = cpu_to_le64(ls->ls_recover_seq); 47 47 spin_unlock(&ls->ls_recover_lock); 48 48 49 49 *rc_ret = rc; ··· 91 91 92 92 static void send_rcom(struct dlm_mhandle *mh, struct dlm_rcom *rc) 93 93 { 94 - dlm_rcom_out(rc); 95 94 dlm_midcomms_commit_mhandle(mh); 96 95 } 97 96 98 97 static void send_rcom_stateless(struct dlm_msg *msg, struct dlm_rcom *rc) 99 98 { 100 - dlm_rcom_out(rc); 101 99 dlm_lowcomms_commit_msg(msg); 102 100 dlm_lowcomms_put_msg(msg); 103 101 } ··· 143 145 return 0; 144 146 } 145 147 146 - static void allow_sync_reply(struct dlm_ls *ls, uint64_t *new_seq) 148 + static void allow_sync_reply(struct dlm_ls *ls, __le64 *new_seq) 147 149 { 148 150 spin_lock(&ls->ls_rcom_spin); 149 - *new_seq = ++ls->ls_rcom_seq; 151 + *new_seq = cpu_to_le64(++ls->ls_rcom_seq); 150 152 set_bit(LSFL_RCOM_WAIT, &ls->ls_flags); 151 153 spin_unlock(&ls->ls_rcom_spin); 152 154 } ··· 180 182 181 183 if (nodeid == dlm_our_nodeid()) { 182 184 rc = ls->ls_recover_buf; 183 - rc->rc_result = dlm_recover_status(ls); 185 + rc->rc_result = cpu_to_le32(dlm_recover_status(ls)); 184 186 goto out; 185 187 } 186 188 ··· 206 208 207 209 rc = ls->ls_recover_buf; 208 210 209 - if (rc->rc_result == -ESRCH) { 211 + if (rc->rc_result == cpu_to_le32(-ESRCH)) { 210 212 /* we pretend the remote lockspace exists with 0 status */ 211 213 log_debug(ls, "remote node %d not ready", nodeid); 212 214 rc->rc_result = 0; ··· 257 259 258 260 rc->rc_id = rc_in->rc_id; 259 261 rc->rc_seq_reply = rc_in->rc_seq; 260 - rc->rc_result = status; 262 + rc->rc_result = cpu_to_le32(status); 261 263 262 264 set_rcom_config(ls, (struct rcom_config *)rc->rc_buf, num_slots); 263 265 ··· 285 287 { 286 288 spin_lock(&ls->ls_rcom_spin); 287 289 if 
(!test_bit(LSFL_RCOM_WAIT, &ls->ls_flags) || 288 - rc_in->rc_id != ls->ls_rcom_seq) { 290 + le64_to_cpu(rc_in->rc_id) != ls->ls_rcom_seq) { 289 291 log_debug(ls, "reject reply %d from %d seq %llx expect %llx", 290 - rc_in->rc_type, 292 + le32_to_cpu(rc_in->rc_type), 291 293 le32_to_cpu(rc_in->rc_header.h_nodeid), 292 - (unsigned long long)rc_in->rc_id, 294 + (unsigned long long)le64_to_cpu(rc_in->rc_id), 293 295 (unsigned long long)ls->ls_rcom_seq); 294 296 goto out; 295 297 } ··· 365 367 if (error) 366 368 goto out; 367 369 memcpy(rc->rc_buf, r->res_name, r->res_length); 368 - rc->rc_id = (unsigned long) r->res_id; 370 + rc->rc_id = cpu_to_le64(r->res_id); 369 371 370 372 send_rcom(mh, rc); 371 373 out: ··· 381 383 sizeof(struct dlm_rcom); 382 384 383 385 /* Old code would send this special id to trigger a debug dump. */ 384 - if (rc_in->rc_id == 0xFFFFFFFF) { 386 + if (rc_in->rc_id == cpu_to_le64(0xFFFFFFFF)) { 385 387 log_error(ls, "receive_rcom_lookup dump from %d", nodeid); 386 388 dlm_dump_rsb_name(ls, rc_in->rc_buf, len); 387 389 return; ··· 395 397 DLM_LU_RECOVER_MASTER, &ret_nodeid, NULL); 396 398 if (error) 397 399 ret_nodeid = error; 398 - rc->rc_result = ret_nodeid; 400 + rc->rc_result = cpu_to_le32(ret_nodeid); 399 401 rc->rc_id = rc_in->rc_id; 400 402 rc->rc_seq_reply = rc_in->rc_seq; 401 403 ··· 454 456 455 457 rl = (struct rcom_lock *) rc->rc_buf; 456 458 pack_rcom_lock(r, lkb, rl); 457 - rc->rc_id = (unsigned long) r; 459 + rc->rc_id = cpu_to_le64(r); 458 460 459 461 send_rcom(mh, rc); 460 462 out: ··· 508 510 rc->rc_header.h_length = cpu_to_le16(mb_len); 509 511 rc->rc_header.h_cmd = DLM_RCOM; 510 512 511 - rc->rc_type = DLM_RCOM_STATUS_REPLY; 513 + rc->rc_type = cpu_to_le32(DLM_RCOM_STATUS_REPLY); 512 514 rc->rc_id = rc_in->rc_id; 513 515 rc->rc_seq_reply = rc_in->rc_seq; 514 - rc->rc_result = -ESRCH; 516 + rc->rc_result = cpu_to_le32(-ESRCH); 515 517 516 518 rf = (struct rcom_config *) rc->rc_buf; 517 519 rf->rf_lvblen = cpu_to_le32(~0U); 518 
520 519 - dlm_rcom_out(rc); 520 521 dlm_midcomms_commit_mhandle(mh); 521 522 522 523 return 0; ··· 574 577 uint64_t seq; 575 578 576 579 switch (rc->rc_type) { 577 - case DLM_RCOM_STATUS_REPLY: 580 + case cpu_to_le32(DLM_RCOM_STATUS_REPLY): 578 581 reply = 1; 579 582 break; 580 - case DLM_RCOM_NAMES: 583 + case cpu_to_le32(DLM_RCOM_NAMES): 581 584 names = 1; 582 585 break; 583 - case DLM_RCOM_NAMES_REPLY: 586 + case cpu_to_le32(DLM_RCOM_NAMES_REPLY): 584 587 names = 1; 585 588 reply = 1; 586 589 break; 587 - case DLM_RCOM_LOOKUP: 590 + case cpu_to_le32(DLM_RCOM_LOOKUP): 588 591 lookup = 1; 589 592 break; 590 - case DLM_RCOM_LOOKUP_REPLY: 593 + case cpu_to_le32(DLM_RCOM_LOOKUP_REPLY): 591 594 lookup = 1; 592 595 reply = 1; 593 596 break; 594 - case DLM_RCOM_LOCK: 597 + case cpu_to_le32(DLM_RCOM_LOCK): 595 598 lock = 1; 596 599 break; 597 - case DLM_RCOM_LOCK_REPLY: 600 + case cpu_to_le32(DLM_RCOM_LOCK_REPLY): 598 601 lock = 1; 599 602 reply = 1; 600 603 break; ··· 606 609 seq = ls->ls_recover_seq; 607 610 spin_unlock(&ls->ls_recover_lock); 608 611 609 - if (stop && (rc->rc_type != DLM_RCOM_STATUS)) 612 + if (stop && (rc->rc_type != cpu_to_le32(DLM_RCOM_STATUS))) 610 613 goto ignore; 611 614 612 - if (reply && (rc->rc_seq_reply != seq)) 615 + if (reply && (le64_to_cpu(rc->rc_seq_reply) != seq)) 613 616 goto ignore; 614 617 615 618 if (!(status & DLM_RS_NODES) && (names || lookup || lock)) ··· 619 622 goto ignore; 620 623 621 624 switch (rc->rc_type) { 622 - case DLM_RCOM_STATUS: 625 + case cpu_to_le32(DLM_RCOM_STATUS): 623 626 receive_rcom_status(ls, rc); 624 627 break; 625 628 626 - case DLM_RCOM_NAMES: 629 + case cpu_to_le32(DLM_RCOM_NAMES): 627 630 receive_rcom_names(ls, rc); 628 631 break; 629 632 630 - case DLM_RCOM_LOOKUP: 633 + case cpu_to_le32(DLM_RCOM_LOOKUP): 631 634 receive_rcom_lookup(ls, rc); 632 635 break; 633 636 634 - case DLM_RCOM_LOCK: 637 + case cpu_to_le32(DLM_RCOM_LOCK): 635 638 if (le16_to_cpu(rc->rc_header.h_length) < lock_size) 636 639 goto 
Eshort; 637 640 receive_rcom_lock(ls, rc); 638 641 break; 639 642 640 - case DLM_RCOM_STATUS_REPLY: 643 + case cpu_to_le32(DLM_RCOM_STATUS_REPLY): 641 644 receive_sync_reply(ls, rc); 642 645 break; 643 646 644 - case DLM_RCOM_NAMES_REPLY: 647 + case cpu_to_le32(DLM_RCOM_NAMES_REPLY): 645 648 receive_sync_reply(ls, rc); 646 649 break; 647 650 648 - case DLM_RCOM_LOOKUP_REPLY: 651 + case cpu_to_le32(DLM_RCOM_LOOKUP_REPLY): 649 652 receive_rcom_lookup_reply(ls, rc); 650 653 break; 651 654 652 - case DLM_RCOM_LOCK_REPLY: 655 + case cpu_to_le32(DLM_RCOM_LOCK_REPLY): 653 656 if (le16_to_cpu(rc->rc_header.h_length) < lock_size) 654 657 goto Eshort; 655 658 dlm_recover_process_copy(ls, rc); 656 659 break; 657 660 658 661 default: 659 - log_error(ls, "receive_rcom bad type %d", rc->rc_type); 662 + log_error(ls, "receive_rcom bad type %d", 663 + le32_to_cpu(rc->rc_type)); 660 664 } 661 665 return; 662 666 663 667 ignore: 664 668 log_limit(ls, "dlm_receive_rcom ignore msg %d " 665 669 "from %d %llu %llu recover seq %llu sts %x gen %u", 666 - rc->rc_type, 670 + le32_to_cpu(rc->rc_type), 667 671 nodeid, 668 - (unsigned long long)rc->rc_seq, 669 - (unsigned long long)rc->rc_seq_reply, 672 + (unsigned long long)le64_to_cpu(rc->rc_seq), 673 + (unsigned long long)le64_to_cpu(rc->rc_seq_reply), 670 674 (unsigned long long)seq, 671 675 status, ls->ls_generation); 672 676 return; 673 677 Eshort: 674 678 log_error(ls, "recovery message %d from %d is too short", 675 - rc->rc_type, nodeid); 679 + le32_to_cpu(rc->rc_type), nodeid); 676 680 } 677 681
+5 -5
fs/dlm/recover.c
··· 114 114 if (save_slots) 115 115 dlm_slot_save(ls, rc, memb); 116 116 117 - if (rc->rc_result & wait_status) 117 + if (le32_to_cpu(rc->rc_result) & wait_status) 118 118 break; 119 119 if (delay < 1000) 120 120 delay += 20; ··· 141 141 if (error) 142 142 break; 143 143 144 - if (rc->rc_result & wait_status) 144 + if (le32_to_cpu(rc->rc_result) & wait_status) 145 145 break; 146 146 if (delay < 1000) 147 147 delay += 20; ··· 568 568 struct dlm_rsb *r; 569 569 int ret_nodeid, new_master; 570 570 571 - r = recover_idr_find(ls, rc->rc_id); 571 + r = recover_idr_find(ls, le64_to_cpu(rc->rc_id)); 572 572 if (!r) { 573 573 log_error(ls, "dlm_recover_master_reply no id %llx", 574 - (unsigned long long)rc->rc_id); 574 + (unsigned long long)le64_to_cpu(rc->rc_id)); 575 575 goto out; 576 576 } 577 577 578 - ret_nodeid = rc->rc_result; 578 + ret_nodeid = le32_to_cpu(rc->rc_result); 579 579 580 580 if (ret_nodeid == dlm_our_nodeid()) 581 581 new_master = 0;
-18
fs/dlm/util.c
··· 108 108 ms->m_asts = le32_to_cpu(ms->m_asts); 109 109 ms->m_result = from_dlm_errno(le32_to_cpu(ms->m_result)); 110 110 } 111 - 112 - void dlm_rcom_out(struct dlm_rcom *rc) 113 - { 114 - rc->rc_type = cpu_to_le32(rc->rc_type); 115 - rc->rc_result = cpu_to_le32(rc->rc_result); 116 - rc->rc_id = cpu_to_le64(rc->rc_id); 117 - rc->rc_seq = cpu_to_le64(rc->rc_seq); 118 - rc->rc_seq_reply = cpu_to_le64(rc->rc_seq_reply); 119 - } 120 - 121 - void dlm_rcom_in(struct dlm_rcom *rc) 122 - { 123 - rc->rc_type = le32_to_cpu(rc->rc_type); 124 - rc->rc_result = le32_to_cpu(rc->rc_result); 125 - rc->rc_id = le64_to_cpu(rc->rc_id); 126 - rc->rc_seq = le64_to_cpu(rc->rc_seq); 127 - rc->rc_seq_reply = le64_to_cpu(rc->rc_seq_reply); 128 - }
-2
fs/dlm/util.h
··· 13 13 14 14 void dlm_message_out(struct dlm_message *ms); 15 15 void dlm_message_in(struct dlm_message *ms); 16 - void dlm_rcom_out(struct dlm_rcom *rc); 17 - void dlm_rcom_in(struct dlm_rcom *rc); 18 16 19 17 #endif 20 18