Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2024-08-26 (ice)

This series contains updates to ice driver only.

Jake implements and uses rd32_poll_timeout to replace a jiffies loop for
calling ice_sq_done. The rd32_poll_timeout() function is designed to allow
simplifying other places in the driver where we need to read a register
until it matches a known value.

Jake, Bruce, and Przemek update ice_debug_cq() to be more robust, and more
useful for tracing control queue messages sent and received by the device
driver.

Jake rewords several comments in the ice_controlq.c file which previously
referred to the "Admin queue" when they were actually generic functions
usable on any control queue.

Jake removes the unused and unnecessary cmd_buf array allocation for send
queues. This logic originally was going to be useful if we ever implemented
asynchronous completion of transmit messages. This support is unlikely to
materialize, so the overhead of allocating a command buffer is unnecessary.

Sergey improves the log messages when the ice driver reports that the NVM
version on the device is not supported by the driver. Now, these messages
include both the discovered NVM version and the requested/expected NVM
version.

Aleksandr Mishin corrects overallocation of memory related to adding
scheduler nodes.

* '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue:
ice: Adjust over allocation of memory in ice_sched_add_root_node() and ice_sched_add_node()
ice: Report NVM version numbers on mismatch during load
ice: remove unnecessary control queue cmd_buf arrays
ice: reword comments referring to control queues
ice: stop intermixing AQ commands/responses debug dumps
ice: do not clutter debug logs with unused data
ice: improve debug print for control queue messages
ice: implement and use rd32_poll_timeout for ice_sq_done timeout
====================

Link: https://patch.msgid.link/20240826224655.133847-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+118 -101
+4
drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
··· 2632 2632 /* FW defined boundary for a large buffer, 4k >= Large buffer > 512 bytes */ 2633 2633 #define ICE_AQ_LG_BUF 512 2634 2634 2635 + #define ICE_AQ_FLAG_DD_S 0 2636 + #define ICE_AQ_FLAG_CMP_S 1 2635 2637 #define ICE_AQ_FLAG_ERR_S 2 2636 2638 #define ICE_AQ_FLAG_LB_S 9 2637 2639 #define ICE_AQ_FLAG_RD_S 10 2638 2640 #define ICE_AQ_FLAG_BUF_S 12 2639 2641 #define ICE_AQ_FLAG_SI_S 13 2640 2642 2643 + #define ICE_AQ_FLAG_DD BIT(ICE_AQ_FLAG_DD_S) /* 0x1 */ 2644 + #define ICE_AQ_FLAG_CMP BIT(ICE_AQ_FLAG_CMP_S) /* 0x2 */ 2641 2645 #define ICE_AQ_FLAG_ERR BIT(ICE_AQ_FLAG_ERR_S) /* 0x4 */ 2642 2646 #define ICE_AQ_FLAG_LB BIT(ICE_AQ_FLAG_LB_S) /* 0x200 */ 2643 2647 #define ICE_AQ_FLAG_RD BIT(ICE_AQ_FLAG_RD_S) /* 0x400 */
+93 -83
drivers/net/ethernet/intel/ice/ice_controlq.c
··· 99 99 return -ENOMEM; 100 100 cq->sq.desc_buf.size = size; 101 101 102 - cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries, 103 - sizeof(struct ice_sq_cd), GFP_KERNEL); 104 - if (!cq->sq.cmd_buf) { 105 - dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size, 106 - cq->sq.desc_buf.va, cq->sq.desc_buf.pa); 107 - cq->sq.desc_buf.va = NULL; 108 - cq->sq.desc_buf.pa = 0; 109 - cq->sq.desc_buf.size = 0; 110 - return -ENOMEM; 111 - } 112 - 113 102 return 0; 114 103 } 115 104 ··· 177 188 if (cq->rq_buf_size > ICE_AQ_LG_BUF) 178 189 desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB); 179 190 desc->opcode = 0; 180 - /* This is in accordance with Admin queue design, there is no 191 + /* This is in accordance with control queue design, there is no 181 192 * register for buffer size configuration 182 193 */ 183 194 desc->datalen = cpu_to_le16(bi->size); ··· 327 338 (qi)->ring.r.ring##_bi[i].size = 0;\ 328 339 } \ 329 340 } \ 330 - /* free the buffer info list */ \ 331 - devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \ 332 341 /* free DMA head */ \ 333 342 devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head); \ 334 343 } while (0) ··· 392 405 } 393 406 394 407 /** 395 - * ice_init_rq - initialize ARQ 408 + * ice_init_rq - initialize receive side of a control queue 396 409 * @hw: pointer to the hardware structure 397 410 * @cq: pointer to the specific Control queue 398 411 * 399 - * The main initialization routine for the Admin Receive (Event) Queue. 412 + * The main initialization routine for Receive side of a control queue. 
400 413 * Prior to calling this function, the driver *MUST* set the following fields 401 414 * in the cq->structure: 402 415 * - cq->num_rq_entries ··· 452 465 } 453 466 454 467 /** 455 - * ice_shutdown_sq - shutdown the Control ATQ 468 + * ice_shutdown_sq - shutdown the transmit side of a control queue 456 469 * @hw: pointer to the hardware structure 457 470 * @cq: pointer to the specific Control queue 458 471 * ··· 469 482 goto shutdown_sq_out; 470 483 } 471 484 472 - /* Stop firmware AdminQ processing */ 485 + /* Stop processing of the control queue */ 473 486 wr32(hw, cq->sq.head, 0); 474 487 wr32(hw, cq->sq.tail, 0); 475 488 wr32(hw, cq->sq.len, 0); ··· 488 501 } 489 502 490 503 /** 491 - * ice_aq_ver_check - Check the reported AQ API version. 504 + * ice_aq_ver_check - Check the reported AQ API version 492 505 * @hw: pointer to the hardware structure 493 506 * 494 507 * Checks if the driver should load on a given AQ API version. ··· 508 521 } else if (hw->api_maj_ver == exp_fw_api_ver_major) { 509 522 if (hw->api_min_ver > (exp_fw_api_ver_minor + 2)) 510 523 dev_info(ice_hw_to_dev(hw), 511 - "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n"); 524 + "The driver for the device detected a newer version (%u.%u) of the NVM image than expected (%u.%u). Please install the most recent version of the network driver.\n", 525 + hw->api_maj_ver, hw->api_min_ver, 526 + exp_fw_api_ver_major, exp_fw_api_ver_minor); 512 527 else if ((hw->api_min_ver + 2) < exp_fw_api_ver_minor) 513 528 dev_info(ice_hw_to_dev(hw), 514 - "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n"); 529 + "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). 
Please update the NVM image.\n", 530 + hw->api_maj_ver, hw->api_min_ver, 531 + exp_fw_api_ver_major, exp_fw_api_ver_minor); 515 532 } else { 516 533 /* Major API version is older than expected, log a warning */ 517 534 dev_info(ice_hw_to_dev(hw), 518 - "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n"); 535 + "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n", 536 + hw->api_maj_ver, hw->api_min_ver, 537 + exp_fw_api_ver_major, exp_fw_api_ver_minor); 519 538 } 520 539 return true; 521 540 } ··· 848 855 } 849 856 850 857 /** 851 - * ice_clean_sq - cleans Admin send queue (ATQ) 858 + * ice_clean_sq - cleans send side of a control queue 852 859 * @hw: pointer to the hardware structure 853 860 * @cq: pointer to the specific Control queue 854 861 * ··· 858 865 { 859 866 struct ice_ctl_q_ring *sq = &cq->sq; 860 867 u16 ntc = sq->next_to_clean; 861 - struct ice_sq_cd *details; 862 868 struct ice_aq_desc *desc; 863 869 864 870 desc = ICE_CTL_Q_DESC(*sq, ntc); 865 - details = ICE_CTL_Q_DETAILS(*sq, ntc); 866 871 867 872 while (rd32(hw, cq->sq.head) != ntc) { 868 873 ice_debug(hw, ICE_DBG_AQ_MSG, "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head)); 869 874 memset(desc, 0, sizeof(*desc)); 870 - memset(details, 0, sizeof(*details)); 871 875 ntc++; 872 876 if (ntc == sq->count) 873 877 ntc = 0; 874 878 desc = ICE_CTL_Q_DESC(*sq, ntc); 875 - details = ICE_CTL_Q_DETAILS(*sq, ntc); 876 879 } 877 880 878 881 sq->next_to_clean = ntc; ··· 877 888 } 878 889 879 890 /** 891 + * ice_ctl_q_str - Convert control queue type to string 892 + * @qtype: the control queue type 893 + * 894 + * Return: A string name for the given control queue type. 
895 + */ 896 + static const char *ice_ctl_q_str(enum ice_ctl_q qtype) 897 + { 898 + switch (qtype) { 899 + case ICE_CTL_Q_UNKNOWN: 900 + return "Unknown CQ"; 901 + case ICE_CTL_Q_ADMIN: 902 + return "AQ"; 903 + case ICE_CTL_Q_MAILBOX: 904 + return "MBXQ"; 905 + case ICE_CTL_Q_SB: 906 + return "SBQ"; 907 + default: 908 + return "Unrecognized CQ"; 909 + } 910 + } 911 + 912 + /** 880 913 * ice_debug_cq 881 914 * @hw: pointer to the hardware structure 915 + * @cq: pointer to the specific Control queue 882 916 * @desc: pointer to control queue descriptor 883 917 * @buf: pointer to command buffer 884 918 * @buf_len: max length of buf 919 + * @response: true if this is the writeback response 885 920 * 886 921 * Dumps debug log about control command with descriptor contents. 887 922 */ 888 - static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len) 923 + static void ice_debug_cq(struct ice_hw *hw, struct ice_ctl_q_info *cq, 924 + void *desc, void *buf, u16 buf_len, bool response) 889 925 { 890 926 struct ice_aq_desc *cq_desc = desc; 891 - u16 len; 927 + u16 datalen, flags; 892 928 893 929 if (!IS_ENABLED(CONFIG_DYNAMIC_DEBUG) && 894 930 !((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask)) ··· 922 908 if (!desc) 923 909 return; 924 910 925 - len = le16_to_cpu(cq_desc->datalen); 911 + datalen = le16_to_cpu(cq_desc->datalen); 912 + flags = le16_to_cpu(cq_desc->flags); 926 913 927 - ice_debug(hw, ICE_DBG_AQ_DESC, "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", 928 - le16_to_cpu(cq_desc->opcode), 929 - le16_to_cpu(cq_desc->flags), 930 - le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval)); 931 - ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n", 914 + ice_debug(hw, ICE_DBG_AQ_DESC, "%s %s: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n\tcookie (h,l) 0x%08X 0x%08X\n\tparam (0,1) 0x%08X 0x%08X\n\taddr (h,l) 0x%08X 0x%08X\n", 915 + ice_ctl_q_str(cq->qtype), response ? 
"Response" : "Command", 916 + le16_to_cpu(cq_desc->opcode), flags, datalen, 917 + le16_to_cpu(cq_desc->retval), 932 918 le32_to_cpu(cq_desc->cookie_high), 933 - le32_to_cpu(cq_desc->cookie_low)); 934 - ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1) 0x%08X 0x%08X\n", 919 + le32_to_cpu(cq_desc->cookie_low), 935 920 le32_to_cpu(cq_desc->params.generic.param0), 936 - le32_to_cpu(cq_desc->params.generic.param1)); 937 - ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l) 0x%08X 0x%08X\n", 921 + le32_to_cpu(cq_desc->params.generic.param1), 938 922 le32_to_cpu(cq_desc->params.generic.addr_high), 939 923 le32_to_cpu(cq_desc->params.generic.addr_low)); 940 - if (buf && cq_desc->datalen != 0) { 941 - ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n"); 942 - if (buf_len < len) 943 - len = buf_len; 924 + /* Dump buffer iff 1) one exists and 2) is either a response indicated 925 + * by the DD and/or CMP flag set or a command with the RD flag set. 926 + */ 927 + if (buf && cq_desc->datalen && 928 + (flags & (ICE_AQ_FLAG_DD | ICE_AQ_FLAG_CMP | ICE_AQ_FLAG_RD))) { 929 + char prefix[] = KBUILD_MODNAME " 0x12341234 0x12341234 "; 944 930 945 - ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, buf, len); 931 + sprintf(prefix, KBUILD_MODNAME " 0x%08X 0x%08X ", 932 + le32_to_cpu(cq_desc->params.generic.addr_high), 933 + le32_to_cpu(cq_desc->params.generic.addr_low)); 934 + ice_debug_array_w_prefix(hw, ICE_DBG_AQ_DESC_BUF, prefix, 935 + buf, 936 + min_t(u16, buf_len, datalen)); 946 937 } 947 938 } 948 939 949 940 /** 950 - * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ) 941 + * ice_sq_done - poll until the last send on a control queue has completed 951 942 * @hw: pointer to the HW struct 952 943 * @cq: pointer to the specific Control queue 953 944 * 954 - * Returns true if the firmware has processed all descriptors on the 955 - * admin send queue. Returns false if there are still requests pending. 
945 + * Use read_poll_timeout to poll the control queue head, checking until it 946 + * matches next_to_use. According to the control queue designers, this has 947 + * better timing reliability than the DD bit. 948 + * 949 + * Return: true if all the descriptors on the send side of a control queue 950 + * are finished processing, false otherwise. 956 951 */ 957 952 static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq) 958 953 { 959 - /* AQ designers suggest use of head for better 960 - * timing reliability than DD bit 954 + u32 head; 955 + 956 + /* Wait a short time before the initial check, to allow hardware time 957 + * for completion. 961 958 */ 962 - return rd32(hw, cq->sq.head) == cq->sq.next_to_use; 959 + udelay(5); 960 + 961 + return !rd32_poll_timeout(hw, cq->sq.head, 962 + head, head == cq->sq.next_to_use, 963 + 20, ICE_CTL_Q_SQ_CMD_TIMEOUT); 963 964 } 964 965 965 966 /** 966 - * ice_sq_send_cmd - send command to Control Queue (ATQ) 967 + * ice_sq_send_cmd - send command to a control queue 967 968 * @hw: pointer to the HW struct 968 969 * @cq: pointer to the specific Control queue 969 970 * @desc: prefilled descriptor describing the command ··· 986 957 * @buf_size: size of buffer for indirect commands (or 0 for direct commands) 987 958 * @cd: pointer to command details structure 988 959 * 989 - * This is the main send command routine for the ATQ. It runs the queue, 990 - * cleans the queue, etc. 960 + * Main command for the transmit side of a control queue. It puts the command 961 + * on the queue, bumps the tail, waits for processing of the command, captures 962 + * command status and results, etc. 
991 963 */ 992 964 int 993 965 ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, ··· 998 968 struct ice_dma_mem *dma_buf = NULL; 999 969 struct ice_aq_desc *desc_on_ring; 1000 970 bool cmd_completed = false; 1001 - struct ice_sq_cd *details; 1002 - unsigned long timeout; 1003 971 int status = 0; 1004 972 u16 retval = 0; 1005 973 u32 val = 0; ··· 1041 1013 goto sq_send_command_error; 1042 1014 } 1043 1015 1044 - details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use); 1045 - if (cd) 1046 - *details = *cd; 1047 - else 1048 - memset(details, 0, sizeof(*details)); 1049 - 1050 1016 /* Call clean and check queue available function to reclaim the 1051 1017 * descriptors that were processed by FW/MBX; the function returns the 1052 1018 * number of desc available. The clean function called here could be ··· 1077 1055 /* Debug desc and buffer */ 1078 1056 ice_debug(hw, ICE_DBG_AQ_DESC, "ATQ: Control Send queue desc and buffer:\n"); 1079 1057 1080 - ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size); 1058 + ice_debug_cq(hw, cq, (void *)desc_on_ring, buf, buf_size, false); 1081 1059 1082 1060 (cq->sq.next_to_use)++; 1083 1061 if (cq->sq.next_to_use == cq->sq.count) ··· 1085 1063 wr32(hw, cq->sq.tail, cq->sq.next_to_use); 1086 1064 ice_flush(hw); 1087 1065 1088 - /* Wait a short time before initial ice_sq_done() check, to allow 1089 - * hardware time for completion. 1066 + /* Wait for the command to complete. If it finishes within the 1067 + * timeout, copy the descriptor back to temp. 
1090 1068 */ 1091 - udelay(5); 1092 - 1093 - timeout = jiffies + ICE_CTL_Q_SQ_CMD_TIMEOUT; 1094 - do { 1095 - if (ice_sq_done(hw, cq)) 1096 - break; 1097 - 1098 - usleep_range(100, 150); 1099 - } while (time_before(jiffies, timeout)); 1100 - 1101 - /* if ready, copy the desc back to temp */ 1102 1069 if (ice_sq_done(hw, cq)) { 1103 1070 memcpy(desc, desc_on_ring, sizeof(*desc)); 1104 1071 if (buf) { ··· 1119 1108 1120 1109 ice_debug(hw, ICE_DBG_AQ_MSG, "ATQ: desc and buffer writeback:\n"); 1121 1110 1122 - ice_debug_cq(hw, (void *)desc, buf, buf_size); 1111 + ice_debug_cq(hw, cq, (void *)desc, buf, buf_size, true); 1123 1112 1124 1113 /* save writeback AQ if requested */ 1125 - if (details->wb_desc) 1126 - memcpy(details->wb_desc, desc_on_ring, 1127 - sizeof(*details->wb_desc)); 1114 + if (cd && cd->wb_desc) 1115 + memcpy(cd->wb_desc, desc_on_ring, sizeof(*cd->wb_desc)); 1128 1116 1129 1117 /* update the error if time out occurred */ 1130 1118 if (!cmd_completed) { ··· 1164 1154 * @e: event info from the receive descriptor, includes any buffers 1165 1155 * @pending: number of events that could be left to process 1166 1156 * 1167 - * This function cleans one Admin Receive Queue element and returns 1168 - * the contents through e. It can also return how many events are 1169 - * left to process through 'pending'. 1157 + * Clean one element from the receive side of a control queue. On return 'e' 1158 + * contains contents of the message, and 'pending' contains the number of 1159 + * events left to process. 
1170 1160 */ 1171 1161 int 1172 1162 ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, ··· 1222 1212 1223 1213 ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n"); 1224 1214 1225 - ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size); 1215 + ice_debug_cq(hw, cq, (void *)desc, e->msg_buf, cq->rq_buf_size, true); 1226 1216 1227 1217 /* Restore the original datalen and buffer address in the desc, 1228 1218 * FW updates datalen to indicate the event message size
+1 -4
drivers/net/ethernet/intel/ice/ice_controlq.h
··· 43 43 }; 44 44 45 45 /* Control Queue timeout settings - max delay 1s */ 46 - #define ICE_CTL_Q_SQ_CMD_TIMEOUT HZ /* Wait max 1s */ 46 + #define ICE_CTL_Q_SQ_CMD_TIMEOUT USEC_PER_SEC 47 47 #define ICE_CTL_Q_ADMIN_INIT_TIMEOUT 10 /* Count 10 times */ 48 48 #define ICE_CTL_Q_ADMIN_INIT_MSEC 100 /* Check every 100msec */ 49 49 50 50 struct ice_ctl_q_ring { 51 51 void *dma_head; /* Virtual address to DMA head */ 52 52 struct ice_dma_mem desc_buf; /* descriptor ring memory */ 53 - void *cmd_buf; /* command buffer memory */ 54 53 55 54 union { 56 55 struct ice_dma_mem *sq_bi; ··· 78 79 struct ice_sq_cd { 79 80 struct ice_aq_desc *wb_desc; 80 81 }; 81 - 82 - #define ICE_CTL_Q_DETAILS(R, i) (&(((struct ice_sq_cd *)((R).cmd_buf))[i])) 83 82 84 83 /* rq event information */ 85 84 struct ice_rq_event_info {
+18 -10
drivers/net/ethernet/intel/ice/ice_osdep.h
··· 12 12 #include <linux/ethtool.h> 13 13 #include <linux/etherdevice.h> 14 14 #include <linux/if_ether.h> 15 + #include <linux/iopoll.h> 15 16 #include <linux/pci_ids.h> 16 17 #ifndef CONFIG_64BIT 17 18 #include <linux/io-64-nonatomic-lo-hi.h> ··· 23 22 #define rd32(a, reg) readl((a)->hw_addr + (reg)) 24 23 #define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg))) 25 24 #define rd64(a, reg) readq((a)->hw_addr + (reg)) 25 + 26 + #define rd32_poll_timeout(a, addr, val, cond, delay_us, timeout_us) \ 27 + read_poll_timeout(rd32, val, cond, delay_us, timeout_us, false, a, addr) 26 28 27 29 #define ice_flush(a) rd32((a), GLGEN_STAT) 28 30 #define ICE_M(m, s) ((m ## U) << (s)) ··· 43 39 #define ice_debug(hw, type, fmt, args...) \ 44 40 dev_dbg(ice_hw_to_dev(hw), fmt, ##args) 45 41 46 - #define ice_debug_array(hw, type, rowsize, groupsize, buf, len) \ 47 - print_hex_dump_debug(KBUILD_MODNAME " ", \ 48 - DUMP_PREFIX_OFFSET, rowsize, \ 49 - groupsize, buf, len, false) 50 - #else 42 + #define _ice_debug_array(hw, type, prefix, rowsize, groupsize, buf, len) \ 43 + print_hex_dump_debug(prefix, DUMP_PREFIX_OFFSET, \ 44 + rowsize, groupsize, buf, len, false) 45 + #else /* CONFIG_DYNAMIC_DEBUG */ 51 46 #define ice_debug(hw, type, fmt, args...) 
\ 52 47 do { \ 53 48 if ((type) & (hw)->debug_mask) \ ··· 54 51 } while (0) 55 52 56 53 #ifdef DEBUG 57 - #define ice_debug_array(hw, type, rowsize, groupsize, buf, len) \ 54 + #define _ice_debug_array(hw, type, prefix, rowsize, groupsize, buf, len) \ 58 55 do { \ 59 56 if ((type) & (hw)->debug_mask) \ 60 - print_hex_dump_debug(KBUILD_MODNAME, \ 61 - DUMP_PREFIX_OFFSET, \ 57 + print_hex_dump_debug(prefix, DUMP_PREFIX_OFFSET,\ 62 58 rowsize, groupsize, buf, \ 63 59 len, false); \ 64 60 } while (0) 65 - #else 66 - #define ice_debug_array(hw, type, rowsize, groupsize, buf, len) \ 61 + #else /* DEBUG */ 62 + #define _ice_debug_array(hw, type, prefix, rowsize, groupsize, buf, len) \ 67 63 do { \ 68 64 struct ice_hw *hw_l = hw; \ 69 65 if ((type) & (hw_l)->debug_mask) { \ ··· 79 77 } while (0) 80 78 #endif /* DEBUG */ 81 79 #endif /* CONFIG_DYNAMIC_DEBUG */ 80 + 81 + #define ice_debug_array(hw, type, rowsize, groupsize, buf, len) \ 82 + _ice_debug_array(hw, type, KBUILD_MODNAME, rowsize, groupsize, buf, len) 83 + 84 + #define ice_debug_array_w_prefix(hw, type, prefix, buf, len) \ 85 + _ice_debug_array(hw, type, prefix, 16, 1, buf, len) 82 86 83 87 #endif /* _ICE_OSDEP_H_ */
+2 -4
drivers/net/ethernet/intel/ice/ice_sched.c
··· 28 28 if (!root) 29 29 return -ENOMEM; 30 30 31 - /* coverity[suspicious_sizeof] */ 32 31 root->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[0], 33 - sizeof(*root), GFP_KERNEL); 32 + sizeof(*root->children), GFP_KERNEL); 34 33 if (!root->children) { 35 34 devm_kfree(ice_hw_to_dev(hw), root); 36 35 return -ENOMEM; ··· 185 186 if (!node) 186 187 return -ENOMEM; 187 188 if (hw->max_children[layer]) { 188 - /* coverity[suspicious_sizeof] */ 189 189 node->children = devm_kcalloc(ice_hw_to_dev(hw), 190 190 hw->max_children[layer], 191 - sizeof(*node), GFP_KERNEL); 191 + sizeof(*node->children), GFP_KERNEL); 192 192 if (!node->children) { 193 193 devm_kfree(ice_hw_to_dev(hw), node); 194 194 return -ENOMEM;