Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sfc: move channel start/stop code

Also includes interrupt enabling/disabling code.
Small code styling fixes included.

Signed-off-by: Alexandru-Mihai Maftei <amaftei@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Alex Maftei (amaftei); committed by David S. Miller.
Commits: e20ba5b1 768fd266

+190 -188
-119
drivers/net/ethernet/sfc/efx.c
··· 1086 1086 } 1087 1087 #endif /* CONFIG_SMP */ 1088 1088 1089 - int efx_soft_enable_interrupts(struct efx_nic *efx) 1090 - { 1091 - struct efx_channel *channel, *end_channel; 1092 - int rc; 1093 - 1094 - BUG_ON(efx->state == STATE_DISABLED); 1095 - 1096 - efx->irq_soft_enabled = true; 1097 - smp_wmb(); 1098 - 1099 - efx_for_each_channel(channel, efx) { 1100 - if (!channel->type->keep_eventq) { 1101 - rc = efx_init_eventq(channel); 1102 - if (rc) 1103 - goto fail; 1104 - } 1105 - efx_start_eventq(channel); 1106 - } 1107 - 1108 - efx_mcdi_mode_event(efx); 1109 - 1110 - return 0; 1111 - fail: 1112 - end_channel = channel; 1113 - efx_for_each_channel(channel, efx) { 1114 - if (channel == end_channel) 1115 - break; 1116 - efx_stop_eventq(channel); 1117 - if (!channel->type->keep_eventq) 1118 - efx_fini_eventq(channel); 1119 - } 1120 - 1121 - return rc; 1122 - } 1123 - 1124 - void efx_soft_disable_interrupts(struct efx_nic *efx) 1125 - { 1126 - struct efx_channel *channel; 1127 - 1128 - if (efx->state == STATE_DISABLED) 1129 - return; 1130 - 1131 - efx_mcdi_mode_poll(efx); 1132 - 1133 - efx->irq_soft_enabled = false; 1134 - smp_wmb(); 1135 - 1136 - if (efx->legacy_irq) 1137 - synchronize_irq(efx->legacy_irq); 1138 - 1139 - efx_for_each_channel(channel, efx) { 1140 - if (channel->irq) 1141 - synchronize_irq(channel->irq); 1142 - 1143 - efx_stop_eventq(channel); 1144 - if (!channel->type->keep_eventq) 1145 - efx_fini_eventq(channel); 1146 - } 1147 - 1148 - /* Flush the asynchronous MCDI request queue */ 1149 - efx_mcdi_flush_async(efx); 1150 - } 1151 - 1152 - int efx_enable_interrupts(struct efx_nic *efx) 1153 - { 1154 - struct efx_channel *channel, *end_channel; 1155 - int rc; 1156 - 1157 - BUG_ON(efx->state == STATE_DISABLED); 1158 - 1159 - if (efx->eeh_disabled_legacy_irq) { 1160 - enable_irq(efx->legacy_irq); 1161 - efx->eeh_disabled_legacy_irq = false; 1162 - } 1163 - 1164 - efx->type->irq_enable_master(efx); 1165 - 1166 - efx_for_each_channel(channel, efx) { 1167 
- if (channel->type->keep_eventq) { 1168 - rc = efx_init_eventq(channel); 1169 - if (rc) 1170 - goto fail; 1171 - } 1172 - } 1173 - 1174 - rc = efx_soft_enable_interrupts(efx); 1175 - if (rc) 1176 - goto fail; 1177 - 1178 - return 0; 1179 - 1180 - fail: 1181 - end_channel = channel; 1182 - efx_for_each_channel(channel, efx) { 1183 - if (channel == end_channel) 1184 - break; 1185 - if (channel->type->keep_eventq) 1186 - efx_fini_eventq(channel); 1187 - } 1188 - 1189 - efx->type->irq_disable_non_ev(efx); 1190 - 1191 - return rc; 1192 - } 1193 - 1194 - void efx_disable_interrupts(struct efx_nic *efx) 1195 - { 1196 - struct efx_channel *channel; 1197 - 1198 - efx_soft_disable_interrupts(efx); 1199 - 1200 - efx_for_each_channel(channel, efx) { 1201 - if (channel->type->keep_eventq) 1202 - efx_fini_eventq(channel); 1203 - } 1204 - 1205 - efx->type->irq_disable_non_ev(efx); 1206 - } 1207 - 1208 1089 void efx_remove_interrupts(struct efx_nic *efx) 1209 1090 { 1210 1091 struct efx_channel *channel;
+190
drivers/net/ethernet/sfc/efx_channels.c
··· 33 33 */ 34 34 static int napi_weight = 64; 35 35 36 + /************* 37 + * START/STOP 38 + *************/ 39 + 40 + int efx_soft_enable_interrupts(struct efx_nic *efx) 41 + { 42 + struct efx_channel *channel, *end_channel; 43 + int rc; 44 + 45 + BUG_ON(efx->state == STATE_DISABLED); 46 + 47 + efx->irq_soft_enabled = true; 48 + smp_wmb(); 49 + 50 + efx_for_each_channel(channel, efx) { 51 + if (!channel->type->keep_eventq) { 52 + rc = efx_init_eventq(channel); 53 + if (rc) 54 + goto fail; 55 + } 56 + efx_start_eventq(channel); 57 + } 58 + 59 + efx_mcdi_mode_event(efx); 60 + 61 + return 0; 62 + fail: 63 + end_channel = channel; 64 + efx_for_each_channel(channel, efx) { 65 + if (channel == end_channel) 66 + break; 67 + efx_stop_eventq(channel); 68 + if (!channel->type->keep_eventq) 69 + efx_fini_eventq(channel); 70 + } 71 + 72 + return rc; 73 + } 74 + 75 + void efx_soft_disable_interrupts(struct efx_nic *efx) 76 + { 77 + struct efx_channel *channel; 78 + 79 + if (efx->state == STATE_DISABLED) 80 + return; 81 + 82 + efx_mcdi_mode_poll(efx); 83 + 84 + efx->irq_soft_enabled = false; 85 + smp_wmb(); 86 + 87 + if (efx->legacy_irq) 88 + synchronize_irq(efx->legacy_irq); 89 + 90 + efx_for_each_channel(channel, efx) { 91 + if (channel->irq) 92 + synchronize_irq(channel->irq); 93 + 94 + efx_stop_eventq(channel); 95 + if (!channel->type->keep_eventq) 96 + efx_fini_eventq(channel); 97 + } 98 + 99 + /* Flush the asynchronous MCDI request queue */ 100 + efx_mcdi_flush_async(efx); 101 + } 102 + 103 + int efx_enable_interrupts(struct efx_nic *efx) 104 + { 105 + struct efx_channel *channel, *end_channel; 106 + int rc; 107 + 
109 + BUG_ON(efx->state == STATE_DISABLED); 110 + 111 + if (efx->eeh_disabled_legacy_irq) { 112 + enable_irq(efx->legacy_irq); 113 + efx->eeh_disabled_legacy_irq = false; 114 + } 115 + 116 + efx->type->irq_enable_master(efx); 117 + 118 + efx_for_each_channel(channel, efx) { 119 + if (channel->type->keep_eventq) { 120 + rc = efx_init_eventq(channel); 121 + if (rc) 122 + goto fail; 123 + } 124 + } 125 + 126 + rc = efx_soft_enable_interrupts(efx); 127 + if (rc) 128 + goto fail; 129 + 130 + return 0; 131 + 132 + fail: 133 + end_channel = channel; 134 + efx_for_each_channel(channel, efx) { 135 + if (channel == end_channel) 136 + break; 137 + if (channel->type->keep_eventq) 138 + efx_fini_eventq(channel); 139 + } 140 + 141 + efx->type->irq_disable_non_ev(efx); 142 + 143 + return rc; 144 + } 145 + 146 + void efx_disable_interrupts(struct efx_nic *efx) 147 + { 148 + struct efx_channel *channel; 149 + 150 + efx_soft_disable_interrupts(efx); 151 + 152 + efx_for_each_channel(channel, efx) { 153 + if (channel->type->keep_eventq) 154 + efx_fini_eventq(channel); 155 + } 156 + 157 + efx->type->irq_disable_non_ev(efx); 158 + } 159 + 160 + void efx_start_channels(struct efx_nic *efx) 161 + { 162 + struct efx_tx_queue *tx_queue; 163 + struct efx_rx_queue *rx_queue; 164 + struct efx_channel *channel; 165 + 166 + efx_for_each_channel(channel, efx) { 167 + efx_for_each_channel_tx_queue(tx_queue, channel) { 168 + efx_init_tx_queue(tx_queue); 169 + atomic_inc(&efx->active_queues); 170 + } 171 + 172 + efx_for_each_channel_rx_queue(rx_queue, channel) { 173 + efx_init_rx_queue(rx_queue); 174 + atomic_inc(&efx->active_queues); 175 + efx_stop_eventq(channel); 176 + efx_fast_push_rx_descriptors(rx_queue, false); 177 + efx_start_eventq(channel); 178 + } 179 + 180 + WARN_ON(channel->rx_pkt_n_frags); 181 + } 182 + } 183 + 184 + void efx_stop_channels(struct efx_nic *efx) 185 + { 186 + struct efx_tx_queue *tx_queue; 187 + struct efx_rx_queue *rx_queue; 188 + struct efx_channel *channel; 189 + 
int rc = 0; 190 + 191 + /* Stop RX refill */ 192 + efx_for_each_channel(channel, efx) { 193 + efx_for_each_channel_rx_queue(rx_queue, channel) 194 + rx_queue->refill_enabled = false; 195 + } 196 + 197 + efx_for_each_channel(channel, efx) { 198 + /* RX packet processing is pipelined, so wait for the 199 + * NAPI handler to complete. At least event queue 0 200 + * might be kept active by non-data events, so don't 201 + * use napi_synchronize() but actually disable NAPI 202 + * temporarily. 203 + */ 204 + if (efx_channel_has_rx_queue(channel)) { 205 + efx_stop_eventq(channel); 206 + efx_start_eventq(channel); 207 + } 208 + } 209 + 210 + if (efx->type->fini_dmaq) 211 + rc = efx->type->fini_dmaq(efx); 212 + if (rc) { 213 + netif_err(efx, drv, efx->net_dev, "failed to flush queues\n"); 214 + } else { 215 + netif_dbg(efx, drv, efx->net_dev, 216 + "successfully flushed all queues\n"); 217 + } 218 + 219 + efx_for_each_channel(channel, efx) { 220 + efx_for_each_channel_rx_queue(rx_queue, channel) 221 + efx_fini_rx_queue(rx_queue); 222 + efx_for_each_possible_channel_tx_queue(tx_queue, channel) 223 + efx_fini_tx_queue(tx_queue); 224 + } 225 + } 226 + 36 227 /************************************************************************** 37 228 * 38 229 * NAPI interface
-69
drivers/net/ethernet/sfc/efx_common.c
··· 238 238 * 239 239 *************************************************************************/ 240 240 241 - void efx_start_channels(struct efx_nic *efx) 242 - { 243 - struct efx_tx_queue *tx_queue; 244 - struct efx_rx_queue *rx_queue; 245 - struct efx_channel *channel; 246 - 247 - efx_for_each_channel(channel, efx) { 248 - efx_for_each_channel_tx_queue(tx_queue, channel) { 249 - efx_init_tx_queue(tx_queue); 250 - atomic_inc(&efx->active_queues); 251 - } 252 - 253 - efx_for_each_channel_rx_queue(rx_queue, channel) { 254 - efx_init_rx_queue(rx_queue); 255 - atomic_inc(&efx->active_queues); 256 - efx_stop_eventq(channel); 257 - efx_fast_push_rx_descriptors(rx_queue, false); 258 - efx_start_eventq(channel); 259 - } 260 - 261 - WARN_ON(channel->rx_pkt_n_frags); 262 - } 263 - } 264 - 265 241 /* Channels are shutdown and reinitialised whilst the NIC is running 266 242 * to propagate configuration changes (mtu, checksum offload), or 267 243 * to clear hardware error conditions ··· 316 340 317 341 if (netif_device_present(efx->net_dev)) 318 342 netif_tx_wake_all_queues(efx->net_dev); 319 - } 320 - 321 - void efx_stop_channels(struct efx_nic *efx) 322 - { 323 - struct efx_tx_queue *tx_queue; 324 - struct efx_rx_queue *rx_queue; 325 - struct efx_channel *channel; 326 - int rc = 0; 327 - 328 - /* Stop RX refill */ 329 - efx_for_each_channel(channel, efx) { 330 - efx_for_each_channel_rx_queue(rx_queue, channel) 331 - rx_queue->refill_enabled = false; 332 - } 333 - 334 - efx_for_each_channel(channel, efx) { 335 - /* RX packet processing is pipelined, so wait for the 336 - * NAPI handler to complete. At least event queue 0 337 - * might be kept active by non-data events, so don't 338 - * use napi_synchronize() but actually disable NAPI 339 - * temporarily. 
340 - */ 341 - if (efx_channel_has_rx_queue(channel)) { 342 - efx_stop_eventq(channel); 343 - efx_start_eventq(channel); 344 - } 345 - } 346 - 347 - if (efx->type->fini_dmaq) 348 - rc = efx->type->fini_dmaq(efx); 349 - 350 - if (rc) { 351 - netif_err(efx, drv, efx->net_dev, "failed to flush queues\n"); 352 - } else { 353 - netif_dbg(efx, drv, efx->net_dev, 354 - "successfully flushed all queues\n"); 355 - } 356 - 357 - efx_for_each_channel(channel, efx) { 358 - efx_for_each_channel_rx_queue(rx_queue, channel) 359 - efx_fini_rx_queue(rx_queue); 360 - efx_for_each_possible_channel_tx_queue(tx_queue, channel) 361 - efx_fini_tx_queue(tx_queue); 362 - } 363 - efx->xdp_rxq_info_failed = false; 364 343 } 365 344 366 345 static void efx_stop_datapath(struct efx_nic *efx)