Merge branch 'sfc-2.6.39' of git://git.kernel.org/pub/scm/linux/kernel/git/bwh/sfc-2.6

+30 -31
+4 -2
drivers/net/sfc/efx.c
···
  * processing to finish, then directly poll (and ack ) the eventq.
  * Finally reenable NAPI and interrupts.
  *
- * Since we are touching interrupts the caller should hold the suspend lock
+ * This is for use only during a loopback self-test. It must not
+ * deliver any packets up the stack as this can result in deadlock.
  */
 void efx_process_channel_now(struct efx_channel *channel)
 {
···
 
 	BUG_ON(channel->channel >= efx->n_channels);
 	BUG_ON(!channel->enabled);
+	BUG_ON(!efx->loopback_selftest);
 
 	/* Disable interrupts and wait for ISRs to complete */
 	efx_nic_disable_interrupts(efx);
···
 	 * restart the transmit interface early so the watchdog timer stops */
 	efx_start_port(efx);
 
-	if (efx_dev_registered(efx))
+	if (efx_dev_registered(efx) && !efx->port_inhibited)
 		netif_tx_wake_all_queues(efx->net_dev);
 
 	efx_for_each_channel(channel, efx)
+2
drivers/net/sfc/io.h
···
 
 	spin_lock_irqsave(&efx->biu_lock, flags);
 	value->u32[0] = _efx_readd(efx, reg + 0);
+	rmb();
 	value->u32[1] = _efx_readd(efx, reg + 4);
 	value->u32[2] = _efx_readd(efx, reg + 8);
 	value->u32[3] = _efx_readd(efx, reg + 12);
···
 	value->u64[0] = (__force __le64)__raw_readq(membase + addr);
 #else
 	value->u32[0] = (__force __le32)__raw_readl(membase + addr);
+	rmb();
 	value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
 #endif
 	spin_unlock_irqrestore(&efx->biu_lock, flags);
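
The rmb() additions enforce ordering between the component reads of one oversized register: the __raw_readl()/_efx_readd() accessors carry no ordering guarantee of their own, so without a read barrier the CPU could issue the later dword loads ahead of the first and assemble an inconsistent value. Below is a rough userspace analogue of the same constraint, sketched with C11 fences; the fake_reg layout and the producer thread are illustrative assumptions, not the real BIU behaviour.

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical two-word "register" updated by another agent. */
struct fake_reg {
	_Atomic uint32_t u32[2];
};

static struct fake_reg reg;

static void *producer(void *arg)
{
	(void)arg;
	atomic_store_explicit(&reg.u32[0], 0x1234, memory_order_release);
	atomic_store_explicit(&reg.u32[1], 0x5678, memory_order_release);
	return NULL;
}

/* Reader side: the acquire fence plays the role of rmb(), forbidding
 * the second load from being reordered before the first. */
static uint64_t read_reg(void)
{
	uint64_t lo, hi;

	lo = atomic_load_explicit(&reg.u32[0], memory_order_relaxed);
	atomic_thread_fence(memory_order_acquire);	/* ~ rmb() */
	hi = atomic_load_explicit(&reg.u32[1], memory_order_relaxed);
	return lo | (hi << 32);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, producer, NULL);
	printf("value = %#llx\n", (unsigned long long)read_reg());
	pthread_join(t, NULL);
	return 0;
}
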
-2
drivers/net/sfc/net_driver.h
···
  * @eventq_mask: Event queue pointer mask
  * @eventq_read_ptr: Event queue read pointer
  * @last_eventq_read_ptr: Last event queue read pointer value.
- * @magic_count: Event queue test event count
  * @irq_count: Number of IRQs since last adaptive moderation decision
  * @irq_mod_score: IRQ moderation score
  * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
···
 	unsigned int eventq_mask;
 	unsigned int eventq_read_ptr;
 	unsigned int last_eventq_read_ptr;
-	unsigned int magic_count;
 
 	unsigned int irq_count;
 	unsigned int irq_mod_score;
+15 -7
drivers/net/sfc/nic.c
···
 static inline efx_qword_t *efx_event(struct efx_channel *channel,
 				     unsigned int index)
 {
-	return ((efx_qword_t *) (channel->eventq.addr)) + index;
+	return ((efx_qword_t *) (channel->eventq.addr)) +
+		(index & channel->eventq_mask);
 }
 
 /* See if an event is present
···
 	efx_dword_t reg;
 	struct efx_nic *efx = channel->efx;
 
-	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, channel->eventq_read_ptr);
+	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
+			     channel->eventq_read_ptr & channel->eventq_mask);
 	efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
 			 channel->channel);
 }
···
 	code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
 	if (code == EFX_CHANNEL_MAGIC_TEST(channel))
-		++channel->magic_count;
+		; /* ignore */
 	else if (code == EFX_CHANNEL_MAGIC_FILL(channel))
 		/* The queue must be empty, so we won't receive any rx
 		 * events, so efx_process_channel() won't refill the
···
 		/* Clear this event by marking it all ones */
 		EFX_SET_QWORD(*p_event);
 
-		/* Increment read pointer */
-		read_ptr = (read_ptr + 1) & channel->eventq_mask;
+		++read_ptr;
 
 		ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
···
 	return spent;
 }
 
+/* Check whether an event is present in the eventq at the current
+ * read pointer. Only useful for self-test.
+ */
+bool efx_nic_event_present(struct efx_channel *channel)
+{
+	return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
+}
 
 /* Allocate buffer table entries for event queue */
 int efx_nic_probe_eventq(struct efx_channel *channel)
···
 	struct efx_tx_queue *tx_queue;
 	struct efx_rx_queue *rx_queue;
 	unsigned int read_ptr = channel->eventq_read_ptr;
-	unsigned int end_ptr = (read_ptr - 1) & channel->eventq_mask;
+	unsigned int end_ptr = read_ptr + channel->eventq_mask - 1;
 
 	do {
 		efx_qword_t *event = efx_event(channel, read_ptr);
···
 		 * it's ok to throw away every non-flush event */
 		EFX_SET_QWORD(*event);
 
-		read_ptr = (read_ptr + 1) & channel->eventq_mask;
+		++read_ptr;
 	} while (read_ptr != end_ptr);
 
 	channel->eventq_read_ptr = read_ptr;
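
The thread running through the nic.c changes is a switch to a free-running event queue read pointer: eventq_read_ptr now counts up without bound and is reduced by the power-of-two eventq_mask only at the point of use, i.e. when indexing the ring in efx_event() and when writing the hardware read pointer. Unsigned wraparound then keeps fill-level arithmetic and "has the pointer moved" comparisons trivially correct. A standalone sketch of the idiom follows; the queue, its size, and the push/pop names are illustrative, not the driver's.

#include <assert.h>
#include <stdio.h>

#define QUEUE_SIZE 8			/* must be a power of two */
#define QUEUE_MASK (QUEUE_SIZE - 1)

static int queue[QUEUE_SIZE];

/* Free-running indices, masked only where an array slot is needed. */
static unsigned int write_ptr, read_ptr;

static void push(int v)
{
	assert(write_ptr - read_ptr < QUEUE_SIZE);	/* not full */
	queue[write_ptr++ & QUEUE_MASK] = v;
}

static int pop(void)
{
	assert(write_ptr != read_ptr);			/* not empty */
	return queue[read_ptr++ & QUEUE_MASK];
}

int main(void)
{
	int i;

	/* Start just below the 32-bit limit to show that plain unsigned
	 * subtraction still gives the right fill level across wraparound. */
	write_ptr = read_ptr = 0xfffffffeu;

	for (i = 0; i < 6; i++)
		push(i);
	printf("fill level = %u\n", write_ptr - read_ptr);	/* prints 6 */

	while (write_ptr != read_ptr)
		printf("%d\n", pop());
	return 0;
}

The same property is what lets efx_poll_flush_events() above compute its end pointer by plain addition rather than a masked decrement.
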
+1
drivers/net/sfc/nic.h
···
 extern void efx_nic_remove_eventq(struct efx_channel *channel);
 extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota);
 extern void efx_nic_eventq_read_ack(struct efx_channel *channel);
+extern bool efx_nic_event_present(struct efx_channel *channel);
 
 /* MAC/PHY */
 extern void falcon_drain_tx_fifo(struct efx_nic *efx);
+6 -19
drivers/net/sfc/selftest.c
···
 static int efx_test_interrupts(struct efx_nic *efx,
 			       struct efx_self_tests *tests)
 {
-	struct efx_channel *channel;
-
 	netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
 	tests->interrupt = -1;
 
 	/* Reset interrupt flag */
 	efx->last_irq_cpu = -1;
 	smp_wmb();
-
-	/* ACK each interrupting event queue. Receiving an interrupt due to
-	 * traffic before a test event is raised is considered a pass */
-	efx_for_each_channel(channel, efx) {
-		if (channel->work_pending)
-			efx_process_channel_now(channel);
-		if (efx->last_irq_cpu >= 0)
-			goto success;
-	}
 
 	efx_nic_generate_interrupt(efx);
···
 			  struct efx_self_tests *tests)
 {
 	struct efx_nic *efx = channel->efx;
-	unsigned int magic_count, count;
+	unsigned int read_ptr, count;
 
 	tests->eventq_dma[channel->channel] = -1;
 	tests->eventq_int[channel->channel] = -1;
 	tests->eventq_poll[channel->channel] = -1;
 
-	magic_count = channel->magic_count;
+	read_ptr = channel->eventq_read_ptr;
 	channel->efx->last_irq_cpu = -1;
 	smp_wmb();
···
 	do {
 		schedule_timeout_uninterruptible(HZ / 100);
 
-		if (channel->work_pending)
-			efx_process_channel_now(channel);
-
-		if (channel->magic_count != magic_count)
+		if (ACCESS_ONCE(channel->eventq_read_ptr) != read_ptr)
 			goto eventq_ok;
 	} while (++count < 2);
···
 	}
 
 	/* Check to see if event was received even if interrupt wasn't */
-	efx_process_channel_now(channel);
-	if (channel->magic_count != magic_count) {
+	if (efx_nic_event_present(channel)) {
 		netif_err(efx, drv, efx->net_dev,
 			  "channel %d event was generated, but "
 			  "failed to trigger an interrupt\n", channel->channel);
···
 	efx->loopback_mode = loopback_mode;
 	__efx_reconfigure_port(efx);
 	mutex_unlock(&efx->mac_lock);
+
+	netif_tx_wake_all_queues(efx->net_dev);
 
 	return rc_test;
 }
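
With magic_count removed, the event queue self-test detects delivery by watching eventq_read_ptr advance instead. The ACCESS_ONCE() wrapper forces the load to be re-issued on every pass of the polling loop; nothing in the loop body writes that field, so without it the compiler would be entitled to hoist the read out of the loop and spin on a stale value. A minimal userspace sketch of the pattern, assuming the classic kernel definition of the macro and a hypothetical thread standing in for the interrupt handler:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Classic ACCESS_ONCE: a volatile-qualified access the compiler must
 * perform exactly where it is written (relies on GNU C typeof). */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

static unsigned int eventq_read_ptr;	/* advanced by the "IRQ" thread */

static void *fake_irq(void *arg)
{
	(void)arg;
	usleep(10 * 1000);			/* event arrives later */
	ACCESS_ONCE(eventq_read_ptr) = 1;
	return NULL;
}

int main(void)
{
	pthread_t t;
	unsigned int read_ptr = eventq_read_ptr;

	pthread_create(&t, NULL, fake_irq, NULL);

	/* Without ACCESS_ONCE the compiler may read the field once and
	 * turn this into an infinite loop on a cached value. */
	while (ACCESS_ONCE(eventq_read_ptr) == read_ptr)
		;

	puts("eventq read pointer advanced");
	pthread_join(t, NULL);
	return 0;
}
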
+2 -1
drivers/net/sfc/tx.c
···
 	 * queue state. */
 	smp_mb();
 	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
-	    likely(efx->port_enabled)) {
+	    likely(efx->port_enabled) &&
+	    likely(!efx->port_inhibited)) {
 		fill_level = tx_queue->insert_count - tx_queue->read_count;
 		if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
 			EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));