Merge branch 'sfc-2.6.39' of git://git.kernel.org/pub/scm/linux/kernel/git/bwh/sfc-2.6

7 files changed, 30 insertions(+), 31 deletions(-)

drivers/net/sfc/efx.c  +4 -2
···
  * processing to finish, then directly poll (and ack ) the eventq.
  * Finally reenable NAPI and interrupts.
  *
- * Since we are touching interrupts the caller should hold the suspend lock
+ * This is for use only during a loopback self-test. It must not
+ * deliver any packets up the stack as this can result in deadlock.
  */
 void efx_process_channel_now(struct efx_channel *channel)
 {
···
        BUG_ON(channel->channel >= efx->n_channels);
        BUG_ON(!channel->enabled);
+       BUG_ON(!efx->loopback_selftest);

        /* Disable interrupts and wait for ISRs to complete */
        efx_nic_disable_interrupts(efx);
···
         * restart the transmit interface early so the watchdog timer stops */
        efx_start_port(efx);

-       if (efx_dev_registered(efx))
+       if (efx_dev_registered(efx) && !efx->port_inhibited)
                netif_tx_wake_all_queues(efx->net_dev);

        efx_for_each_channel(channel, efx)

drivers/net/sfc/io.h  +2
···
        spin_lock_irqsave(&efx->biu_lock, flags);
        value->u32[0] = _efx_readd(efx, reg + 0);
+       rmb();
        value->u32[1] = _efx_readd(efx, reg + 4);
        value->u32[2] = _efx_readd(efx, reg + 8);
        value->u32[3] = _efx_readd(efx, reg + 12);
···
        value->u64[0] = (__force __le64)__raw_readq(membase + addr);
 #else
        value->u32[0] = (__force __le32)__raw_readl(membase + addr);
+       rmb();
        value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
 #endif
        spin_unlock_irqrestore(&efx->biu_lock, flags);

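Note on the io.h hunks: efx->biu_lock serializes whole accesses, but it does not order the individual MMIO loads with respect to each other; on weakly ordered architectures the second dword could be read before the first. The likely intent of the added rmb() is to force the dwords to be read in address order so the NIC can present a consistent snapshot of a multi-dword register. A minimal sketch of the same pattern, using a hypothetical demo_nic device rather than the sfc API:

    #include <linux/io.h>
    #include <linux/spinlock.h>

    struct demo_nic {
            void __iomem *membase;
            spinlock_t reg_lock;
    };

    /* Read a 64-bit register as two 32-bit MMIO loads.  The lock keeps
     * readers and writers from interleaving; the rmb() keeps the loads
     * in address order even where __raw_readl() may be reordered. */
    static u64 demo_read_split_reg(struct demo_nic *nic, unsigned int addr)
    {
            unsigned long flags;
            u32 lo, hi;

            spin_lock_irqsave(&nic->reg_lock, flags);
            lo = __raw_readl(nic->membase + addr);
            rmb();                  /* low dword must be read first */
            hi = __raw_readl(nic->membase + addr + 4);
            spin_unlock_irqrestore(&nic->reg_lock, flags);

            return ((u64)hi << 32) | lo;
    }
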
drivers/net/sfc/net_driver.h  -2
···
  * @eventq_mask: Event queue pointer mask
  * @eventq_read_ptr: Event queue read pointer
  * @last_eventq_read_ptr: Last event queue read pointer value.
- * @magic_count: Event queue test event count
  * @irq_count: Number of IRQs since last adaptive moderation decision
  * @irq_mod_score: IRQ moderation score
  * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
···
        unsigned int eventq_mask;
        unsigned int eventq_read_ptr;
        unsigned int last_eventq_read_ptr;
-       unsigned int magic_count;

        unsigned int irq_count;
        unsigned int irq_mod_score;

drivers/net/sfc/nic.c  +15 -7
···
 static inline efx_qword_t *efx_event(struct efx_channel *channel,
                                      unsigned int index)
 {
-       return ((efx_qword_t *) (channel->eventq.addr)) + index;
+       return ((efx_qword_t *) (channel->eventq.addr)) +
+               (index & channel->eventq_mask);
 }

 /* See if an event is present
···
        efx_dword_t reg;
        struct efx_nic *efx = channel->efx;

-       EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, channel->eventq_read_ptr);
+       EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
+                            channel->eventq_read_ptr & channel->eventq_mask);
        efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
                         channel->channel);
 }
···
        code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
        if (code == EFX_CHANNEL_MAGIC_TEST(channel))
-               ++channel->magic_count;
+               ; /* ignore */
        else if (code == EFX_CHANNEL_MAGIC_FILL(channel))
                /* The queue must be empty, so we won't receive any rx
                 * events, so efx_process_channel() won't refill the
···
                /* Clear this event by marking it all ones */
                EFX_SET_QWORD(*p_event);

-               /* Increment read pointer */
-               read_ptr = (read_ptr + 1) & channel->eventq_mask;
+               ++read_ptr;

                ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
···
        return spent;
 }

+/* Check whether an event is present in the eventq at the current
+ * read pointer. Only useful for self-test.
+ */
+bool efx_nic_event_present(struct efx_channel *channel)
+{
+       return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
+}

 /* Allocate buffer table entries for event queue */
 int efx_nic_probe_eventq(struct efx_channel *channel)
···
        struct efx_tx_queue *tx_queue;
        struct efx_rx_queue *rx_queue;
        unsigned int read_ptr = channel->eventq_read_ptr;
-       unsigned int end_ptr = (read_ptr - 1) & channel->eventq_mask;
+       unsigned int end_ptr = read_ptr + channel->eventq_mask - 1;

        do {
                efx_qword_t *event = efx_event(channel, read_ptr);
···
                 * it's ok to throw away every non-flush event */
                EFX_SET_QWORD(*event);

-               read_ptr = (read_ptr + 1) & channel->eventq_mask;
+               ++read_ptr;
        } while (read_ptr != end_ptr);

        channel->eventq_read_ptr = read_ptr;

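Note on the nic.c hunks: eventq_read_ptr is now free-running and is masked only at the point of use, in efx_event() and when writing FRF_AZ_EVQ_RPTR. With unsigned arithmetic the pointer wraps harmlessly, and the self-test can detect progress just by watching the pointer change (see the selftest.c hunks below). An illustrative sketch of the free-running-index ring pattern, not sfc code, assuming a power-of-two ring size:

    /* Size must be a power of two so that masking implements modulo
     * and unsigned wrap-around is harmless.  Fill level is simply
     * write_idx - read_idx, so empty (0) and full (DEMO_RING_SIZE)
     * stay distinct without wasting a slot. */
    #define DEMO_RING_SIZE 256u
    #define DEMO_RING_MASK (DEMO_RING_SIZE - 1)

    struct demo_ring {
            unsigned int entries[DEMO_RING_SIZE];
            unsigned int read_idx;          /* free-running */
            unsigned int write_idx;         /* free-running */
    };

    static unsigned int demo_ring_pop(struct demo_ring *r)
    {
            /* Mask only on access, as efx_event() now does */
            unsigned int v = r->entries[r->read_idx & DEMO_RING_MASK];

            ++r->read_idx;
            return v;
    }
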
drivers/net/sfc/nic.h  +1
···
 extern void efx_nic_remove_eventq(struct efx_channel *channel);
 extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota);
 extern void efx_nic_eventq_read_ack(struct efx_channel *channel);
+extern bool efx_nic_event_present(struct efx_channel *channel);

 /* MAC/PHY */
 extern void falcon_drain_tx_fifo(struct efx_nic *efx);

drivers/net/sfc/selftest.c  +6 -19
···
 static int efx_test_interrupts(struct efx_nic *efx,
                                struct efx_self_tests *tests)
 {
-       struct efx_channel *channel;
-
        netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
        tests->interrupt = -1;

        /* Reset interrupt flag */
        efx->last_irq_cpu = -1;
        smp_wmb();
-
-       /* ACK each interrupting event queue. Receiving an interrupt due to
-        * traffic before a test event is raised is considered a pass */
-       efx_for_each_channel(channel, efx) {
-               if (channel->work_pending)
-                       efx_process_channel_now(channel);
-               if (efx->last_irq_cpu >= 0)
-                       goto success;
-       }

        efx_nic_generate_interrupt(efx);
···
                               struct efx_self_tests *tests)
 {
        struct efx_nic *efx = channel->efx;
-       unsigned int magic_count, count;
+       unsigned int read_ptr, count;

        tests->eventq_dma[channel->channel] = -1;
        tests->eventq_int[channel->channel] = -1;
        tests->eventq_poll[channel->channel] = -1;

-       magic_count = channel->magic_count;
+       read_ptr = channel->eventq_read_ptr;
        channel->efx->last_irq_cpu = -1;
        smp_wmb();
···
        do {
                schedule_timeout_uninterruptible(HZ / 100);

-               if (channel->work_pending)
-                       efx_process_channel_now(channel);
-
-               if (channel->magic_count != magic_count)
+               if (ACCESS_ONCE(channel->eventq_read_ptr) != read_ptr)
                        goto eventq_ok;
        } while (++count < 2);
···
        /* Check to see if event was received even if interrupt wasn't */
-       efx_process_channel_now(channel);
-       if (channel->magic_count != magic_count) {
+       if (efx_nic_event_present(channel)) {
                netif_err(efx, drv, efx->net_dev,
                          "channel %d event was generated, but "
                          "failed to trigger an interrupt\n", channel->channel);
···
        efx->loopback_mode = loopback_mode;
        __efx_reconfigure_port(efx);
        mutex_unlock(&efx->mac_lock);
+
+       netif_tx_wake_all_queues(efx->net_dev);

        return rc_test;
 }

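Note on the selftest.c hunks: the tests no longer drain event queues themselves (efx_process_channel_now() is now reserved for the loopback self-test, per the efx.c comment above); instead they wait for the normal IRQ/NAPI path to advance eventq_read_ptr. ACCESS_ONCE() (READ_ONCE() in later kernels) forces a fresh load of the pointer on each poll so the compiler cannot cache it across iterations. Roughly, as a hypothetical helper:

    /* Hypothetical helper, not part of the driver (assumes the usual
     * driver includes): poll until another context -- here the
     * IRQ/NAPI path -- advances the event queue read pointer. */
    static bool demo_wait_for_eventq(struct efx_channel *channel,
                                     unsigned int old_read_ptr, int tries)
    {
            while (tries--) {
                    schedule_timeout_uninterruptible(HZ / 100);
                    if (ACCESS_ONCE(channel->eventq_read_ptr) != old_read_ptr)
                            return true;    /* an event was processed */
            }
            return false;
    }
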
drivers/net/sfc/tx.c  +2 -1
···
         * queue state. */
        smp_mb();
        if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
-           likely(efx->port_enabled)) {
+           likely(efx->port_enabled) &&
+           likely(!efx->port_inhibited)) {
                fill_level = tx_queue->insert_count - tx_queue->read_count;
                if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
                        EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));