Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6

* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6: (42 commits)
[PATCH] Fix section mismatch in de2104x.c
[PATCH] sky2: set lower pause threshold to prevent overrun
[PATCH] sky2: revert pci express extensions
[PATCH] skge: version 1.9
[PATCH] skge: better flow control negotiation
[PATCH] skge: pause mapping for fiber
[PATCH] skge: fix stuck irq when fiber down
[PATCH] powerpc/cell spidernet release all descrs
[PATCH] powerpc/cell spidernet DMA direction fix
[PATCH] powerpc/cell spidernet variable name change
[PATCH] powerpc/cell spidernet reduce DMA kicking
[PATCH] powerpc/cell spidernet
[PATCH] powerpc/cell spidernet refine locking
[PATCH] powerpc/cell spidernet NAPI polling info.
[PATCH] powerpc/cell spidernet low watermark patch.
[PATCH] powerpc/cell spidernet incorrect offset
[PATCH] powerpc/cell spidernet stop error printing patch.
[PATCH] powerpc/cell spidernet fix error interrupt print
[PATCH] powerpc/cell spidernet bogus rx interrupt bit
[PATCH] Spidernet stop queue when queue is full.
...

+717 -604
+5 -4
drivers/net/b44.c
···
 
 	__b44_set_mac_addr(bp);
 
-	if (dev->flags & IFF_ALLMULTI)
+	if ((dev->flags & IFF_ALLMULTI) ||
+	    (dev->mc_count > B44_MCAST_TABLE_SIZE))
 		val |= RXCONFIG_ALLMULTI;
 	else
 		i = __b44_load_mcast(bp, dev);
 
-	for (; i < 64; i++) {
+	for (; i < 64; i++)
 		__b44_cam_write(bp, zero, i);
-	}
+
 	bw32(bp, B44_RXCONFIG, val);
 	val = br32(bp, B44_CAM_CTRL);
 	bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
···
 	u16 *ptr = (u16 *) data;
 
 	for (i = 0; i < 128; i += 2)
-		ptr[i / 2] = readw(bp->regs + 4096 + i);
+		ptr[i / 2] = cpu_to_le16(readw(bp->regs + 4096 + i));
 
 	return 0;
 }
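The cpu_to_le16() wrapper in the EEPROM-dump hunk keeps the dumped words in a fixed little-endian layout regardless of host endianness. A minimal sketch of that conversion, using hypothetical names (example_dump_word, buf) rather than the b44 code:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Store a CPU-order 16-bit value into a dump buffer whose layout must
 * look identical on big- and little-endian hosts. */
static void example_dump_word(u16 *buf, int idx, u16 cpu_val)
{
	buf[idx] = cpu_to_le16(cpu_val);	/* always little-endian in the buffer */
}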
+2 -2
drivers/net/bonding/bond_alb.c
···
 	 * write lock to protect from other code that also
 	 * sets the promiscuity.
 	 */
-	write_lock(&bond->curr_slave_lock);
+	write_lock_bh(&bond->curr_slave_lock);
 
 	if (bond_info->primary_is_promisc &&
 	    (++bond_info->rlb_promisc_timeout_counter >= RLB_PROMISC_TIMEOUT)) {
···
 		bond_info->primary_is_promisc = 0;
 	}
 
-	write_unlock(&bond->curr_slave_lock);
+	write_unlock_bh(&bond->curr_slave_lock);
 
 	if (bond_info->rlb_rebalance) {
 		bond_info->rlb_rebalance = 0;
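The switch to the write_lock_bh()/write_unlock_bh() variants here is the usual pattern when a lock is also taken from softirq (e.g. timer) context: bottom halves must be disabled while the lock is held, or a softirq preempting the holder can deadlock on the same lock. A minimal sketch of the pattern, with hypothetical names (example_lock, example_state) rather than the bonding code:

#include <linux/spinlock.h>

static DEFINE_RWLOCK(example_lock);	/* hypothetical lock, also taken from softirq context */
static int example_state;		/* hypothetical data shared with that softirq */

static void example_update(int v)	/* runs in process context */
{
	write_lock_bh(&example_lock);	/* disables bottom halves while the lock is held */
	example_state = v;
	write_unlock_bh(&example_lock);	/* re-enables bottom halves */
}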
+7 -6
drivers/net/ehea/ehea.h
···
 #include <asm/io.h>
 
 #define DRV_NAME	"ehea"
-#define DRV_VERSION	"EHEA_0028"
+#define DRV_VERSION	"EHEA_0034"
 
 #define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \
 	| NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
···
 #define EHEA_MAX_ENTRIES_SQ    32767
 #define EHEA_MIN_ENTRIES_QP    127
 
+#define EHEA_SMALL_QUEUES
 #define EHEA_NUM_TX_QP 1
 
 #ifdef EHEA_SMALL_QUEUES
···
 #define EHEA_DEF_ENTRIES_RQ2    1023
 #define EHEA_DEF_ENTRIES_RQ3    1023
 #else
-#define EHEA_MAX_CQE_COUNT     32000
-#define EHEA_DEF_ENTRIES_SQ    16000
-#define EHEA_DEF_ENTRIES_RQ1   32080
-#define EHEA_DEF_ENTRIES_RQ2    4020
-#define EHEA_DEF_ENTRIES_RQ3    4020
+#define EHEA_MAX_CQE_COUNT      4080
+#define EHEA_DEF_ENTRIES_SQ     4080
+#define EHEA_DEF_ENTRIES_RQ1    8160
+#define EHEA_DEF_ENTRIES_RQ2    2040
+#define EHEA_DEF_ENTRIES_RQ3    2040
 #endif
 
 #define EHEA_MAX_ENTRIES_EQ 20
+3 -3
drivers/net/ehea/ehea_main.c
···
 	if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
 		if (!netif_carrier_ok(port->netdev)) {
 			ret = ehea_sense_port_attr(
-				adapter->port[portnum]);
+				port);
 			if (ret) {
 				ehea_error("failed resensing port "
 					   "attributes");
···
 		netif_stop_queue(port->netdev);
 		break;
 	default:
-		ehea_error("unknown event code %x", ec);
+		ehea_error("unknown event code %x, eqe=0x%lX", ec, eqe);
 		break;
 	}
 }
···
 
 	if (netif_msg_tx_queued(port)) {
 		ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr);
-		ehea_dump(swqe, sizeof(*swqe), "swqe");
+		ehea_dump(swqe, 512, "swqe");
 	}
 
 	ehea_post_swqe(pr->qp, swqe);
+229 -326
drivers/net/ehea/ehea_phyp.c
··· 44 44 #define H_ALL_RES_TYPE_MR 5 45 45 #define H_ALL_RES_TYPE_MW 6 46 46 47 - static long ehea_hcall_9arg_9ret(unsigned long opcode, 48 - unsigned long arg1, unsigned long arg2, 49 - unsigned long arg3, unsigned long arg4, 50 - unsigned long arg5, unsigned long arg6, 51 - unsigned long arg7, unsigned long arg8, 52 - unsigned long arg9, unsigned long *out1, 53 - unsigned long *out2,unsigned long *out3, 54 - unsigned long *out4,unsigned long *out5, 55 - unsigned long *out6,unsigned long *out7, 56 - unsigned long *out8,unsigned long *out9) 47 + static long ehea_plpar_hcall_norets(unsigned long opcode, 48 + unsigned long arg1, 49 + unsigned long arg2, 50 + unsigned long arg3, 51 + unsigned long arg4, 52 + unsigned long arg5, 53 + unsigned long arg6, 54 + unsigned long arg7) 57 55 { 58 - long hret; 56 + long ret; 59 57 int i, sleep_msecs; 60 58 61 59 for (i = 0; i < 5; i++) { 62 - hret = plpar_hcall_9arg_9ret(opcode,arg1, arg2, arg3, arg4, 63 - arg5, arg6, arg7, arg8, arg9, out1, 64 - out2, out3, out4, out5, out6, out7, 65 - out8, out9); 66 - if (H_IS_LONG_BUSY(hret)) { 67 - sleep_msecs = get_longbusy_msecs(hret); 60 + ret = plpar_hcall_norets(opcode, arg1, arg2, arg3, arg4, 61 + arg5, arg6, arg7); 62 + 63 + if (H_IS_LONG_BUSY(ret)) { 64 + sleep_msecs = get_longbusy_msecs(ret); 68 65 msleep_interruptible(sleep_msecs); 69 66 continue; 70 67 } 71 68 72 - if (hret < H_SUCCESS) 73 - ehea_error("op=%lx hret=%lx " 74 - "i1=%lx i2=%lx i3=%lx i4=%lx i5=%lx i6=%lx " 75 - "i7=%lx i8=%lx i9=%lx " 76 - "o1=%lx o2=%lx o3=%lx o4=%lx o5=%lx o6=%lx " 77 - "o7=%lx o8=%lx o9=%lx", 78 - opcode, hret, arg1, arg2, arg3, arg4, arg5, 79 - arg6, arg7, arg8, arg9, *out1, *out2, *out3, 80 - *out4, *out5, *out6, *out7, *out8, *out9); 81 - return hret; 69 + if (ret < H_SUCCESS) 70 + ehea_error("opcode=%lx ret=%lx" 71 + " arg1=%lx arg2=%lx arg3=%lx arg4=%lx" 72 + " arg5=%lx arg6=%lx arg7=%lx ", 73 + opcode, ret, 74 + arg1, arg2, arg3, arg4, arg5, 75 + arg6, arg7); 76 + 77 + return ret; 82 78 } 79 + 80 + return H_BUSY; 81 + } 82 + 83 + static long ehea_plpar_hcall9(unsigned long opcode, 84 + unsigned long *outs, /* array of 9 outputs */ 85 + unsigned long arg1, 86 + unsigned long arg2, 87 + unsigned long arg3, 88 + unsigned long arg4, 89 + unsigned long arg5, 90 + unsigned long arg6, 91 + unsigned long arg7, 92 + unsigned long arg8, 93 + unsigned long arg9) 94 + { 95 + long ret; 96 + int i, sleep_msecs; 97 + 98 + for (i = 0; i < 5; i++) { 99 + ret = plpar_hcall9(opcode, outs, 100 + arg1, arg2, arg3, arg4, arg5, 101 + arg6, arg7, arg8, arg9); 102 + 103 + if (H_IS_LONG_BUSY(ret)) { 104 + sleep_msecs = get_longbusy_msecs(ret); 105 + msleep_interruptible(sleep_msecs); 106 + continue; 107 + } 108 + 109 + if (ret < H_SUCCESS) 110 + ehea_error("opcode=%lx ret=%lx" 111 + " arg1=%lx arg2=%lx arg3=%lx arg4=%lx" 112 + " arg5=%lx arg6=%lx arg7=%lx arg8=%lx" 113 + " arg9=%lx" 114 + " out1=%lx out2=%lx out3=%lx out4=%lx" 115 + " out5=%lx out6=%lx out7=%lx out8=%lx" 116 + " out9=%lx", 117 + opcode, ret, 118 + arg1, arg2, arg3, arg4, arg5, 119 + arg6, arg7, arg8, arg9, 120 + outs[0], outs[1], outs[2], outs[3], 121 + outs[4], outs[5], outs[6], outs[7], 122 + outs[8]); 123 + 124 + return ret; 125 + } 126 + 83 127 return H_BUSY; 84 128 } 85 129 86 130 u64 ehea_h_query_ehea_qp(const u64 adapter_handle, const u8 qp_category, 87 131 const u64 qp_handle, const u64 sel_mask, void *cb_addr) 88 132 { 89 - u64 dummy; 90 - 91 - if ((((u64)cb_addr) & (PAGE_SIZE - 1)) != 0) { 92 - ehea_error("not on pageboundary"); 93 - return H_PARAMETER; 94 - } 
95 - 96 - return ehea_hcall_9arg_9ret(H_QUERY_HEA_QP, 97 - adapter_handle, /* R4 */ 98 - qp_category, /* R5 */ 99 - qp_handle, /* R6 */ 100 - sel_mask, /* R7 */ 101 - virt_to_abs(cb_addr), /* R8 */ 102 - 0, 0, 0, 0, /* R9-R12 */ 103 - &dummy, /* R4 */ 104 - &dummy, /* R5 */ 105 - &dummy, /* R6 */ 106 - &dummy, /* R7 */ 107 - &dummy, /* R8 */ 108 - &dummy, /* R9 */ 109 - &dummy, /* R10 */ 110 - &dummy, /* R11 */ 111 - &dummy); /* R12 */ 133 + return ehea_plpar_hcall_norets(H_QUERY_HEA_QP, 134 + adapter_handle, /* R4 */ 135 + qp_category, /* R5 */ 136 + qp_handle, /* R6 */ 137 + sel_mask, /* R7 */ 138 + virt_to_abs(cb_addr), /* R8 */ 139 + 0, 0); 112 140 } 113 141 114 142 /* input param R5 */ ··· 208 180 u64 *qp_handle, struct h_epas *h_epas) 209 181 { 210 182 u64 hret; 183 + u64 outs[PLPAR_HCALL9_BUFSIZE]; 211 184 212 185 u64 allocate_controls = 213 186 EHEA_BMASK_SET(H_ALL_RES_QP_EQPO, init_attr->low_lat_rq1 ? 1 : 0) ··· 248 219 EHEA_BMASK_SET(H_ALL_RES_QP_TH_RQ2, init_attr->rq2_threshold) 249 220 | EHEA_BMASK_SET(H_ALL_RES_QP_TH_RQ3, init_attr->rq3_threshold); 250 221 251 - u64 r5_out = 0; 252 - u64 r6_out = 0; 253 - u64 r7_out = 0; 254 - u64 r8_out = 0; 255 - u64 r9_out = 0; 256 - u64 g_la_user_out = 0; 257 - u64 r11_out = 0; 258 - u64 r12_out = 0; 222 + hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE, 223 + outs, 224 + adapter_handle, /* R4 */ 225 + allocate_controls, /* R5 */ 226 + init_attr->send_cq_handle, /* R6 */ 227 + init_attr->recv_cq_handle, /* R7 */ 228 + init_attr->aff_eq_handle, /* R8 */ 229 + r9_reg, /* R9 */ 230 + max_r10_reg, /* R10 */ 231 + r11_in, /* R11 */ 232 + threshold); /* R12 */ 259 233 260 - hret = ehea_hcall_9arg_9ret(H_ALLOC_HEA_RESOURCE, 261 - adapter_handle, /* R4 */ 262 - allocate_controls, /* R5 */ 263 - init_attr->send_cq_handle, /* R6 */ 264 - init_attr->recv_cq_handle, /* R7 */ 265 - init_attr->aff_eq_handle, /* R8 */ 266 - r9_reg, /* R9 */ 267 - max_r10_reg, /* R10 */ 268 - r11_in, /* R11 */ 269 - threshold, /* R12 */ 270 - qp_handle, /* R4 */ 271 - &r5_out, /* R5 */ 272 - &r6_out, /* R6 */ 273 - &r7_out, /* R7 */ 274 - &r8_out, /* R8 */ 275 - &r9_out, /* R9 */ 276 - &g_la_user_out, /* R10 */ 277 - &r11_out, /* R11 */ 278 - &r12_out); /* R12 */ 279 - 280 - init_attr->qp_nr = (u32)r5_out; 234 + *qp_handle = outs[0]; 235 + init_attr->qp_nr = (u32)outs[1]; 281 236 282 237 init_attr->act_nr_send_wqes = 283 - (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_SWQE, r6_out); 238 + (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_SWQE, outs[2]); 284 239 init_attr->act_nr_rwqes_rq1 = 285 - (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R1WQE, r6_out); 240 + (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R1WQE, outs[2]); 286 241 init_attr->act_nr_rwqes_rq2 = 287 - (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R2WQE, r6_out); 242 + (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R2WQE, outs[2]); 288 243 init_attr->act_nr_rwqes_rq3 = 289 - (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R3WQE, r6_out); 244 + (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R3WQE, outs[2]); 290 245 291 246 init_attr->act_wqe_size_enc_sq = init_attr->wqe_size_enc_sq; 292 247 init_attr->act_wqe_size_enc_rq1 = init_attr->wqe_size_enc_rq1; ··· 278 265 init_attr->act_wqe_size_enc_rq3 = init_attr->wqe_size_enc_rq3; 279 266 280 267 init_attr->nr_sq_pages = 281 - (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_SQ, r8_out); 268 + (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_SQ, outs[4]); 282 269 init_attr->nr_rq1_pages = 283 - (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ1, r8_out); 270 + (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ1, outs[4]); 284 271 init_attr->nr_rq2_pages = 285 - 
(u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ2, r9_out); 272 + (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ2, outs[5]); 286 273 init_attr->nr_rq3_pages = 287 - (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ3, r9_out); 274 + (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ3, outs[5]); 288 275 289 276 init_attr->liobn_sq = 290 - (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_SQ, r11_out); 277 + (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_SQ, outs[7]); 291 278 init_attr->liobn_rq1 = 292 - (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ1, r11_out); 279 + (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ1, outs[7]); 293 280 init_attr->liobn_rq2 = 294 - (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ2, r12_out); 281 + (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ2, outs[8]); 295 282 init_attr->liobn_rq3 = 296 - (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ3, r12_out); 283 + (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ3, outs[8]); 297 284 298 285 if (!hret) 299 - hcp_epas_ctor(h_epas, g_la_user_out, g_la_user_out); 286 + hcp_epas_ctor(h_epas, outs[6], outs[6]); 300 287 301 288 return hret; 302 289 } ··· 305 292 struct ehea_cq_attr *cq_attr, 306 293 u64 *cq_handle, struct h_epas *epas) 307 294 { 308 - u64 hret, dummy, act_nr_of_cqes_out, act_pages_out; 309 - u64 g_la_privileged_out, g_la_user_out; 295 + u64 hret; 296 + u64 outs[PLPAR_HCALL9_BUFSIZE]; 310 297 311 - hret = ehea_hcall_9arg_9ret(H_ALLOC_HEA_RESOURCE, 312 - adapter_handle, /* R4 */ 313 - H_ALL_RES_TYPE_CQ, /* R5 */ 314 - cq_attr->eq_handle, /* R6 */ 315 - cq_attr->cq_token, /* R7 */ 316 - cq_attr->max_nr_of_cqes, /* R8 */ 317 - 0, 0, 0, 0, /* R9-R12 */ 318 - cq_handle, /* R4 */ 319 - &dummy, /* R5 */ 320 - &dummy, /* R6 */ 321 - &act_nr_of_cqes_out, /* R7 */ 322 - &act_pages_out, /* R8 */ 323 - &g_la_privileged_out, /* R9 */ 324 - &g_la_user_out, /* R10 */ 325 - &dummy, /* R11 */ 326 - &dummy); /* R12 */ 298 + hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE, 299 + outs, 300 + adapter_handle, /* R4 */ 301 + H_ALL_RES_TYPE_CQ, /* R5 */ 302 + cq_attr->eq_handle, /* R6 */ 303 + cq_attr->cq_token, /* R7 */ 304 + cq_attr->max_nr_of_cqes, /* R8 */ 305 + 0, 0, 0, 0); /* R9-R12 */ 327 306 328 - cq_attr->act_nr_of_cqes = act_nr_of_cqes_out; 329 - cq_attr->nr_pages = act_pages_out; 307 + *cq_handle = outs[0]; 308 + cq_attr->act_nr_of_cqes = outs[3]; 309 + cq_attr->nr_pages = outs[4]; 330 310 331 311 if (!hret) 332 - hcp_epas_ctor(epas, g_la_privileged_out, g_la_user_out); 312 + hcp_epas_ctor(epas, outs[5], outs[6]); 333 313 334 314 return hret; 335 315 } ··· 367 361 u64 ehea_h_alloc_resource_eq(const u64 adapter_handle, 368 362 struct ehea_eq_attr *eq_attr, u64 *eq_handle) 369 363 { 370 - u64 hret, dummy, eq_liobn, allocate_controls; 371 - u64 ist1_out, ist2_out, ist3_out, ist4_out; 372 - u64 act_nr_of_eqes_out, act_pages_out; 364 + u64 hret, allocate_controls; 365 + u64 outs[PLPAR_HCALL9_BUFSIZE]; 373 366 374 367 /* resource type */ 375 368 allocate_controls = ··· 377 372 | EHEA_BMASK_SET(H_ALL_RES_EQ_INH_EQE_GEN, !eq_attr->eqe_gen) 378 373 | EHEA_BMASK_SET(H_ALL_RES_EQ_NON_NEQ_ISN, 1); 379 374 380 - hret = ehea_hcall_9arg_9ret(H_ALLOC_HEA_RESOURCE, 381 - adapter_handle, /* R4 */ 382 - allocate_controls, /* R5 */ 383 - eq_attr->max_nr_of_eqes, /* R6 */ 384 - 0, 0, 0, 0, 0, 0, /* R7-R10 */ 385 - eq_handle, /* R4 */ 386 - &dummy, /* R5 */ 387 - &eq_liobn, /* R6 */ 388 - &act_nr_of_eqes_out, /* R7 */ 389 - &act_pages_out, /* R8 */ 390 - &ist1_out, /* R9 */ 391 - &ist2_out, /* R10 */ 392 - &ist3_out, /* R11 */ 393 - &ist4_out); /* R12 */ 375 + hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE, 376 + outs, 377 + 
adapter_handle, /* R4 */ 378 + allocate_controls, /* R5 */ 379 + eq_attr->max_nr_of_eqes, /* R6 */ 380 + 0, 0, 0, 0, 0, 0); /* R7-R10 */ 394 381 395 - eq_attr->act_nr_of_eqes = act_nr_of_eqes_out; 396 - eq_attr->nr_pages = act_pages_out; 397 - eq_attr->ist1 = ist1_out; 398 - eq_attr->ist2 = ist2_out; 399 - eq_attr->ist3 = ist3_out; 400 - eq_attr->ist4 = ist4_out; 382 + *eq_handle = outs[0]; 383 + eq_attr->act_nr_of_eqes = outs[3]; 384 + eq_attr->nr_pages = outs[4]; 385 + eq_attr->ist1 = outs[5]; 386 + eq_attr->ist2 = outs[6]; 387 + eq_attr->ist3 = outs[7]; 388 + eq_attr->ist4 = outs[8]; 401 389 402 390 return hret; 403 391 } ··· 400 402 void *cb_addr, u64 *inv_attr_id, u64 *proc_mask, 401 403 u16 *out_swr, u16 *out_rwr) 402 404 { 403 - u64 hret, dummy, act_out_swr, act_out_rwr; 405 + u64 hret; 406 + u64 outs[PLPAR_HCALL9_BUFSIZE]; 404 407 405 - if ((((u64)cb_addr) & (PAGE_SIZE - 1)) != 0) { 406 - ehea_error("not on page boundary"); 407 - return H_PARAMETER; 408 - } 408 + hret = ehea_plpar_hcall9(H_MODIFY_HEA_QP, 409 + outs, 410 + adapter_handle, /* R4 */ 411 + (u64) cat, /* R5 */ 412 + qp_handle, /* R6 */ 413 + sel_mask, /* R7 */ 414 + virt_to_abs(cb_addr), /* R8 */ 415 + 0, 0, 0, 0); /* R9-R12 */ 409 416 410 - hret = ehea_hcall_9arg_9ret(H_MODIFY_HEA_QP, 411 - adapter_handle, /* R4 */ 412 - (u64) cat, /* R5 */ 413 - qp_handle, /* R6 */ 414 - sel_mask, /* R7 */ 415 - virt_to_abs(cb_addr), /* R8 */ 416 - 0, 0, 0, 0, /* R9-R12 */ 417 - inv_attr_id, /* R4 */ 418 - &dummy, /* R5 */ 419 - &dummy, /* R6 */ 420 - &act_out_swr, /* R7 */ 421 - &act_out_rwr, /* R8 */ 422 - proc_mask, /* R9 */ 423 - &dummy, /* R10 */ 424 - &dummy, /* R11 */ 425 - &dummy); /* R12 */ 426 - *out_swr = act_out_swr; 427 - *out_rwr = act_out_rwr; 417 + *inv_attr_id = outs[0]; 418 + *out_swr = outs[3]; 419 + *out_rwr = outs[4]; 420 + *proc_mask = outs[5]; 428 421 429 422 return hret; 430 423 } ··· 424 435 const u8 queue_type, const u64 resource_handle, 425 436 const u64 log_pageaddr, u64 count) 426 437 { 427 - u64 dummy, reg_control; 438 + u64 reg_control; 428 439 429 440 reg_control = EHEA_BMASK_SET(H_REG_RPAGE_PAGE_SIZE, pagesize) 430 441 | EHEA_BMASK_SET(H_REG_RPAGE_QT, queue_type); 431 442 432 - return ehea_hcall_9arg_9ret(H_REGISTER_HEA_RPAGES, 433 - adapter_handle, /* R4 */ 434 - reg_control, /* R5 */ 435 - resource_handle, /* R6 */ 436 - log_pageaddr, /* R7 */ 437 - count, /* R8 */ 438 - 0, 0, 0, 0, /* R9-R12 */ 439 - &dummy, /* R4 */ 440 - &dummy, /* R5 */ 441 - &dummy, /* R6 */ 442 - &dummy, /* R7 */ 443 - &dummy, /* R8 */ 444 - &dummy, /* R9 */ 445 - &dummy, /* R10 */ 446 - &dummy, /* R11 */ 447 - &dummy); /* R12 */ 443 + return ehea_plpar_hcall_norets(H_REGISTER_HEA_RPAGES, 444 + adapter_handle, /* R4 */ 445 + reg_control, /* R5 */ 446 + resource_handle, /* R6 */ 447 + log_pageaddr, /* R7 */ 448 + count, /* R8 */ 449 + 0, 0); /* R9-R10 */ 448 450 } 449 451 450 452 u64 ehea_h_register_smr(const u64 adapter_handle, const u64 orig_mr_handle, 451 453 const u64 vaddr_in, const u32 access_ctrl, const u32 pd, 452 454 struct ehea_mr *mr) 453 455 { 454 - u64 hret, dummy, lkey_out; 456 + u64 hret; 457 + u64 outs[PLPAR_HCALL9_BUFSIZE]; 455 458 456 - hret = ehea_hcall_9arg_9ret(H_REGISTER_SMR, 457 - adapter_handle , /* R4 */ 458 - orig_mr_handle, /* R5 */ 459 - vaddr_in, /* R6 */ 460 - (((u64)access_ctrl) << 32ULL), /* R7 */ 461 - pd, /* R8 */ 462 - 0, 0, 0, 0, /* R9-R12 */ 463 - &mr->handle, /* R4 */ 464 - &dummy, /* R5 */ 465 - &lkey_out, /* R6 */ 466 - &dummy, /* R7 */ 467 - &dummy, /* R8 */ 468 - &dummy, /* R9 */ 469 - 
&dummy, /* R10 */ 470 - &dummy, /* R11 */ 471 - &dummy); /* R12 */ 472 - mr->lkey = (u32)lkey_out; 459 + hret = ehea_plpar_hcall9(H_REGISTER_SMR, 460 + outs, 461 + adapter_handle , /* R4 */ 462 + orig_mr_handle, /* R5 */ 463 + vaddr_in, /* R6 */ 464 + (((u64)access_ctrl) << 32ULL), /* R7 */ 465 + pd, /* R8 */ 466 + 0, 0, 0, 0); /* R9-R12 */ 467 + 468 + mr->handle = outs[0]; 469 + mr->lkey = (u32)outs[2]; 473 470 474 471 return hret; 475 472 } 476 473 477 474 u64 ehea_h_disable_and_get_hea(const u64 adapter_handle, const u64 qp_handle) 478 475 { 479 - u64 hret, dummy, ladr_next_sq_wqe_out; 480 - u64 ladr_next_rq1_wqe_out, ladr_next_rq2_wqe_out, ladr_next_rq3_wqe_out; 476 + u64 outs[PLPAR_HCALL9_BUFSIZE]; 481 477 482 - hret = ehea_hcall_9arg_9ret(H_DISABLE_AND_GET_HEA, 483 - adapter_handle, /* R4 */ 484 - H_DISABLE_GET_EHEA_WQE_P, /* R5 */ 485 - qp_handle, /* R6 */ 486 - 0, 0, 0, 0, 0, 0, /* R7-R12 */ 487 - &ladr_next_sq_wqe_out, /* R4 */ 488 - &ladr_next_rq1_wqe_out, /* R5 */ 489 - &ladr_next_rq2_wqe_out, /* R6 */ 490 - &ladr_next_rq3_wqe_out, /* R7 */ 491 - &dummy, /* R8 */ 492 - &dummy, /* R9 */ 493 - &dummy, /* R10 */ 494 - &dummy, /* R11 */ 495 - &dummy); /* R12 */ 496 - return hret; 478 + return ehea_plpar_hcall9(H_DISABLE_AND_GET_HEA, 479 + outs, 480 + adapter_handle, /* R4 */ 481 + H_DISABLE_GET_EHEA_WQE_P, /* R5 */ 482 + qp_handle, /* R6 */ 483 + 0, 0, 0, 0, 0, 0); /* R7-R12 */ 497 484 } 498 485 499 486 u64 ehea_h_free_resource(const u64 adapter_handle, const u64 res_handle) 500 487 { 501 - u64 dummy; 502 - 503 - return ehea_hcall_9arg_9ret(H_FREE_RESOURCE, 504 - adapter_handle, /* R4 */ 505 - res_handle, /* R5 */ 506 - 0, 0, 0, 0, 0, 0, 0, /* R6-R12 */ 507 - &dummy, /* R4 */ 508 - &dummy, /* R5 */ 509 - &dummy, /* R6 */ 510 - &dummy, /* R7 */ 511 - &dummy, /* R8 */ 512 - &dummy, /* R9 */ 513 - &dummy, /* R10 */ 514 - &dummy, /* R11 */ 515 - &dummy); /* R12 */ 488 + return ehea_plpar_hcall_norets(H_FREE_RESOURCE, 489 + adapter_handle, /* R4 */ 490 + res_handle, /* R5 */ 491 + 0, 0, 0, 0, 0); /* R6-R10 */ 516 492 } 517 493 518 494 u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr, 519 495 const u64 length, const u32 access_ctrl, 520 496 const u32 pd, u64 *mr_handle, u32 *lkey) 521 497 { 522 - u64 hret, dummy, lkey_out; 498 + u64 hret; 499 + u64 outs[PLPAR_HCALL9_BUFSIZE]; 523 500 524 - hret = ehea_hcall_9arg_9ret(H_ALLOC_HEA_RESOURCE, 525 - adapter_handle, /* R4 */ 526 - 5, /* R5 */ 527 - vaddr, /* R6 */ 528 - length, /* R7 */ 529 - (((u64) access_ctrl) << 32ULL),/* R8 */ 530 - pd, /* R9 */ 531 - 0, 0, 0, /* R10-R12 */ 532 - mr_handle, /* R4 */ 533 - &dummy, /* R5 */ 534 - &lkey_out, /* R6 */ 535 - &dummy, /* R7 */ 536 - &dummy, /* R8 */ 537 - &dummy, /* R9 */ 538 - &dummy, /* R10 */ 539 - &dummy, /* R11 */ 540 - &dummy); /* R12 */ 541 - *lkey = (u32) lkey_out; 501 + hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE, 502 + outs, 503 + adapter_handle, /* R4 */ 504 + 5, /* R5 */ 505 + vaddr, /* R6 */ 506 + length, /* R7 */ 507 + (((u64) access_ctrl) << 32ULL), /* R8 */ 508 + pd, /* R9 */ 509 + 0, 0, 0); /* R10-R12 */ 542 510 511 + *mr_handle = outs[0]; 512 + *lkey = (u32)outs[2]; 543 513 return hret; 544 514 } 545 515 ··· 518 570 519 571 u64 ehea_h_query_ehea(const u64 adapter_handle, void *cb_addr) 520 572 { 521 - u64 hret, dummy, cb_logaddr; 573 + u64 hret, cb_logaddr; 522 574 523 575 cb_logaddr = virt_to_abs(cb_addr); 524 576 525 - hret = ehea_hcall_9arg_9ret(H_QUERY_HEA, 526 - adapter_handle, /* R4 */ 527 - cb_logaddr, /* R5 */ 528 - 0, 0, 0, 0, 0, 0, 0, /* R6-R12 
*/ 529 - &dummy, /* R4 */ 530 - &dummy, /* R5 */ 531 - &dummy, /* R6 */ 532 - &dummy, /* R7 */ 533 - &dummy, /* R8 */ 534 - &dummy, /* R9 */ 535 - &dummy, /* R10 */ 536 - &dummy, /* R11 */ 537 - &dummy); /* R12 */ 577 + hret = ehea_plpar_hcall_norets(H_QUERY_HEA, 578 + adapter_handle, /* R4 */ 579 + cb_logaddr, /* R5 */ 580 + 0, 0, 0, 0, 0); /* R6-R10 */ 538 581 #ifdef DEBUG 539 582 ehea_dmp(cb_addr, sizeof(struct hcp_query_ehea), "hcp_query_ehea"); 540 583 #endif ··· 536 597 const u8 cb_cat, const u64 select_mask, 537 598 void *cb_addr) 538 599 { 539 - u64 port_info, dummy; 600 + u64 port_info; 540 601 u64 cb_logaddr = virt_to_abs(cb_addr); 541 602 u64 arr_index = 0; 542 603 543 604 port_info = EHEA_BMASK_SET(H_MEHEAPORT_CAT, cb_cat) 544 605 | EHEA_BMASK_SET(H_MEHEAPORT_PN, port_num); 545 606 546 - return ehea_hcall_9arg_9ret(H_QUERY_HEA_PORT, 547 - adapter_handle, /* R4 */ 548 - port_info, /* R5 */ 549 - select_mask, /* R6 */ 550 - arr_index, /* R7 */ 551 - cb_logaddr, /* R8 */ 552 - 0, 0, 0, 0, /* R9-R12 */ 553 - &dummy, /* R4 */ 554 - &dummy, /* R5 */ 555 - &dummy, /* R6 */ 556 - &dummy, /* R7 */ 557 - &dummy, /* R8 */ 558 - &dummy, /* R9 */ 559 - &dummy, /* R10 */ 560 - &dummy, /* R11 */ 561 - &dummy); /* R12 */ 607 + return ehea_plpar_hcall_norets(H_QUERY_HEA_PORT, 608 + adapter_handle, /* R4 */ 609 + port_info, /* R5 */ 610 + select_mask, /* R6 */ 611 + arr_index, /* R7 */ 612 + cb_logaddr, /* R8 */ 613 + 0, 0); /* R9-R10 */ 562 614 } 563 615 564 616 u64 ehea_h_modify_ehea_port(const u64 adapter_handle, const u16 port_num, 565 617 const u8 cb_cat, const u64 select_mask, 566 618 void *cb_addr) 567 619 { 568 - u64 port_info, dummy, inv_attr_ident, proc_mask; 620 + u64 outs[PLPAR_HCALL9_BUFSIZE]; 621 + u64 port_info; 569 622 u64 arr_index = 0; 570 623 u64 cb_logaddr = virt_to_abs(cb_addr); 571 624 ··· 566 635 #ifdef DEBUG 567 636 ehea_dump(cb_addr, sizeof(struct hcp_ehea_port_cb0), "Before HCALL"); 568 637 #endif 569 - return ehea_hcall_9arg_9ret(H_MODIFY_HEA_PORT, 570 - adapter_handle, /* R4 */ 571 - port_info, /* R5 */ 572 - select_mask, /* R6 */ 573 - arr_index, /* R7 */ 574 - cb_logaddr, /* R8 */ 575 - 0, 0, 0, 0, /* R9-R12 */ 576 - &inv_attr_ident, /* R4 */ 577 - &proc_mask, /* R5 */ 578 - &dummy, /* R6 */ 579 - &dummy, /* R7 */ 580 - &dummy, /* R8 */ 581 - &dummy, /* R9 */ 582 - &dummy, /* R10 */ 583 - &dummy, /* R11 */ 584 - &dummy); /* R12 */ 638 + return ehea_plpar_hcall9(H_MODIFY_HEA_PORT, 639 + outs, 640 + adapter_handle, /* R4 */ 641 + port_info, /* R5 */ 642 + select_mask, /* R6 */ 643 + arr_index, /* R7 */ 644 + cb_logaddr, /* R8 */ 645 + 0, 0, 0, 0); /* R9-R12 */ 585 646 } 586 647 587 648 u64 ehea_h_reg_dereg_bcmc(const u64 adapter_handle, const u16 port_num, 588 649 const u8 reg_type, const u64 mc_mac_addr, 589 650 const u16 vlan_id, const u32 hcall_id) 590 651 { 591 - u64 r5_port_num, r6_reg_type, r7_mc_mac_addr, r8_vlan_id, dummy; 652 + u64 r5_port_num, r6_reg_type, r7_mc_mac_addr, r8_vlan_id; 592 653 u64 mac_addr = mc_mac_addr >> 16; 593 654 594 655 r5_port_num = EHEA_BMASK_SET(H_REGBCMC_PN, port_num); ··· 588 665 r7_mc_mac_addr = EHEA_BMASK_SET(H_REGBCMC_MACADDR, mac_addr); 589 666 r8_vlan_id = EHEA_BMASK_SET(H_REGBCMC_VLANID, vlan_id); 590 667 591 - return ehea_hcall_9arg_9ret(hcall_id, 592 - adapter_handle, /* R4 */ 593 - r5_port_num, /* R5 */ 594 - r6_reg_type, /* R6 */ 595 - r7_mc_mac_addr, /* R7 */ 596 - r8_vlan_id, /* R8 */ 597 - 0, 0, 0, 0, /* R9-R12 */ 598 - &dummy, /* R4 */ 599 - &dummy, /* R5 */ 600 - &dummy, /* R6 */ 601 - &dummy, /* R7 */ 602 - &dummy, 
/* R8 */ 603 - &dummy, /* R9 */ 604 - &dummy, /* R10 */ 605 - &dummy, /* R11 */ 606 - &dummy); /* R12 */ 668 + return ehea_plpar_hcall_norets(hcall_id, 669 + adapter_handle, /* R4 */ 670 + r5_port_num, /* R5 */ 671 + r6_reg_type, /* R6 */ 672 + r7_mc_mac_addr, /* R7 */ 673 + r8_vlan_id, /* R8 */ 674 + 0, 0); /* R9-R12 */ 607 675 } 608 676 609 677 u64 ehea_h_reset_events(const u64 adapter_handle, const u64 neq_handle, 610 678 const u64 event_mask) 611 679 { 612 - u64 dummy; 613 - 614 - return ehea_hcall_9arg_9ret(H_RESET_EVENTS, 615 - adapter_handle, /* R4 */ 616 - neq_handle, /* R5 */ 617 - event_mask, /* R6 */ 618 - 0, 0, 0, 0, 0, 0, /* R7-R12 */ 619 - &dummy, /* R4 */ 620 - &dummy, /* R5 */ 621 - &dummy, /* R6 */ 622 - &dummy, /* R7 */ 623 - &dummy, /* R8 */ 624 - &dummy, /* R9 */ 625 - &dummy, /* R10 */ 626 - &dummy, /* R11 */ 627 - &dummy); /* R12 */ 680 + return ehea_plpar_hcall_norets(H_RESET_EVENTS, 681 + adapter_handle, /* R4 */ 682 + neq_handle, /* R5 */ 683 + event_mask, /* R6 */ 684 + 0, 0, 0, 0); /* R7-R12 */ 628 685 }
+17 -14
drivers/net/forcedeth.c
···
 	u8 __iomem *base = get_hwbase(dev);
 	u32 events;
 	int i;
+	unsigned long flags;
 
 	dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);
 
···
 		if (!(events & np->irqmask))
 			break;
 
-		spin_lock_irq(&np->lock);
+		spin_lock_irqsave(&np->lock, flags);
 		nv_tx_done(dev);
-		spin_unlock_irq(&np->lock);
+		spin_unlock_irqrestore(&np->lock, flags);
 
 		if (events & (NVREG_IRQ_TX_ERR)) {
 			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
 						dev->name, events);
 		}
 		if (i > max_interrupt_work) {
-			spin_lock_irq(&np->lock);
+			spin_lock_irqsave(&np->lock, flags);
 			/* disable interrupts on the nic */
 			writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
 			pci_push(base);
···
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
 			}
 			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
-			spin_unlock_irq(&np->lock);
+			spin_unlock_irqrestore(&np->lock, flags);
 			break;
 		}
 
···
 	u8 __iomem *base = get_hwbase(dev);
 	u32 events;
 	int i;
+	unsigned long flags;
 
 	dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);
 
···
 
 		nv_rx_process(dev, dev->weight);
 		if (nv_alloc_rx(dev)) {
-			spin_lock_irq(&np->lock);
+			spin_lock_irqsave(&np->lock, flags);
 			if (!np->in_shutdown)
 				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-			spin_unlock_irq(&np->lock);
+			spin_unlock_irqrestore(&np->lock, flags);
 		}
 
 		if (i > max_interrupt_work) {
-			spin_lock_irq(&np->lock);
+			spin_lock_irqsave(&np->lock, flags);
 			/* disable interrupts on the nic */
 			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
 			pci_push(base);
···
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
 			}
 			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
-			spin_unlock_irq(&np->lock);
+			spin_unlock_irqrestore(&np->lock, flags);
 			break;
 		}
 	}
···
 	u8 __iomem *base = get_hwbase(dev);
 	u32 events;
 	int i;
+	unsigned long flags;
 
 	dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);
 
···
 			break;
 
 		if (events & NVREG_IRQ_LINK) {
-			spin_lock_irq(&np->lock);
+			spin_lock_irqsave(&np->lock, flags);
 			nv_link_irq(dev);
-			spin_unlock_irq(&np->lock);
+			spin_unlock_irqrestore(&np->lock, flags);
 		}
 		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
-			spin_lock_irq(&np->lock);
+			spin_lock_irqsave(&np->lock, flags);
 			nv_linkchange(dev);
-			spin_unlock_irq(&np->lock);
+			spin_unlock_irqrestore(&np->lock, flags);
 			np->link_timeout = jiffies + LINK_TIMEOUT;
 		}
 		if (events & (NVREG_IRQ_UNKNOWN)) {
···
 					dev->name, events);
 		}
 		if (i > max_interrupt_work) {
-			spin_lock_irq(&np->lock);
+			spin_lock_irqsave(&np->lock, flags);
 			/* disable interrupts on the nic */
 			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
 			pci_push(base);
···
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
 			}
 			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
-			spin_unlock_irq(&np->lock);
+			spin_unlock_irqrestore(&np->lock, flags);
 			break;
 		}
 
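The forcedeth.c hunks replace spin_lock_irq()/spin_unlock_irq() with the irqsave/irqrestore pair in the split interrupt handlers, so the handlers restore whatever interrupt state the caller had instead of unconditionally re-enabling IRQs on unlock. A minimal sketch of that locking pattern, with hypothetical names (struct example_priv, example_touch) rather than the forcedeth code:

#include <linux/spinlock.h>

struct example_priv {				/* hypothetical per-device data */
	spinlock_t lock;
	int shared_count;
};

static void example_touch(struct example_priv *np)
{
	unsigned long flags;

	spin_lock_irqsave(&np->lock, flags);	/* records the current interrupt state */
	np->shared_count++;			/* touch data shared with other contexts */
	spin_unlock_irqrestore(&np->lock, flags); /* restores it instead of forcing IRQs on */
}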
+50 -8
drivers/net/ibmveth.c
··· 213 213 } 214 214 215 215 free_index = pool->consumer_index++ % pool->size; 216 + pool->consumer_index = free_index; 216 217 index = pool->free_map[free_index]; 217 218 218 219 ibmveth_assert(index != IBM_VETH_INVALID_MAP); ··· 239 238 if(lpar_rc != H_SUCCESS) { 240 239 pool->free_map[free_index] = index; 241 240 pool->skbuff[index] = NULL; 242 - pool->consumer_index--; 241 + if (pool->consumer_index == 0) 242 + pool->consumer_index = pool->size - 1; 243 + else 244 + pool->consumer_index--; 243 245 dma_unmap_single(&adapter->vdev->dev, 244 246 pool->dma_addr[index], pool->buff_size, 245 247 DMA_FROM_DEVICE); ··· 330 326 DMA_FROM_DEVICE); 331 327 332 328 free_index = adapter->rx_buff_pool[pool].producer_index++ % adapter->rx_buff_pool[pool].size; 329 + adapter->rx_buff_pool[pool].producer_index = free_index; 333 330 adapter->rx_buff_pool[pool].free_map[free_index] = index; 334 331 335 332 mb(); ··· 442 437 &adapter->rx_buff_pool[i]); 443 438 } 444 439 440 + static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter, 441 + union ibmveth_buf_desc rxq_desc, u64 mac_address) 442 + { 443 + int rc, try_again = 1; 444 + 445 + /* After a kexec the adapter will still be open, so our attempt to 446 + * open it will fail. So if we get a failure we free the adapter and 447 + * try again, but only once. */ 448 + retry: 449 + rc = h_register_logical_lan(adapter->vdev->unit_address, 450 + adapter->buffer_list_dma, rxq_desc.desc, 451 + adapter->filter_list_dma, mac_address); 452 + 453 + if (rc != H_SUCCESS && try_again) { 454 + do { 455 + rc = h_free_logical_lan(adapter->vdev->unit_address); 456 + } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY)); 457 + 458 + try_again = 0; 459 + goto retry; 460 + } 461 + 462 + return rc; 463 + } 464 + 445 465 static int ibmveth_open(struct net_device *netdev) 446 466 { 447 467 struct ibmveth_adapter *adapter = netdev->priv; ··· 532 502 ibmveth_debug_printk("filter list @ 0x%p\n", adapter->filter_list_addr); 533 503 ibmveth_debug_printk("receive q @ 0x%p\n", adapter->rx_queue.queue_addr); 534 504 505 + h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE); 535 506 536 - lpar_rc = h_register_logical_lan(adapter->vdev->unit_address, 537 - adapter->buffer_list_dma, 538 - rxq_desc.desc, 539 - adapter->filter_list_dma, 540 - mac_address); 507 + lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address); 541 508 542 509 if(lpar_rc != H_SUCCESS) { 543 510 ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc); ··· 932 905 return -EINVAL; 933 906 } 934 907 908 + #ifdef CONFIG_NET_POLL_CONTROLLER 909 + static void ibmveth_poll_controller(struct net_device *dev) 910 + { 911 + ibmveth_replenish_task(dev->priv); 912 + ibmveth_interrupt(dev->irq, dev); 913 + } 914 + #endif 915 + 935 916 static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) 936 917 { 937 918 int rc, i; ··· 1012 977 netdev->ethtool_ops = &netdev_ethtool_ops; 1013 978 netdev->change_mtu = ibmveth_change_mtu; 1014 979 SET_NETDEV_DEV(netdev, &dev->dev); 980 + #ifdef CONFIG_NET_POLL_CONTROLLER 981 + netdev->poll_controller = ibmveth_poll_controller; 982 + #endif 1015 983 netdev->features |= NETIF_F_LLTX; 1016 984 spin_lock_init(&adapter->stats_lock); 1017 985 ··· 1170 1132 { 1171 1133 struct proc_dir_entry *entry; 1172 1134 if (ibmveth_proc_dir) { 1173 - entry = create_proc_entry(adapter->netdev->name, S_IFREG, ibmveth_proc_dir); 1135 + char u_addr[10]; 1136 + sprintf(u_addr, "%x", adapter->vdev->unit_address); 1137 + entry = 
create_proc_entry(u_addr, S_IFREG, ibmveth_proc_dir); 1174 1138 if (!entry) { 1175 1139 ibmveth_error_printk("Cannot create adapter proc entry"); 1176 1140 } else { ··· 1187 1147 static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter) 1188 1148 { 1189 1149 if (ibmveth_proc_dir) { 1190 - remove_proc_entry(adapter->netdev->name, ibmveth_proc_dir); 1150 + char u_addr[10]; 1151 + sprintf(u_addr, "%x", adapter->vdev->unit_address); 1152 + remove_proc_entry(u_addr, ibmveth_proc_dir); 1191 1153 } 1192 1154 } 1193 1155
+2 -2
drivers/net/mv643xx_eth.c
···
 	for (offset = ETH_MIB_BAD_OCTETS_RECEIVED;
 			offset <= ETH_MIB_FRAMES_1024_TO_MAX_OCTETS;
 			offset += 4)
-		*(u32 *)((char *)p + offset) = read_mib(mp, offset);
+		*(u32 *)((char *)p + offset) += read_mib(mp, offset);
 
 	p->good_octets_sent += read_mib(mp, ETH_MIB_GOOD_OCTETS_SENT_LOW);
 	p->good_octets_sent +=
···
 	for (offset = ETH_MIB_GOOD_FRAMES_SENT;
 			offset <= ETH_MIB_LATE_COLLISION;
 			offset += 4)
-		*(u32 *)((char *)p + offset) = read_mib(mp, offset);
+		*(u32 *)((char *)p + offset) += read_mib(mp, offset);
 }
 
 /*
+134 -86
drivers/net/skge.c
··· 43 43 #include "skge.h" 44 44 45 45 #define DRV_NAME "skge" 46 - #define DRV_VERSION "1.8" 46 + #define DRV_VERSION "1.9" 47 47 #define PFX DRV_NAME " " 48 48 49 49 #define DEFAULT_TX_RING_SIZE 128 ··· 197 197 else if (hw->chip_id == CHIP_ID_YUKON) 198 198 supported &= ~SUPPORTED_1000baseT_Half; 199 199 } else 200 - supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE 201 - | SUPPORTED_Autoneg; 200 + supported = SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half 201 + | SUPPORTED_FIBRE | SUPPORTED_Autoneg; 202 202 203 203 return supported; 204 204 } ··· 487 487 { 488 488 struct skge_port *skge = netdev_priv(dev); 489 489 490 - ecmd->tx_pause = (skge->flow_control == FLOW_MODE_LOC_SEND) 491 - || (skge->flow_control == FLOW_MODE_SYMMETRIC); 492 - ecmd->rx_pause = (skge->flow_control == FLOW_MODE_REM_SEND) 493 - || (skge->flow_control == FLOW_MODE_SYMMETRIC); 490 + ecmd->rx_pause = (skge->flow_control == FLOW_MODE_SYMMETRIC) 491 + || (skge->flow_control == FLOW_MODE_SYM_OR_REM); 492 + ecmd->tx_pause = ecmd->rx_pause || (skge->flow_control == FLOW_MODE_LOC_SEND); 494 493 495 - ecmd->autoneg = skge->autoneg; 494 + ecmd->autoneg = ecmd->rx_pause || ecmd->tx_pause; 496 495 } 497 496 498 497 static int skge_set_pauseparam(struct net_device *dev, 499 498 struct ethtool_pauseparam *ecmd) 500 499 { 501 500 struct skge_port *skge = netdev_priv(dev); 501 + struct ethtool_pauseparam old; 502 502 503 - skge->autoneg = ecmd->autoneg; 504 - if (ecmd->rx_pause && ecmd->tx_pause) 505 - skge->flow_control = FLOW_MODE_SYMMETRIC; 506 - else if (ecmd->rx_pause && !ecmd->tx_pause) 507 - skge->flow_control = FLOW_MODE_REM_SEND; 508 - else if (!ecmd->rx_pause && ecmd->tx_pause) 509 - skge->flow_control = FLOW_MODE_LOC_SEND; 510 - else 511 - skge->flow_control = FLOW_MODE_NONE; 503 + skge_get_pauseparam(dev, &old); 504 + 505 + if (ecmd->autoneg != old.autoneg) 506 + skge->flow_control = ecmd->autoneg ? FLOW_MODE_NONE : FLOW_MODE_SYMMETRIC; 507 + else { 508 + if (ecmd->rx_pause && ecmd->tx_pause) 509 + skge->flow_control = FLOW_MODE_SYMMETRIC; 510 + else if (ecmd->rx_pause && !ecmd->tx_pause) 511 + skge->flow_control = FLOW_MODE_SYM_OR_REM; 512 + else if (!ecmd->rx_pause && ecmd->tx_pause) 513 + skge->flow_control = FLOW_MODE_LOC_SEND; 514 + else 515 + skge->flow_control = FLOW_MODE_NONE; 516 + } 512 517 513 518 if (netif_running(dev)) 514 519 skge_phy_reset(skge); 520 + 515 521 return 0; 516 522 } 517 523 ··· 860 854 return 0; 861 855 } 862 856 857 + static const char *skge_pause(enum pause_status status) 858 + { 859 + switch(status) { 860 + case FLOW_STAT_NONE: 861 + return "none"; 862 + case FLOW_STAT_REM_SEND: 863 + return "rx only"; 864 + case FLOW_STAT_LOC_SEND: 865 + return "tx_only"; 866 + case FLOW_STAT_SYMMETRIC: /* Both station may send PAUSE */ 867 + return "both"; 868 + default: 869 + return "indeterminated"; 870 + } 871 + } 872 + 873 + 863 874 static void skge_link_up(struct skge_port *skge) 864 875 { 865 876 skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), ··· 885 862 netif_carrier_on(skge->netdev); 886 863 netif_wake_queue(skge->netdev); 887 864 888 - if (netif_msg_link(skge)) 865 + if (netif_msg_link(skge)) { 889 866 printk(KERN_INFO PFX 890 867 "%s: Link is up at %d Mbps, %s duplex, flow control %s\n", 891 868 skge->netdev->name, skge->speed, 892 869 skge->duplex == DUPLEX_FULL ? "full" : "half", 893 - (skge->flow_control == FLOW_MODE_NONE) ? "none" : 894 - (skge->flow_control == FLOW_MODE_LOC_SEND) ? "tx only" : 895 - (skge->flow_control == FLOW_MODE_REM_SEND) ? 
"rx only" : 896 - (skge->flow_control == FLOW_MODE_SYMMETRIC) ? "tx and rx" : 897 - "unknown"); 870 + skge_pause(skge->flow_status)); 871 + } 898 872 } 899 873 900 874 static void skge_link_down(struct skge_port *skge) ··· 902 882 903 883 if (netif_msg_link(skge)) 904 884 printk(KERN_INFO PFX "%s: Link is down.\n", skge->netdev->name); 885 + } 886 + 887 + 888 + static void xm_link_down(struct skge_hw *hw, int port) 889 + { 890 + struct net_device *dev = hw->dev[port]; 891 + struct skge_port *skge = netdev_priv(dev); 892 + u16 cmd, msk; 893 + 894 + if (hw->phy_type == SK_PHY_XMAC) { 895 + msk = xm_read16(hw, port, XM_IMSK); 896 + msk |= XM_IS_INP_ASS | XM_IS_LIPA_RC | XM_IS_RX_PAGE | XM_IS_AND; 897 + xm_write16(hw, port, XM_IMSK, msk); 898 + } 899 + 900 + cmd = xm_read16(hw, port, XM_MMU_CMD); 901 + cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX); 902 + xm_write16(hw, port, XM_MMU_CMD, cmd); 903 + /* dummy read to ensure writing */ 904 + (void) xm_read16(hw, port, XM_MMU_CMD); 905 + 906 + if (netif_carrier_ok(dev)) 907 + skge_link_down(skge); 905 908 } 906 909 907 910 static int __xm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val) ··· 1035 992 [FLOW_MODE_NONE] = 0, 1036 993 [FLOW_MODE_LOC_SEND] = PHY_AN_PAUSE_ASYM, 1037 994 [FLOW_MODE_SYMMETRIC] = PHY_AN_PAUSE_CAP, 1038 - [FLOW_MODE_REM_SEND] = PHY_AN_PAUSE_CAP | PHY_AN_PAUSE_ASYM, 995 + [FLOW_MODE_SYM_OR_REM] = PHY_AN_PAUSE_CAP | PHY_AN_PAUSE_ASYM, 996 + }; 997 + 998 + /* special defines for FIBER (88E1011S only) */ 999 + static const u16 fiber_pause_map[] = { 1000 + [FLOW_MODE_NONE] = PHY_X_P_NO_PAUSE, 1001 + [FLOW_MODE_LOC_SEND] = PHY_X_P_ASYM_MD, 1002 + [FLOW_MODE_SYMMETRIC] = PHY_X_P_SYM_MD, 1003 + [FLOW_MODE_SYM_OR_REM] = PHY_X_P_BOTH_MD, 1039 1004 }; 1040 1005 1041 1006 ··· 1059 1008 status = xm_phy_read(hw, port, PHY_BCOM_STAT); 1060 1009 1061 1010 if ((status & PHY_ST_LSYNC) == 0) { 1062 - u16 cmd = xm_read16(hw, port, XM_MMU_CMD); 1063 - cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX); 1064 - xm_write16(hw, port, XM_MMU_CMD, cmd); 1065 - /* dummy read to ensure writing */ 1066 - (void) xm_read16(hw, port, XM_MMU_CMD); 1067 - 1068 - if (netif_carrier_ok(dev)) 1069 - skge_link_down(skge); 1011 + xm_link_down(hw, port); 1070 1012 return; 1071 1013 } 1072 1014 ··· 1092 1048 return; 1093 1049 } 1094 1050 1095 - 1096 1051 /* We are using IEEE 802.3z/D5.0 Table 37-4 */ 1097 1052 switch (aux & PHY_B_AS_PAUSE_MSK) { 1098 1053 case PHY_B_AS_PAUSE_MSK: 1099 - skge->flow_control = FLOW_MODE_SYMMETRIC; 1054 + skge->flow_status = FLOW_STAT_SYMMETRIC; 1100 1055 break; 1101 1056 case PHY_B_AS_PRR: 1102 - skge->flow_control = FLOW_MODE_REM_SEND; 1057 + skge->flow_status = FLOW_STAT_REM_SEND; 1103 1058 break; 1104 1059 case PHY_B_AS_PRT: 1105 - skge->flow_control = FLOW_MODE_LOC_SEND; 1060 + skge->flow_status = FLOW_STAT_LOC_SEND; 1106 1061 break; 1107 1062 default: 1108 - skge->flow_control = FLOW_MODE_NONE; 1063 + skge->flow_status = FLOW_STAT_NONE; 1109 1064 } 1110 1065 skge->speed = SPEED_1000; 1111 1066 } ··· 1234 1191 if (skge->advertising & ADVERTISED_1000baseT_Full) 1235 1192 ctrl |= PHY_X_AN_FD; 1236 1193 1237 - switch(skge->flow_control) { 1238 - case FLOW_MODE_NONE: 1239 - ctrl |= PHY_X_P_NO_PAUSE; 1240 - break; 1241 - case FLOW_MODE_LOC_SEND: 1242 - ctrl |= PHY_X_P_ASYM_MD; 1243 - break; 1244 - case FLOW_MODE_SYMMETRIC: 1245 - ctrl |= PHY_X_P_BOTH_MD; 1246 - break; 1247 - } 1194 + ctrl |= fiber_pause_map[skge->flow_control]; 1248 1195 1249 1196 xm_phy_write(hw, port, PHY_XMAC_AUNE_ADV, ctrl); 1250 1197 ··· 1268 1235 status = xm_phy_read(hw, 
port, PHY_XMAC_STAT); 1269 1236 1270 1237 if ((status & PHY_ST_LSYNC) == 0) { 1271 - u16 cmd = xm_read16(hw, port, XM_MMU_CMD); 1272 - cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX); 1273 - xm_write16(hw, port, XM_MMU_CMD, cmd); 1274 - /* dummy read to ensure writing */ 1275 - (void) xm_read16(hw, port, XM_MMU_CMD); 1276 - 1277 - if (netif_carrier_ok(dev)) 1278 - skge_link_down(skge); 1238 + xm_link_down(hw, port); 1279 1239 return; 1280 1240 } 1281 1241 ··· 1302 1276 } 1303 1277 1304 1278 /* We are using IEEE 802.3z/D5.0 Table 37-4 */ 1305 - if (lpa & PHY_X_P_SYM_MD) 1306 - skge->flow_control = FLOW_MODE_SYMMETRIC; 1307 - else if ((lpa & PHY_X_RS_PAUSE) == PHY_X_P_ASYM_MD) 1308 - skge->flow_control = FLOW_MODE_REM_SEND; 1309 - else if ((lpa & PHY_X_RS_PAUSE) == PHY_X_P_BOTH_MD) 1310 - skge->flow_control = FLOW_MODE_LOC_SEND; 1279 + if ((skge->flow_control == FLOW_MODE_SYMMETRIC || 1280 + skge->flow_control == FLOW_MODE_SYM_OR_REM) && 1281 + (lpa & PHY_X_P_SYM_MD)) 1282 + skge->flow_status = FLOW_STAT_SYMMETRIC; 1283 + else if (skge->flow_control == FLOW_MODE_SYM_OR_REM && 1284 + (lpa & PHY_X_RS_PAUSE) == PHY_X_P_ASYM_MD) 1285 + /* Enable PAUSE receive, disable PAUSE transmit */ 1286 + skge->flow_status = FLOW_STAT_REM_SEND; 1287 + else if (skge->flow_control == FLOW_MODE_LOC_SEND && 1288 + (lpa & PHY_X_RS_PAUSE) == PHY_X_P_BOTH_MD) 1289 + /* Disable PAUSE receive, enable PAUSE transmit */ 1290 + skge->flow_status = FLOW_STAT_LOC_SEND; 1311 1291 else 1312 - skge->flow_control = FLOW_MODE_NONE; 1313 - 1292 + skge->flow_status = FLOW_STAT_NONE; 1314 1293 1315 1294 skge->speed = SPEED_1000; 1316 1295 } ··· 1599 1568 printk(KERN_DEBUG PFX "%s: mac interrupt status 0x%x\n", 1600 1569 skge->netdev->name, status); 1601 1570 1571 + if (hw->phy_type == SK_PHY_XMAC && 1572 + (status & (XM_IS_INP_ASS | XM_IS_LIPA_RC))) 1573 + xm_link_down(hw, port); 1574 + 1602 1575 if (status & XM_IS_TXF_UR) { 1603 1576 xm_write32(hw, port, XM_MODE, XM_MD_FTF); 1604 1577 ++skge->net_stats.tx_fifo_errors; ··· 1617 1582 { 1618 1583 struct skge_hw *hw = skge->hw; 1619 1584 int port = skge->port; 1620 - u16 cmd; 1585 + u16 cmd, msk; 1621 1586 u32 mode; 1622 1587 1623 1588 cmd = xm_read16(hw, port, XM_MMU_CMD); ··· 1626 1591 * enabling pause frame reception is required for 1000BT 1627 1592 * because the XMAC is not reset if the link is going down 1628 1593 */ 1629 - if (skge->flow_control == FLOW_MODE_NONE || 1630 - skge->flow_control == FLOW_MODE_LOC_SEND) 1594 + if (skge->flow_status == FLOW_STAT_NONE || 1595 + skge->flow_status == FLOW_STAT_LOC_SEND) 1631 1596 /* Disable Pause Frame Reception */ 1632 1597 cmd |= XM_MMU_IGN_PF; 1633 1598 else ··· 1637 1602 xm_write16(hw, port, XM_MMU_CMD, cmd); 1638 1603 1639 1604 mode = xm_read32(hw, port, XM_MODE); 1640 - if (skge->flow_control == FLOW_MODE_SYMMETRIC || 1641 - skge->flow_control == FLOW_MODE_LOC_SEND) { 1605 + if (skge->flow_status== FLOW_STAT_SYMMETRIC || 1606 + skge->flow_status == FLOW_STAT_LOC_SEND) { 1642 1607 /* 1643 1608 * Configure Pause Frame Generation 1644 1609 * Use internal and external Pause Frame Generation. ··· 1666 1631 } 1667 1632 1668 1633 xm_write32(hw, port, XM_MODE, mode); 1669 - xm_write16(hw, port, XM_IMSK, XM_DEF_MSK); 1634 + msk = XM_DEF_MSK; 1635 + if (hw->phy_type != SK_PHY_XMAC) 1636 + msk |= XM_IS_INP_ASS; /* disable GP0 interrupt bit */ 1637 + 1638 + xm_write16(hw, port, XM_IMSK, msk); 1670 1639 xm_read16(hw, port, XM_ISRC); 1671 1640 1672 1641 /* get MMU Command Reg. 
*/ ··· 1818 1779 adv |= PHY_M_AN_10_FD; 1819 1780 if (skge->advertising & ADVERTISED_10baseT_Half) 1820 1781 adv |= PHY_M_AN_10_HD; 1821 - } else /* special defines for FIBER (88E1011S only) */ 1822 - adv |= PHY_M_AN_1000X_AHD | PHY_M_AN_1000X_AFD; 1823 1782 1824 - /* Set Flow-control capabilities */ 1825 - adv |= phy_pause_map[skge->flow_control]; 1783 + /* Set Flow-control capabilities */ 1784 + adv |= phy_pause_map[skge->flow_control]; 1785 + } else { 1786 + if (skge->advertising & ADVERTISED_1000baseT_Full) 1787 + adv |= PHY_M_AN_1000X_AFD; 1788 + if (skge->advertising & ADVERTISED_1000baseT_Half) 1789 + adv |= PHY_M_AN_1000X_AHD; 1790 + 1791 + adv |= fiber_pause_map[skge->flow_control]; 1792 + } 1826 1793 1827 1794 /* Restart Auto-negotiation */ 1828 1795 ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG; ··· 1962 1917 case FLOW_MODE_LOC_SEND: 1963 1918 /* disable Rx flow-control */ 1964 1919 reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS; 1920 + break; 1921 + case FLOW_MODE_SYMMETRIC: 1922 + case FLOW_MODE_SYM_OR_REM: 1923 + /* enable Tx & Rx flow-control */ 1924 + break; 1965 1925 } 1966 1926 1967 1927 gma_write16(hw, port, GM_GP_CTRL, reg); ··· 2161 2111 ctrl &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA); 2162 2112 gma_write16(hw, port, GM_GP_CTRL, ctrl); 2163 2113 2164 - if (skge->flow_control == FLOW_MODE_REM_SEND) { 2114 + if (skge->flow_status == FLOW_STAT_REM_SEND) { 2115 + ctrl = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV); 2116 + ctrl |= PHY_M_AN_ASP; 2165 2117 /* restore Asymmetric Pause bit */ 2166 - gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, 2167 - gm_phy_read(hw, port, 2168 - PHY_MARV_AUNE_ADV) 2169 - | PHY_M_AN_ASP); 2170 - 2118 + gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, ctrl); 2171 2119 } 2172 2120 2173 2121 yukon_reset(hw, port); ··· 2212 2164 /* We are using IEEE 802.3z/D5.0 Table 37-4 */ 2213 2165 switch (phystat & PHY_M_PS_PAUSE_MSK) { 2214 2166 case PHY_M_PS_PAUSE_MSK: 2215 - skge->flow_control = FLOW_MODE_SYMMETRIC; 2167 + skge->flow_status = FLOW_STAT_SYMMETRIC; 2216 2168 break; 2217 2169 case PHY_M_PS_RX_P_EN: 2218 - skge->flow_control = FLOW_MODE_REM_SEND; 2170 + skge->flow_status = FLOW_STAT_REM_SEND; 2219 2171 break; 2220 2172 case PHY_M_PS_TX_P_EN: 2221 - skge->flow_control = FLOW_MODE_LOC_SEND; 2173 + skge->flow_status = FLOW_STAT_LOC_SEND; 2222 2174 break; 2223 2175 default: 2224 - skge->flow_control = FLOW_MODE_NONE; 2176 + skge->flow_status = FLOW_STAT_NONE; 2225 2177 } 2226 2178 2227 - if (skge->flow_control == FLOW_MODE_NONE || 2179 + if (skge->flow_status == FLOW_STAT_NONE || 2228 2180 (skge->speed < SPEED_1000 && skge->duplex == DUPLEX_HALF)) 2229 2181 skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); 2230 2182 else ··· 3447 3399 3448 3400 /* Auto speed and flow control */ 3449 3401 skge->autoneg = AUTONEG_ENABLE; 3450 - skge->flow_control = FLOW_MODE_SYMMETRIC; 3402 + skge->flow_control = FLOW_MODE_SYM_OR_REM; 3451 3403 skge->duplex = -1; 3452 3404 skge->speed = -1; 3453 3405 skge->advertising = skge_supported_modes(hw);
+19 -6
drivers/net/skge.h
···
 	XM_IS_RX_COMP	= 1<<0,	/* Bit 0:	Frame Rx Complete */
 };
 
-#define XM_DEF_MSK	(~(XM_IS_RXC_OV | XM_IS_TXC_OV | XM_IS_RXF_OV | XM_IS_TXF_UR))
+#define XM_DEF_MSK	(~(XM_IS_INP_ASS | XM_IS_LIPA_RC | \
+			   XM_IS_RXF_OV | XM_IS_TXF_UR))
 
 
 /*	XM_HW_CFG	16 bit r/w	Hardware Config Register */
···
 	struct mutex	phy_mutex;
 };
 
-enum {
-	FLOW_MODE_NONE 		= 0, /* No Flow-Control */
-	FLOW_MODE_LOC_SEND	= 1, /* Local station sends PAUSE */
-	FLOW_MODE_REM_SEND	= 2, /* Symmetric or just remote */
+enum pause_control {
+	FLOW_MODE_NONE 		= 1, /* No Flow-Control */
+	FLOW_MODE_LOC_SEND	= 2, /* Local station sends PAUSE */
 	FLOW_MODE_SYMMETRIC	= 3, /* Both stations may send PAUSE */
+	FLOW_MODE_SYM_OR_REM	= 4, /* Both stations may send PAUSE or
+				      * just the remote station may send PAUSE
+				      */
 };
+
+enum pause_status {
+	FLOW_STAT_INDETERMINATED=0,	/* indeterminated */
+	FLOW_STAT_NONE,			/* No Flow Control */
+	FLOW_STAT_REM_SEND,		/* Remote Station sends PAUSE */
+	FLOW_STAT_LOC_SEND,		/* Local station sends PAUSE */
+	FLOW_STAT_SYMMETRIC,		/* Both station may send PAUSE */
+};
+
 
 struct skge_port {
 	u32		     msg_enable;
···
 	struct net_device_stats net_stats;
 
 	struct work_struct   link_thread;
+	enum pause_control   flow_control;
+	enum pause_status    flow_status;
 	u8		     rx_csum;
 	u8		     blink_on;
-	u8		     flow_control;
 	u8		     wol;
 	u8		     autoneg;	/* AUTONEG_ENABLE, AUTONEG_DISABLE */
 	u8		     duplex;	/* DUPLEX_HALF, DUPLEX_FULL */
+11 -25
drivers/net/sky2.c
··· 683 683 sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON); 684 684 685 685 if (hw->chip_id == CHIP_ID_YUKON_EC_U) { 686 - sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8); 686 + sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 512/8); 687 687 sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8); 688 688 if (hw->dev[port]->mtu > ETH_DATA_LEN) { 689 689 /* set Tx GMAC FIFO Almost Empty Threshold */ ··· 1907 1907 pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr, 1908 1908 length, PCI_DMA_FROMDEVICE); 1909 1909 re->skb->ip_summed = CHECKSUM_NONE; 1910 - __skb_put(skb, length); 1910 + skb_put(skb, length); 1911 1911 } 1912 1912 return skb; 1913 1913 } ··· 1970 1970 if (skb_shinfo(skb)->nr_frags) 1971 1971 skb_put_frags(skb, hdr_space, length); 1972 1972 else 1973 - skb_put(skb, hdr_space); 1973 + skb_put(skb, length); 1974 1974 return skb; 1975 1975 } 1976 1976 ··· 2220 2220 /* PCI-Express uncorrectable Error occurred */ 2221 2221 u32 pex_err; 2222 2222 2223 - pex_err = sky2_pci_read32(hw, 2224 - hw->err_cap + PCI_ERR_UNCOR_STATUS); 2223 + pex_err = sky2_pci_read32(hw, PEX_UNC_ERR_STAT); 2225 2224 2226 2225 if (net_ratelimit()) 2227 2226 printk(KERN_ERR PFX "%s: pci express error (0x%x)\n", ··· 2228 2229 2229 2230 /* clear the interrupt */ 2230 2231 sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 2231 - sky2_pci_write32(hw, 2232 - hw->err_cap + PCI_ERR_UNCOR_STATUS, 2233 - 0xffffffffUL); 2232 + sky2_pci_write32(hw, PEX_UNC_ERR_STAT, 2233 + 0xffffffffUL); 2234 2234 sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 2235 2235 2236 - 2237 - /* In case of fatal error mask off to keep from getting stuck */ 2238 - if (pex_err & (PCI_ERR_UNC_POISON_TLP | PCI_ERR_UNC_FCP 2239 - | PCI_ERR_UNC_DLP)) { 2236 + if (pex_err & PEX_FATAL_ERRORS) { 2240 2237 u32 hwmsk = sky2_read32(hw, B0_HWE_IMSK); 2241 2238 hwmsk &= ~Y2_IS_PCI_EXP; 2242 2239 sky2_write32(hw, B0_HWE_IMSK, hwmsk); 2243 2240 } 2244 - 2245 2241 } 2246 2242 2247 2243 if (status & Y2_HWE_L1_MASK) ··· 2417 2423 u16 status; 2418 2424 u8 t8; 2419 2425 int i; 2420 - u32 msk; 2421 2426 2422 2427 sky2_write8(hw, B0_CTST, CS_RST_CLR); 2423 2428 ··· 2457 2464 sky2_write8(hw, B0_CTST, CS_MRST_CLR); 2458 2465 2459 2466 /* clear any PEX errors */ 2460 - if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP)) { 2461 - hw->err_cap = pci_find_ext_capability(hw->pdev, PCI_EXT_CAP_ID_ERR); 2462 - if (hw->err_cap) 2463 - sky2_pci_write32(hw, 2464 - hw->err_cap + PCI_ERR_UNCOR_STATUS, 2465 - 0xffffffffUL); 2466 - } 2467 + if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP)) 2468 + sky2_pci_write32(hw, PEX_UNC_ERR_STAT, 0xffffffffUL); 2469 + 2467 2470 2468 2471 hw->pmd_type = sky2_read8(hw, B2_PMD_TYP); 2469 2472 hw->ports = 1; ··· 2516 2527 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS2), SK_RI_TO_53); 2517 2528 } 2518 2529 2519 - msk = Y2_HWE_ALL_MASK; 2520 - if (!hw->err_cap) 2521 - msk &= ~Y2_IS_PCI_EXP; 2522 - sky2_write32(hw, B0_HWE_IMSK, msk); 2530 + sky2_write32(hw, B0_HWE_IMSK, Y2_HWE_ALL_MASK); 2523 2531 2524 2532 for (i = 0; i < hw->ports; i++) 2525 2533 sky2_gmac_reset(hw, i);
+43 -2
drivers/net/sky2.h
··· 6 6 7 7 #define ETH_JUMBO_MTU 9000 /* Maximum MTU supported */ 8 8 9 - /* PCI device specific config registers */ 9 + /* PCI config registers */ 10 10 enum { 11 11 PCI_DEV_REG1 = 0x40, 12 12 PCI_DEV_REG2 = 0x44, 13 + PCI_DEV_STATUS = 0x7c, 13 14 PCI_DEV_REG3 = 0x80, 14 15 PCI_DEV_REG4 = 0x84, 15 16 PCI_DEV_REG5 = 0x88, 17 + }; 18 + 19 + enum { 20 + PEX_DEV_CAP = 0xe4, 21 + PEX_DEV_CTRL = 0xe8, 22 + PEX_DEV_STA = 0xea, 23 + PEX_LNK_STAT = 0xf2, 24 + PEX_UNC_ERR_STAT= 0x104, 16 25 }; 17 26 18 27 /* Yukon-2 */ ··· 72 63 PCI_STATUS_REC_MASTER_ABORT | \ 73 64 PCI_STATUS_REC_TARGET_ABORT | \ 74 65 PCI_STATUS_PARITY) 66 + 67 + enum pex_dev_ctrl { 68 + PEX_DC_MAX_RRS_MSK = 7<<12, /* Bit 14..12: Max. Read Request Size */ 69 + PEX_DC_EN_NO_SNOOP = 1<<11,/* Enable No Snoop */ 70 + PEX_DC_EN_AUX_POW = 1<<10,/* Enable AUX Power */ 71 + PEX_DC_EN_PHANTOM = 1<<9, /* Enable Phantom Functions */ 72 + PEX_DC_EN_EXT_TAG = 1<<8, /* Enable Extended Tag Field */ 73 + PEX_DC_MAX_PLS_MSK = 7<<5, /* Bit 7.. 5: Max. Payload Size Mask */ 74 + PEX_DC_EN_REL_ORD = 1<<4, /* Enable Relaxed Ordering */ 75 + PEX_DC_EN_UNS_RQ_RP = 1<<3, /* Enable Unsupported Request Reporting */ 76 + PEX_DC_EN_FAT_ER_RP = 1<<2, /* Enable Fatal Error Reporting */ 77 + PEX_DC_EN_NFA_ER_RP = 1<<1, /* Enable Non-Fatal Error Reporting */ 78 + PEX_DC_EN_COR_ER_RP = 1<<0, /* Enable Correctable Error Reporting */ 79 + }; 80 + #define PEX_DC_MAX_RD_RQ_SIZE(x) (((x)<<12) & PEX_DC_MAX_RRS_MSK) 81 + 82 + /* PEX_UNC_ERR_STAT PEX Uncorrectable Errors Status Register (Yukon-2) */ 83 + enum pex_err { 84 + PEX_UNSUP_REQ = 1<<20, /* Unsupported Request Error */ 85 + 86 + PEX_MALFOR_TLP = 1<<18, /* Malformed TLP */ 87 + 88 + PEX_UNEXP_COMP = 1<<16, /* Unexpected Completion */ 89 + 90 + PEX_COMP_TO = 1<<14, /* Completion Timeout */ 91 + PEX_FLOW_CTRL_P = 1<<13, /* Flow Control Protocol Error */ 92 + PEX_POIS_TLP = 1<<12, /* Poisoned TLP */ 93 + 94 + PEX_DATA_LINK_P = 1<<4, /* Data Link Protocol Error */ 95 + PEX_FATAL_ERRORS= (PEX_MALFOR_TLP | PEX_FLOW_CTRL_P | PEX_DATA_LINK_P), 96 + }; 97 + 98 + 75 99 enum csr_regs { 76 100 B0_RAP = 0x0000, 77 101 B0_CTST = 0x0004, ··· 1878 1836 struct net_device *dev[2]; 1879 1837 1880 1838 int pm_cap; 1881 - int err_cap; 1882 1839 u8 chip_id; 1883 1840 u8 chip_rev; 1884 1841 u8 pmd_type;
+18
drivers/net/smc91x.h
···
 
 #define SMC_IRQ_FLAGS		(0)
 
+#elif defined(CONFIG_ARCH_VERSATILE)
+
+#define SMC_CAN_USE_8BIT	1
+#define SMC_CAN_USE_16BIT	1
+#define SMC_CAN_USE_32BIT	1
+#define SMC_NOWAIT		1
+
+#define SMC_inb(a, r)		readb((a) + (r))
+#define SMC_inw(a, r)		readw((a) + (r))
+#define SMC_inl(a, r)		readl((a) + (r))
+#define SMC_outb(v, a, r)	writeb(v, (a) + (r))
+#define SMC_outw(v, a, r)	writew(v, (a) + (r))
+#define SMC_outl(v, a, r)	writel(v, (a) + (r))
+#define SMC_insl(a, r, p, l)	readsl((a) + (r), p, l)
+#define SMC_outsl(a, r, p, l)	writesl((a) + (r), p, l)
+
+#define SMC_IRQ_FLAGS		(0)
+
 #else
 
 #define SMC_CAN_USE_8BIT	1
+149 -99
drivers/net/spider_net.c
··· 55 55 "<Jens.Osterkamp@de.ibm.com>"); 56 56 MODULE_DESCRIPTION("Spider Southbridge Gigabit Ethernet driver"); 57 57 MODULE_LICENSE("GPL"); 58 + MODULE_VERSION(VERSION); 58 59 59 60 static int rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_DEFAULT; 60 61 static int tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_DEFAULT; 61 62 62 - module_param(rx_descriptors, int, 0644); 63 - module_param(tx_descriptors, int, 0644); 63 + module_param(rx_descriptors, int, 0444); 64 + module_param(tx_descriptors, int, 0444); 64 65 65 66 MODULE_PARM_DESC(rx_descriptors, "number of descriptors used " \ 66 67 "in rx chains"); ··· 301 300 spider_net_init_chain(struct spider_net_card *card, 302 301 struct spider_net_descr_chain *chain, 303 302 struct spider_net_descr *start_descr, 304 - int direction, int no) 303 + int no) 305 304 { 306 305 int i; 307 306 struct spider_net_descr *descr; ··· 316 315 317 316 buf = pci_map_single(card->pdev, descr, 318 317 SPIDER_NET_DESCR_SIZE, 319 - direction); 318 + PCI_DMA_BIDIRECTIONAL); 320 319 321 320 if (pci_dma_mapping_error(buf)) 322 321 goto iommu_error; ··· 330 329 (descr-1)->next = start_descr; 331 330 start_descr->prev = descr-1; 332 331 333 - descr = start_descr; 334 - if (direction == PCI_DMA_FROMDEVICE) 335 - for (i=0; i < no; i++, descr++) 336 - descr->next_descr_addr = descr->next->bus_addr; 337 - 338 332 spin_lock_init(&chain->lock); 339 333 chain->head = start_descr; 340 334 chain->tail = start_descr; ··· 342 346 if (descr->bus_addr) 343 347 pci_unmap_single(card->pdev, descr->bus_addr, 344 348 SPIDER_NET_DESCR_SIZE, 345 - direction); 349 + PCI_DMA_BIDIRECTIONAL); 346 350 return -ENOMEM; 347 351 } 348 352 ··· 358 362 struct spider_net_descr *descr; 359 363 360 364 descr = card->rx_chain.head; 361 - while (descr->next != card->rx_chain.head) { 365 + do { 362 366 if (descr->skb) { 363 367 dev_kfree_skb(descr->skb); 364 368 pci_unmap_single(card->pdev, descr->buf_addr, 365 369 SPIDER_NET_MAX_FRAME, 366 - PCI_DMA_FROMDEVICE); 370 + PCI_DMA_BIDIRECTIONAL); 367 371 } 368 372 descr = descr->next; 369 - } 373 + } while (descr != card->rx_chain.head); 370 374 } 371 375 372 376 /** ··· 641 645 spider_net_prepare_tx_descr(struct spider_net_card *card, 642 646 struct sk_buff *skb) 643 647 { 644 - struct spider_net_descr *descr = card->tx_chain.head; 648 + struct spider_net_descr *descr; 645 649 dma_addr_t buf; 650 + unsigned long flags; 651 + int length; 646 652 647 - buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); 653 + length = skb->len; 654 + if (length < ETH_ZLEN) { 655 + if (skb_pad(skb, ETH_ZLEN-length)) 656 + return 0; 657 + length = ETH_ZLEN; 658 + } 659 + 660 + buf = pci_map_single(card->pdev, skb->data, length, PCI_DMA_TODEVICE); 648 661 if (pci_dma_mapping_error(buf)) { 649 662 if (netif_msg_tx_err(card) && net_ratelimit()) 650 663 pr_err("could not iommu-map packet (%p, %i). 
" 651 - "Dropping packet\n", skb->data, skb->len); 664 + "Dropping packet\n", skb->data, length); 652 665 card->spider_stats.tx_iommu_map_error++; 653 666 return -ENOMEM; 654 667 } 655 668 669 + spin_lock_irqsave(&card->tx_chain.lock, flags); 670 + descr = card->tx_chain.head; 671 + card->tx_chain.head = descr->next; 672 + 656 673 descr->buf_addr = buf; 657 - descr->buf_size = skb->len; 674 + descr->buf_size = length; 658 675 descr->next_descr_addr = 0; 659 676 descr->skb = skb; 660 677 descr->data_status = 0; 661 678 662 679 descr->dmac_cmd_status = 663 680 SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOCS; 681 + spin_unlock_irqrestore(&card->tx_chain.lock, flags); 682 + 664 683 if (skb->protocol == htons(ETH_P_IP)) 665 684 switch (skb->nh.iph->protocol) { 666 685 case IPPROTO_TCP: ··· 686 675 break; 687 676 } 688 677 678 + /* Chain the bus address, so that the DMA engine finds this descr. */ 689 679 descr->prev->next_descr_addr = descr->bus_addr; 690 680 681 + card->netdev->trans_start = jiffies; /* set netdev watchdog timer */ 691 682 return 0; 692 683 } 693 684 694 - /** 695 - * spider_net_release_tx_descr - processes a used tx descriptor 696 - * @card: card structure 697 - * @descr: descriptor to release 698 - * 699 - * releases a used tx descriptor (unmapping, freeing of skb) 700 - */ 701 - static inline void 702 - spider_net_release_tx_descr(struct spider_net_card *card) 685 + static int 686 + spider_net_set_low_watermark(struct spider_net_card *card) 703 687 { 688 + unsigned long flags; 689 + int status; 690 + int cnt=0; 691 + int i; 704 692 struct spider_net_descr *descr = card->tx_chain.tail; 705 - struct sk_buff *skb; 706 693 707 - card->tx_chain.tail = card->tx_chain.tail->next; 708 - descr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE; 694 + /* Measure the length of the queue. Measurement does not 695 + * need to be precise -- does not need a lock. */ 696 + while (descr != card->tx_chain.head) { 697 + status = descr->dmac_cmd_status & SPIDER_NET_DESCR_NOT_IN_USE; 698 + if (status == SPIDER_NET_DESCR_NOT_IN_USE) 699 + break; 700 + descr = descr->next; 701 + cnt++; 702 + } 709 703 710 - /* unmap the skb */ 711 - skb = descr->skb; 712 - pci_unmap_single(card->pdev, descr->buf_addr, skb->len, 713 - PCI_DMA_TODEVICE); 714 - dev_kfree_skb_any(skb); 704 + /* If TX queue is short, don't even bother with interrupts */ 705 + if (cnt < card->num_tx_desc/4) 706 + return cnt; 707 + 708 + /* Set low-watermark 3/4th's of the way into the queue. 
*/ 709 + descr = card->tx_chain.tail; 710 + cnt = (cnt*3)/4; 711 + for (i=0;i<cnt; i++) 712 + descr = descr->next; 713 + 714 + /* Set the new watermark, clear the old watermark */ 715 + spin_lock_irqsave(&card->tx_chain.lock, flags); 716 + descr->dmac_cmd_status |= SPIDER_NET_DESCR_TXDESFLG; 717 + if (card->low_watermark && card->low_watermark != descr) 718 + card->low_watermark->dmac_cmd_status = 719 + card->low_watermark->dmac_cmd_status & ~SPIDER_NET_DESCR_TXDESFLG; 720 + card->low_watermark = descr; 721 + spin_unlock_irqrestore(&card->tx_chain.lock, flags); 722 + return cnt; 715 723 } 716 724 717 725 /** ··· 749 719 spider_net_release_tx_chain(struct spider_net_card *card, int brutal) 750 720 { 751 721 struct spider_net_descr_chain *chain = &card->tx_chain; 722 + struct spider_net_descr *descr; 723 + struct sk_buff *skb; 724 + u32 buf_addr; 725 + unsigned long flags; 752 726 int status; 753 727 754 - spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR); 755 - 756 728 while (chain->tail != chain->head) { 757 - status = spider_net_get_descr_status(chain->tail); 729 + spin_lock_irqsave(&chain->lock, flags); 730 + descr = chain->tail; 731 + 732 + status = spider_net_get_descr_status(descr); 758 733 switch (status) { 759 734 case SPIDER_NET_DESCR_COMPLETE: 760 735 card->netdev_stats.tx_packets++; 761 - card->netdev_stats.tx_bytes += chain->tail->skb->len; 736 + card->netdev_stats.tx_bytes += descr->skb->len; 762 737 break; 763 738 764 739 case SPIDER_NET_DESCR_CARDOWNED: 765 - if (!brutal) 740 + if (!brutal) { 741 + spin_unlock_irqrestore(&chain->lock, flags); 766 742 return 1; 743 + } 744 + 767 745 /* fallthrough, if we release the descriptors 768 746 * brutally (then we don't care about 769 747 * SPIDER_NET_DESCR_CARDOWNED) */ ··· 788 750 789 751 default: 790 752 card->netdev_stats.tx_dropped++; 791 - return 1; 753 + if (!brutal) { 754 + spin_unlock_irqrestore(&chain->lock, flags); 755 + return 1; 756 + } 792 757 } 793 - spider_net_release_tx_descr(card); 794 - } 795 758 759 + chain->tail = descr->next; 760 + descr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE; 761 + skb = descr->skb; 762 + buf_addr = descr->buf_addr; 763 + spin_unlock_irqrestore(&chain->lock, flags); 764 + 765 + /* unmap the skb */ 766 + if (skb) { 767 + int len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len; 768 + pci_unmap_single(card->pdev, buf_addr, len, PCI_DMA_TODEVICE); 769 + dev_kfree_skb(skb); 770 + } 771 + } 796 772 return 0; 797 773 } 798 774 ··· 815 763 * @card: card structure 816 764 * @descr: descriptor address to enable TX processing at 817 765 * 818 - * spider_net_kick_tx_dma writes the current tx chain head as start address 819 - * of the tx descriptor chain and enables the transmission DMA engine 766 + * This routine will start the transmit DMA running if 767 + * it is not already running. This routine ned only be 768 + * called when queueing a new packet to an empty tx queue. 769 + * Writes the current tx chain head as start address 770 + * of the tx descriptor chain and enables the transmission 771 + * DMA engine. 
820 772 */ 821 773 static inline void 822 774 spider_net_kick_tx_dma(struct spider_net_card *card) ··· 860 804 static int 861 805 spider_net_xmit(struct sk_buff *skb, struct net_device *netdev) 862 806 { 807 + int cnt; 863 808 struct spider_net_card *card = netdev_priv(netdev); 864 809 struct spider_net_descr_chain *chain = &card->tx_chain; 865 - struct spider_net_descr *descr = chain->head; 866 - unsigned long flags; 867 - int result; 868 - 869 - spin_lock_irqsave(&chain->lock, flags); 870 810 871 811 spider_net_release_tx_chain(card, 0); 872 812 873 - if (chain->head->next == chain->tail->prev) { 813 + if ((chain->head->next == chain->tail->prev) || 814 + (spider_net_prepare_tx_descr(card, skb) != 0)) { 815 + 874 816 card->netdev_stats.tx_dropped++; 875 - result = NETDEV_TX_LOCKED; 876 - goto out; 817 + netif_stop_queue(netdev); 818 + return NETDEV_TX_BUSY; 877 819 } 878 820 879 - if (spider_net_get_descr_status(descr) != SPIDER_NET_DESCR_NOT_IN_USE) { 880 - card->netdev_stats.tx_dropped++; 881 - result = NETDEV_TX_LOCKED; 882 - goto out; 883 - } 884 - 885 - if (spider_net_prepare_tx_descr(card, skb) != 0) { 886 - card->netdev_stats.tx_dropped++; 887 - result = NETDEV_TX_BUSY; 888 - goto out; 889 - } 890 - 891 - result = NETDEV_TX_OK; 892 - 893 - spider_net_kick_tx_dma(card); 894 - card->tx_chain.head = card->tx_chain.head->next; 895 - 896 - out: 897 - spin_unlock_irqrestore(&chain->lock, flags); 898 - netif_wake_queue(netdev); 899 - return result; 821 + cnt = spider_net_set_low_watermark(card); 822 + if (cnt < 5) 823 + spider_net_kick_tx_dma(card); 824 + return NETDEV_TX_OK; 900 825 } 901 826 902 827 /** 903 828 * spider_net_cleanup_tx_ring - cleans up the TX ring 904 829 * @card: card structure 905 830 * 906 - * spider_net_cleanup_tx_ring is called by the tx_timer (as we don't use 907 - * interrupts to cleanup our TX ring) and returns sent packets to the stack 908 - * by freeing them 831 + * spider_net_cleanup_tx_ring is called by either the tx_timer 832 + * or from the NAPI polling routine. 833 + * This routine releases resources associted with transmitted 834 + * packets, including updating the queue tail pointer. 909 835 */ 910 836 static void 911 837 spider_net_cleanup_tx_ring(struct spider_net_card *card) 912 838 { 913 - unsigned long flags; 914 - 915 - spin_lock_irqsave(&card->tx_chain.lock, flags); 916 - 917 839 if ((spider_net_release_tx_chain(card, 0) != 0) && 918 - (card->netdev->flags & IFF_UP)) 840 + (card->netdev->flags & IFF_UP)) { 919 841 spider_net_kick_tx_dma(card); 920 - 921 - spin_unlock_irqrestore(&card->tx_chain.lock, flags); 842 + netif_wake_queue(card->netdev); 843 + } 922 844 } 923 845 924 846 /** ··· 1087 1053 int packets_to_do, packets_done = 0; 1088 1054 int no_more_packets = 0; 1089 1055 1056 + spider_net_cleanup_tx_ring(card); 1090 1057 packets_to_do = min(*budget, netdev->quota); 1091 1058 1092 1059 while (packets_to_do) { ··· 1278 1243 case SPIDER_NET_PHYINT: 1279 1244 case SPIDER_NET_GMAC2INT: 1280 1245 case SPIDER_NET_GMAC1INT: 1281 - case SPIDER_NET_GIPSINT: 1282 1246 case SPIDER_NET_GFIFOINT: 1283 1247 case SPIDER_NET_DMACINT: 1284 1248 case SPIDER_NET_GSYSINT: 1285 1249 break; */ 1250 + 1251 + case SPIDER_NET_GIPSINT: 1252 + show_error = 0; 1253 + break; 1286 1254 1287 1255 case SPIDER_NET_GPWOPCMPINT: 1288 1256 /* PHY write operation completed */ ··· 1345 1307 case SPIDER_NET_GDTDCEINT: 1346 1308 /* chain end. 
If a descriptor should be sent, kick off 1347 1309 * tx dma 1348 - if (card->tx_chain.tail == card->tx_chain.head) 1310 + if (card->tx_chain.tail != card->tx_chain.head) 1349 1311 spider_net_kick_tx_dma(card); 1350 - show_error = 0; */ 1312 + */ 1313 + show_error = 0; 1351 1314 break; 1352 1315 1353 1316 /* case SPIDER_NET_G1TMCNTINT: not used. print a message */ ··· 1393 1354 if (netif_msg_intr(card)) 1394 1355 pr_err("got descriptor chain end interrupt, " 1395 1356 "restarting DMAC %c.\n", 1396 - 'D'+i-SPIDER_NET_GDDDCEINT); 1357 + 'D'-(i-SPIDER_NET_GDDDCEINT)/3); 1397 1358 spider_net_refill_rx_chain(card); 1398 1359 spider_net_enable_rxdmac(card); 1399 1360 show_error = 0; ··· 1462 1423 } 1463 1424 1464 1425 if ((show_error) && (netif_msg_intr(card))) 1465 - pr_err("Got error interrupt, GHIINT0STS = 0x%08x, " 1426 + pr_err("Got error interrupt on %s, GHIINT0STS = 0x%08x, " 1466 1427 "GHIINT1STS = 0x%08x, GHIINT2STS = 0x%08x\n", 1428 + card->netdev->name, 1467 1429 status_reg, error_reg1, error_reg2); 1468 1430 1469 1431 /* clear interrupt sources */ ··· 1500 1460 spider_net_rx_irq_off(card); 1501 1461 netif_rx_schedule(netdev); 1502 1462 } 1463 + if (status_reg & SPIDER_NET_TXINT) 1464 + netif_rx_schedule(netdev); 1503 1465 1504 1466 if (status_reg & SPIDER_NET_ERRINT ) 1505 1467 spider_net_handle_error_irq(card, status_reg); ··· 1641 1599 SPIDER_NET_INT2_MASK_VALUE); 1642 1600 1643 1601 spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR, 1644 - SPIDER_NET_GDTDCEIDIS); 1602 + SPIDER_NET_GDTBSTA | SPIDER_NET_GDTDCEIDIS); 1645 1603 } 1646 1604 1647 1605 /** ··· 1657 1615 spider_net_open(struct net_device *netdev) 1658 1616 { 1659 1617 struct spider_net_card *card = netdev_priv(netdev); 1660 - int result; 1618 + struct spider_net_descr *descr; 1619 + int i, result; 1661 1620 1662 1621 result = -ENOMEM; 1663 1622 if (spider_net_init_chain(card, &card->tx_chain, card->descr, 1664 - PCI_DMA_TODEVICE, card->tx_desc)) 1623 + card->num_tx_desc)) 1665 1624 goto alloc_tx_failed; 1625 + 1626 + card->low_watermark = NULL; 1627 + 1628 + /* rx_chain is after tx_chain, so offset is descr + tx_count */ 1666 1629 if (spider_net_init_chain(card, &card->rx_chain, 1667 - card->descr + card->rx_desc, 1668 - PCI_DMA_FROMDEVICE, card->rx_desc)) 1630 + card->descr + card->num_tx_desc, 1631 + card->num_rx_desc)) 1669 1632 goto alloc_rx_failed; 1633 + 1634 + descr = card->rx_chain.head; 1635 + for (i=0; i < card->num_rx_desc; i++, descr++) 1636 + descr->next_descr_addr = descr->next->bus_addr; 1670 1637 1671 1638 /* allocate rx skbs */ 1672 1639 if (spider_net_alloc_rx_skbs(card)) ··· 1929 1878 spider_net_disable_rxdmac(card); 1930 1879 1931 1880 /* release chains */ 1932 - if (spin_trylock(&card->tx_chain.lock)) { 1933 - spider_net_release_tx_chain(card, 1); 1934 - spin_unlock(&card->tx_chain.lock); 1935 - } 1881 + spider_net_release_tx_chain(card, 1); 1936 1882 1937 1883 spider_net_free_chain(card, &card->tx_chain); 1938 1884 spider_net_free_chain(card, &card->rx_chain); ··· 2060 2012 2061 2013 card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT; 2062 2014 2063 - card->tx_desc = tx_descriptors; 2064 - card->rx_desc = rx_descriptors; 2015 + card->num_tx_desc = tx_descriptors; 2016 + card->num_rx_desc = rx_descriptors; 2065 2017 2066 2018 spider_net_setup_netdev_ops(netdev); 2067 2019 ··· 2300 2252 */ 2301 2253 static int __init spider_net_init(void) 2302 2254 { 2255 + printk(KERN_INFO "Spidernet version %s.\n", VERSION); 2256 + 2303 2257 if (rx_descriptors < SPIDER_NET_RX_DESCRIPTORS_MIN) { 2304 2258 
rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MIN; 2305 2259 pr_info("adjusting rx descriptors to %i.\n", rx_descriptors);
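Taken together, the spider_net.c changes replace per-packet TX completion handling with a low-watermark scheme: spider_net_set_low_watermark() counts the descriptors still owned by the card and, if they amount to at least a quarter of the ring, plants SPIDER_NET_DESCR_TXDESFLG three quarters of the way into that backlog (for example, with 40 descriptors in flight the flag lands 30 past the tail), so the TX interrupt fires once most of the backlog has drained instead of once per frame; the transmit path only kicks the DMA engine directly when fewer than five packets are queued. A condensed restatement of that placement logic, with the locking and the clearing of the previous watermark descriptor omitted:

/* cnt = descriptors still owned by the hardware, counted from the tail */
if (cnt >= card->num_tx_desc / 4) {
	struct spider_net_descr *descr = card->tx_chain.tail;
	int i, target = (cnt * 3) / 4;	/* 3/4 of the way into the backlog */

	for (i = 0; i < target; i++)
		descr = descr->next;

	/* this descriptor raises the TX interrupt when processed; in the
	 * real code the previous watermark descriptor has its flag
	 * cleared under the tx_chain lock */
	descr->dmac_cmd_status |= SPIDER_NET_DESCR_TXDESFLG;
}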
+21 -14
drivers/net/spider_net.h
··· 24 24 #ifndef _SPIDER_NET_H 25 25 #define _SPIDER_NET_H 26 26 27 + #define VERSION "1.1 A" 28 + 27 29 #include "sungem_phy.h" 28 30 29 31 extern int spider_net_stop(struct net_device *netdev); ··· 49 47 #define SPIDER_NET_TX_DESCRIPTORS_MIN 16 50 48 #define SPIDER_NET_TX_DESCRIPTORS_MAX 512 51 49 52 - #define SPIDER_NET_TX_TIMER 20 50 + #define SPIDER_NET_TX_TIMER (HZ/5) 53 51 54 52 #define SPIDER_NET_RX_CSUM_DEFAULT 1 55 53 ··· 191 189 #define SPIDER_NET_MACMODE_VALUE 0x00000001 192 190 #define SPIDER_NET_BURSTLMT_VALUE 0x00000200 /* about 16 us */ 193 191 194 - /* 1(0) enable r/tx dma 192 + /* DMAC control register GDMACCNTR 193 + * 194 + * 1(0) enable r/tx dma 195 195 * 0000000 fixed to 0 196 196 * 197 197 * 000000 fixed to 0 ··· 202 198 * 203 199 * 000000 fixed to 0 204 200 * 00 burst alignment: 128 bytes 201 + * 11 burst alignment: 1024 bytes 205 202 * 206 203 * 00000 fixed to 0 207 204 * 0 descr writeback size 32 bytes ··· 213 208 #define SPIDER_NET_DMA_RX_VALUE 0x80000000 214 209 #define SPIDER_NET_DMA_RX_FEND_VALUE 0x00030003 215 210 /* to set TX_DMA_EN */ 216 - #define SPIDER_NET_TX_DMA_EN 0x80000000 217 - #define SPIDER_NET_GDTDCEIDIS 0x00000002 218 - #define SPIDER_NET_DMA_TX_VALUE SPIDER_NET_TX_DMA_EN | \ 219 - SPIDER_NET_GDTDCEIDIS 211 + #define SPIDER_NET_TX_DMA_EN 0x80000000 212 + #define SPIDER_NET_GDTBSTA 0x00000300 213 + #define SPIDER_NET_GDTDCEIDIS 0x00000002 214 + #define SPIDER_NET_DMA_TX_VALUE SPIDER_NET_TX_DMA_EN | \ 215 + SPIDER_NET_GDTBSTA | \ 216 + SPIDER_NET_GDTDCEIDIS 217 + 220 218 #define SPIDER_NET_DMA_TX_FEND_VALUE 0x00030003 221 219 222 220 /* SPIDER_NET_UA_DESCR_VALUE is OR'ed with the unicast address */ ··· 328 320 SPIDER_NET_GRISPDNGINT 329 321 }; 330 322 331 - #define SPIDER_NET_TXINT ( (1 << SPIDER_NET_GTTEDINT) | \ 332 - (1 << SPIDER_NET_GDTDCEINT) | \ 333 - (1 << SPIDER_NET_GDTFDCINT) ) 323 + #define SPIDER_NET_TXINT ( (1 << SPIDER_NET_GDTFDCINT) ) 334 324 335 - /* we rely on flagged descriptor interrupts*/ 336 - #define SPIDER_NET_RXINT ( (1 << SPIDER_NET_GDAFDCINT) | \ 337 - (1 << SPIDER_NET_GRMFLLINT) ) 325 + /* We rely on flagged descriptor interrupts */ 326 + #define SPIDER_NET_RXINT ( (1 << SPIDER_NET_GDAFDCINT) ) 338 327 339 328 #define SPIDER_NET_ERRINT ( 0xffffffff & \ 340 329 (~SPIDER_NET_TXINT) & \ ··· 354 349 #define SPIDER_NET_DESCR_FORCE_END 0x50000000 /* used in rx and tx */ 355 350 #define SPIDER_NET_DESCR_CARDOWNED 0xA0000000 /* used in rx and tx */ 356 351 #define SPIDER_NET_DESCR_NOT_IN_USE 0xF0000000 352 + #define SPIDER_NET_DESCR_TXDESFLG 0x00800000 357 353 358 354 struct spider_net_descr { 359 355 /* as defined by the hardware */ ··· 439 433 440 434 struct spider_net_descr_chain tx_chain; 441 435 struct spider_net_descr_chain rx_chain; 436 + struct spider_net_descr *low_watermark; 442 437 443 438 struct net_device_stats netdev_stats; 444 439 ··· 455 448 456 449 /* for ethtool */ 457 450 int msg_enable; 458 - int rx_desc; 459 - int tx_desc; 451 + int num_rx_desc; 452 + int num_tx_desc; 460 453 struct spider_net_extra_stats spider_stats; 461 454 462 455 struct spider_net_descr descr[0];
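SPIDER_NET_TX_TIMER is now expressed in jiffies (HZ/5, i.e. 200 ms regardless of the kernel's HZ setting) instead of a bare count of 20, so the TX cleanup timer fires at the same real-time interval on every configuration. A hedged sketch of how such a constant is typically consumed when rearming the timer; the tx_timer field and the call site are assumed here, since neither appears in this hunk:

/* Re-arm the TX-ring cleanup timer 200 ms from now (HZ-independent,
 * because the constant is already expressed in jiffies). */
mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);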
+3 -3
drivers/net/spider_net_ethtool.c
··· 76 76 /* clear and fill out info */ 77 77 memset(drvinfo, 0, sizeof(struct ethtool_drvinfo)); 78 78 strncpy(drvinfo->driver, spider_net_driver_name, 32); 79 - strncpy(drvinfo->version, "0.1", 32); 79 + strncpy(drvinfo->version, VERSION, 32); 80 80 strcpy(drvinfo->fw_version, "no information"); 81 81 strncpy(drvinfo->bus_info, pci_name(card->pdev), 32); 82 82 } ··· 158 158 struct spider_net_card *card = netdev->priv; 159 159 160 160 ering->tx_max_pending = SPIDER_NET_TX_DESCRIPTORS_MAX; 161 - ering->tx_pending = card->tx_desc; 161 + ering->tx_pending = card->num_tx_desc; 162 162 ering->rx_max_pending = SPIDER_NET_RX_DESCRIPTORS_MAX; 163 - ering->rx_pending = card->rx_desc; 163 + ering->rx_pending = card->num_rx_desc; 164 164 } 165 165 166 166 static int spider_net_get_stats_count(struct net_device *netdev)
+4 -4
drivers/net/tulip/de2104x.c
··· 1730 1730 } 1731 1731 1732 1732 /* Note: this routine returns extra data bits for size detection. */ 1733 - static unsigned __init tulip_read_eeprom(void __iomem *regs, int location, int addr_len) 1733 + static unsigned __devinit tulip_read_eeprom(void __iomem *regs, int location, int addr_len) 1734 1734 { 1735 1735 int i; 1736 1736 unsigned retval = 0; ··· 1926 1926 goto fill_defaults; 1927 1927 } 1928 1928 1929 - static int __init de_init_one (struct pci_dev *pdev, 1929 + static int __devinit de_init_one (struct pci_dev *pdev, 1930 1930 const struct pci_device_id *ent) 1931 1931 { 1932 1932 struct net_device *dev; ··· 2082 2082 return rc; 2083 2083 } 2084 2084 2085 - static void __exit de_remove_one (struct pci_dev *pdev) 2085 + static void __devexit de_remove_one (struct pci_dev *pdev) 2086 2086 { 2087 2087 struct net_device *dev = pci_get_drvdata(pdev); 2088 2088 struct de_private *de = dev->priv; ··· 2164 2164 .name = DRV_NAME, 2165 2165 .id_table = de_pci_tbl, 2166 2166 .probe = de_init_one, 2167 - .remove = __exit_p(de_remove_one), 2167 + .remove = __devexit_p(de_remove_one), 2168 2168 #ifdef CONFIG_PM 2169 2169 .suspend = de_suspend, 2170 2170 .resume = de_resume,
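The annotation changes in de2104x.c are a section-mismatch fix: de_init_one() is a PCI probe routine that can run after boot via hotplug, so it and the helpers it calls, such as tulip_read_eeprom(), must be __devinit rather than __init (which lands in memory freed at the end of boot), and the remove hook pairs __devexit with __devexit_p() instead of __exit/__exit_p(). A minimal sketch of the pattern for a hypothetical driver, assuming the usual <linux/pci.h> and <linux/init.h> includes; all names and IDs below are placeholders:

static struct pci_device_id foo_pci_tbl[] = {
	{ PCI_DEVICE(0x1234, 0x0001) },	/* placeholder vendor/device */
	{ }
};

static int __devinit foo_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	/* probe work goes here; everything it calls should also be
	 * __devinit (or not annotated) to avoid section mismatches */
	return 0;
}

static void __devexit foo_remove_one(struct pci_dev *pdev)
{
}

static struct pci_driver foo_driver = {
	.name     = "foo",
	.id_table = foo_pci_tbl,
	.probe    = foo_init_one,
	.remove   = __devexit_p(foo_remove_one),
};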