Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: ec_bhf: remove excessive debug messages

This cuts down the amount of debug information emitted by
the driver.

Signed-off-by: Dariusz Marcinkiewicz <reksio@newterm.pl>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Darek Marcinkiewicz and committed by
David S. Miller
a9b0b2fa a98406e2

+10 -91
+10 -91
drivers/net/ethernet/ec_bhf.c
··· 1 1 /* 2 - * drivers/net/ethernet/beckhoff/ec_bhf.c 2 + * drivers/net/ethernet/ec_bhf.c 3 3 * 4 4 * Copyright (C) 2014 Darek Marcinkiewicz <reksio@newterm.pl> 5 5 * ··· 18 18 * Those can be found on Bechhoff CX50xx industrial PCs. 19 19 */ 20 20 21 - #if 0 22 - #define DEBUG 23 - #endif 24 21 #include <linux/kernel.h> 25 22 #include <linux/module.h> 26 23 #include <linux/moduleparam.h> ··· 70 73 #define DMA_CHAN_SIZE 0x8 71 74 72 75 #define DMA_WINDOW_SIZE_MASK 0xfffffffc 76 + 77 + #define ETHERCAT_MASTER_ID 0x14 73 78 74 79 static struct pci_device_id ids[] = { 75 80 { PCI_DEVICE(0x15ec, 0x5000), }, ··· 130 131 131 132 struct ec_bhf_priv { 132 133 struct net_device *net_dev; 133 - 134 134 struct pci_dev *dev; 135 135 136 136 void __iomem *io; ··· 160 162 161 163 #define PRIV_TO_DEV(priv) (&(priv)->dev->dev) 162 164 163 - #define ETHERCAT_MASTER_ID 0x14 164 - 165 - static void ec_bhf_print_status(struct ec_bhf_priv *priv) 166 - { 167 - struct device *dev = PRIV_TO_DEV(priv); 168 - 169 - dev_dbg(dev, "Frame error counter: %d\n", 170 - ioread8(priv->mac_io + MAC_FRAME_ERR_CNT)); 171 - dev_dbg(dev, "RX error counter: %d\n", 172 - ioread8(priv->mac_io + MAC_RX_ERR_CNT)); 173 - dev_dbg(dev, "CRC error counter: %d\n", 174 - ioread8(priv->mac_io + MAC_CRC_ERR_CNT)); 175 - dev_dbg(dev, "TX frame counter: %d\n", 176 - ioread32(priv->mac_io + MAC_TX_FRAME_CNT)); 177 - dev_dbg(dev, "RX frame counter: %d\n", 178 - ioread32(priv->mac_io + MAC_RX_FRAME_CNT)); 179 - dev_dbg(dev, "TX fifo level: %d\n", 180 - ioread8(priv->mac_io + MAC_TX_FIFO_LVL)); 181 - dev_dbg(dev, "Dropped frames: %d\n", 182 - ioread8(priv->mac_io + MAC_DROPPED_FRMS)); 183 - dev_dbg(dev, "Connected with CCAT slot: %d\n", 184 - ioread8(priv->mac_io + MAC_CONNECTED_CCAT_FLAG)); 185 - dev_dbg(dev, "Link status: %d\n", 186 - ioread8(priv->mii_io + MII_LINK_STATUS)); 187 - } 188 - 189 165 static void ec_bhf_reset(struct ec_bhf_priv *priv) 190 166 { 191 167 iowrite8(0, priv->mac_io + MAC_FRAME_ERR_CNT); ··· 182 
210 u32 addr = (u8 *)desc - priv->tx_buf.buf; 183 211 184 212 iowrite32((ALIGN(len, 8) << 24) | addr, priv->fifo_io + FIFO_TX_REG); 185 - 186 - dev_dbg(PRIV_TO_DEV(priv), "Done sending packet\n"); 187 213 } 188 214 189 215 static int ec_bhf_desc_sent(struct tx_desc *desc) ··· 214 244 static void ec_bhf_process_rx(struct ec_bhf_priv *priv) 215 245 { 216 246 struct rx_desc *desc = &priv->rx_descs[priv->rx_dnext]; 217 - struct device *dev = PRIV_TO_DEV(priv); 218 247 219 248 while (ec_bhf_pkt_received(desc)) { 220 249 int pkt_size = (le16_to_cpu(desc->header.len) & ··· 222 253 struct sk_buff *skb; 223 254 224 255 skb = netdev_alloc_skb_ip_align(priv->net_dev, pkt_size); 225 - dev_dbg(dev, "Received packet, size: %d\n", pkt_size); 226 - 227 256 if (skb) { 228 257 memcpy(skb_put(skb, pkt_size), data, pkt_size); 229 258 skb->protocol = eth_type_trans(skb, priv->net_dev); 230 - dev_dbg(dev, "Protocol type: %x\n", skb->protocol); 231 - 232 259 priv->stat_rx_bytes += pkt_size; 233 260 234 261 netif_rx(skb); 235 262 } else { 236 - dev_err_ratelimited(dev, 237 - "Couldn't allocate a skb_buff for a packet of size %u\n", 238 - pkt_size); 263 + dev_err_ratelimited(PRIV_TO_DEV(priv), 264 + "Couldn't allocate a skb_buff for a packet of size %u\n", 265 + pkt_size); 239 266 } 240 267 241 268 desc->header.recv = 0; ··· 241 276 priv->rx_dnext = (priv->rx_dnext + 1) % priv->rx_dcount; 242 277 desc = &priv->rx_descs[priv->rx_dnext]; 243 278 } 244 - 245 279 } 246 280 247 281 static enum hrtimer_restart ec_bhf_timer_fun(struct hrtimer *timer) ··· 263 299 unsigned block_count, i; 264 300 void __iomem *ec_info; 265 301 266 - dev_dbg(dev, "Info block:\n"); 267 - dev_dbg(dev, "Type of function: %x\n", (unsigned)ioread16(priv->io)); 268 - dev_dbg(dev, "Revision of function: %x\n", 269 - (unsigned)ioread16(priv->io + INFO_BLOCK_REV)); 270 - 271 302 block_count = ioread8(priv->io + INFO_BLOCK_BLK_CNT); 272 - dev_dbg(dev, "Number of function blocks: %x\n", block_count); 273 - 274 303 for (i = 0; 
i < block_count; i++) { 275 304 u16 type = ioread16(priv->io + i * INFO_BLOCK_SIZE + 276 305 INFO_BLOCK_TYPE); ··· 274 317 dev_err(dev, "EtherCAT master with DMA block not found\n"); 275 318 return -ENODEV; 276 319 } 277 - dev_dbg(dev, "EtherCAT master with DMA block found at pos: %d\n", i); 278 320 279 321 ec_info = priv->io + i * INFO_BLOCK_SIZE; 280 - dev_dbg(dev, "EtherCAT master revision: %d\n", 281 - ioread16(ec_info + INFO_BLOCK_REV)); 282 322 283 323 priv->tx_dma_chan = ioread8(ec_info + INFO_BLOCK_TX_CHAN); 284 - dev_dbg(dev, "EtherCAT master tx dma channel: %d\n", 285 - priv->tx_dma_chan); 286 - 287 324 priv->rx_dma_chan = ioread8(ec_info + INFO_BLOCK_RX_CHAN); 288 - dev_dbg(dev, "EtherCAT master rx dma channel: %d\n", 289 - priv->rx_dma_chan); 290 325 291 326 priv->ec_io = priv->io + ioread32(ec_info + INFO_BLOCK_OFFSET); 292 327 priv->mii_io = priv->ec_io + ioread32(priv->ec_io + EC_MII_OFFSET); 293 328 priv->fifo_io = priv->ec_io + ioread32(priv->ec_io + EC_FIFO_OFFSET); 294 329 priv->mac_io = priv->ec_io + ioread32(priv->ec_io + EC_MAC_OFFSET); 295 - 296 - dev_dbg(dev, 297 - "EtherCAT block addres: %p, fifo address: %p, mii address: %p, mac address: %p\n", 298 - priv->ec_io, priv->fifo_io, priv->mii_io, priv->mac_io); 299 330 300 331 return 0; 301 332 } ··· 294 349 struct ec_bhf_priv *priv = netdev_priv(net_dev); 295 350 struct tx_desc *desc; 296 351 unsigned len; 297 - 298 - dev_dbg(PRIV_TO_DEV(priv), "Starting xmit\n"); 299 352 300 353 desc = &priv->tx_descs[priv->tx_dnext]; 301 354 ··· 309 366 priv->tx_dnext = (priv->tx_dnext + 1) % priv->tx_dcount; 310 367 311 368 if (!ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext])) { 312 - /* Make sure that update updates to tx_dnext are perceived 369 + /* Make sure that updates to tx_dnext are perceived 313 370 * by timer routine. 
314 371 */ 315 372 smp_wmb(); 316 373 317 374 netif_stop_queue(net_dev); 318 - 319 - dev_dbg(PRIV_TO_DEV(priv), "Stopping netif queue\n"); 320 - ec_bhf_print_status(priv); 321 375 } 322 376 323 377 priv->stat_tx_bytes += len; ··· 337 397 338 398 mask = ioread32(priv->dma_io + offset); 339 399 mask &= DMA_WINDOW_SIZE_MASK; 340 - dev_dbg(dev, "Read mask %x for channel %d\n", mask, channel); 341 400 342 401 /* We want to allocate a chunk of memory that is: 343 402 * - aligned to the mask we just read ··· 347 408 buf->len = min_t(int, ~mask + 1, size); 348 409 buf->alloc_len = 2 * buf->len; 349 410 350 - dev_dbg(dev, "Allocating %d bytes for channel %d", 351 - (int)buf->alloc_len, channel); 352 411 buf->alloc = dma_alloc_coherent(dev, buf->alloc_len, &buf->alloc_phys, 353 412 GFP_KERNEL); 354 413 if (buf->alloc == NULL) { 355 - dev_info(dev, "Failed to allocate buffer\n"); 414 + dev_err(dev, "Failed to allocate buffer\n"); 356 415 return -ENOMEM; 357 416 } 358 417 ··· 359 422 360 423 iowrite32(0, priv->dma_io + offset + 4); 361 424 iowrite32(buf->buf_phys, priv->dma_io + offset); 362 - dev_dbg(dev, "Buffer: %x and read from dev: %x", 363 - (unsigned)buf->buf_phys, ioread32(priv->dma_io + offset)); 364 425 365 426 return 0; 366 427 } ··· 368 433 int i = 0; 369 434 370 435 priv->tx_dcount = priv->tx_buf.len / sizeof(struct tx_desc); 371 - priv->tx_descs = (struct tx_desc *) priv->tx_buf.buf; 436 + priv->tx_descs = (struct tx_desc *)priv->tx_buf.buf; 372 437 priv->tx_dnext = 0; 373 438 374 439 for (i = 0; i < priv->tx_dcount; i++) ··· 380 445 int i; 381 446 382 447 priv->rx_dcount = priv->rx_buf.len / sizeof(struct rx_desc); 383 - priv->rx_descs = (struct rx_desc *) priv->rx_buf.buf; 448 + priv->rx_descs = (struct rx_desc *)priv->rx_buf.buf; 384 449 priv->rx_dnext = 0; 385 450 386 451 for (i = 0; i < priv->rx_dcount; i++) { ··· 404 469 struct device *dev = PRIV_TO_DEV(priv); 405 470 int err = 0; 406 471 407 - dev_info(dev, "Opening device\n"); 408 - 409 472 
ec_bhf_reset(priv); 410 473 411 474 err = ec_bhf_alloc_dma_mem(priv, &priv->rx_buf, priv->rx_dma_chan, ··· 414 481 } 415 482 ec_bhf_setup_rx_descs(priv); 416 483 417 - dev_info(dev, "RX buffer allocated, address: %x\n", 418 - (unsigned)priv->rx_buf.buf_phys); 419 - 420 484 err = ec_bhf_alloc_dma_mem(priv, &priv->tx_buf, priv->tx_dma_chan, 421 485 FIFO_SIZE * sizeof(struct tx_desc)); 422 486 if (err) { 423 487 dev_err(dev, "Failed to allocate tx buffer\n"); 424 488 goto error_rx_free; 425 489 } 426 - dev_dbg(dev, "TX buffer allocated, addres: %x\n", 427 - (unsigned)priv->tx_buf.buf_phys); 428 - 429 490 iowrite8(0, priv->mii_io + MII_MAC_FILT_FLAG); 430 - 431 491 ec_bhf_setup_tx_descs(priv); 432 492 433 493 netif_start_queue(net_dev); ··· 429 503 priv->hrtimer.function = ec_bhf_timer_fun; 430 504 hrtimer_start(&priv->hrtimer, ktime_set(0, polling_frequency), 431 505 HRTIMER_MODE_REL); 432 - 433 - dev_info(PRIV_TO_DEV(priv), "Device open\n"); 434 - 435 - ec_bhf_print_status(priv); 436 506 437 507 return 0; 438 508 ··· 561 639 goto err_free_net_dev; 562 640 563 641 memcpy_fromio(net_dev->dev_addr, priv->mii_io + MII_MAC_ADDR, 6); 564 - 565 - dev_dbg(&dev->dev, "CX5020 Ethercat master address: %pM\n", 566 - net_dev->dev_addr); 567 642 568 643 err = register_netdev(net_dev); 569 644 if (err < 0)